hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/doc_utils.py
from typing import Callable


def is_documented_by(function_with_docstring: Callable):
    """Decorator to share docstrings across common functions.

    Args:
        function_with_docstring (`Callable`): The function whose docstring is copied to the decorated function.
    """

    def wrapper(target_function):
        target_function.__doc__ = function_with_docstring.__doc__
        return target_function

    return wrapper
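A quick usage sketch of the decorator above (the two function names are hypothetical, for illustration only):

```python
# Hypothetical pair of functions sharing one docstring via is_documented_by.
def load_table(path):
    """Load a table from a local file."""
    ...


@is_documented_by(load_table)
def load_table_from_url(url):
    ...


assert load_table_from_url.__doc__ == load_table.__doc__
```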
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/metadata.py
import textwrap
from collections import Counter
from pathlib import Path
from typing import Any, ClassVar, Dict, Optional, Tuple, Union

import yaml
from huggingface_hub import DatasetCardData

from ..config import METADATA_CONFIGS_FIELD
from ..utils.logging import get_logger
from .deprecation_utils import deprecated


logger = get_logger(__name__)


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


@deprecated("Use `huggingface_hub.DatasetCardData` instead.")
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Union[Path, str]) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)

        Args:
            path (:obj:`Path`): Path to the dataset card (its README.md file)

        Returns:
            :class:`DatasetMetadata`: The dataset's metadata

        Raises:
            :obj:`TypeError`: If the dataset's metadata is invalid
        """
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string

        Args:
            string (:obj:`str`): The YAML string

        Returns:
            :class:`DatasetMetadata`: The dataset's metadata

        Raises:
            :obj:`TypeError`: If the dataset's metadata is invalid
        """
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


class MetadataConfigs(Dict[str, Dict[str, Any]]):
    """Should be in format {config_name: {**config_params}}."""

    FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD

    @staticmethod
    def _raise_if_data_files_field_not_valid(metadata_config: dict):
        yaml_data_files = metadata_config.get("data_files")
        if yaml_data_files is not None:
            yaml_error_message = textwrap.dedent(
                f"""
                Expected data_files in YAML to be either a string or a list of strings
                or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
                Examples of data_files in YAML:

                   data_files: data.csv

                   data_files: data/*.png

                   data_files:
                    - part0/*
                    - part1/*

                   data_files:
                    - split: train
                      path: train/*
                    - split: test
                      path: test/*

                   data_files:
                    - split: train
                      path:
                      - train/part1/*
                      - train/part2/*
                    - split: test
                      path: test/*
                """
            )
            if not isinstance(yaml_data_files, (list, str)):
                raise ValueError(yaml_error_message)
            if isinstance(yaml_data_files, list):
                for yaml_data_files_item in yaml_data_files:
                    if (
                        not isinstance(yaml_data_files_item, (str, dict))
                        or isinstance(yaml_data_files_item, dict)
                        and not (
                            len(yaml_data_files_item) == 2
                            and "split" in yaml_data_files_item
                            and isinstance(yaml_data_files_item.get("path"), (str, list))
                        )
                    ):
                        raise ValueError(yaml_error_message)

    @classmethod
    def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
        if dataset_card_data.get(cls.FIELD_NAME):
            metadata_configs = dataset_card_data[cls.FIELD_NAME]
            if not isinstance(metadata_configs, list):
                raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
            for metadata_config in metadata_configs:
                if "config_name" not in metadata_config:
                    raise ValueError(
                        f"Each config must include `config_name` field with a string name of a config, "
                        f"but got {metadata_config}. "
                    )
                cls._raise_if_data_files_field_not_valid(metadata_config)
            return cls(
                {
                    config["config_name"]: {param: value for param, value in config.items() if param != "config_name"}
                    for config in metadata_configs
                }
            )
        return cls()

    def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
        if self:
            for metadata_config in self.values():
                self._raise_if_data_files_field_not_valid(metadata_config)
            current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
            total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
            for config_name, config_metadata in total_metadata_configs.items():
                config_metadata.pop("config_name", None)
            dataset_card_data[self.FIELD_NAME] = [
                {"config_name": config_name, **config_metadata}
                for config_name, config_metadata in total_metadata_configs.items()
            ]

    def get_default_config_name(self) -> Optional[str]:
        default_config_name = None
        for config_name, metadata_config in self.items():
            if config_name == "default" or metadata_config.get("default"):
                if default_config_name is None:
                    default_config_name = config_name
                else:
                    raise ValueError(
                        f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
                    )
        return default_config_name


# DEPRECATED - just here to support old versions of evaluate like 0.2.2
# To support new tasks on the Hugging Face Hub, please open a PR for this file:
# https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
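A minimal sketch of how `MetadataConfigs` round-trips the `configs` field of a dataset card (assuming `datasets` and `huggingface_hub` are installed; the data-files pattern is made up):

```python
from huggingface_hub import DatasetCardData

from datasets.utils.metadata import MetadataConfigs

# A card whose YAML block declares one config named "default"
card_data = DatasetCardData(configs=[{"config_name": "default", "data_files": "data/*.csv"}])
configs = MetadataConfigs.from_dataset_card_data(card_data)
assert configs == {"default": {"data_files": "data/*.csv"}}
configs.to_dataset_card_data(card_data)  # writes the normalized list back to the card
```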
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/readme.py
# loading package files: https://stackoverflow.com/a/20885799
import importlib.resources as pkg_resources
import logging
from pathlib import Path
from typing import Any, List, Tuple

import yaml

from . import resources
from .deprecation_utils import deprecated


BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils"
this_url = f"{BASE_REF_URL}/{__file__}"
logger = logging.getLogger(__name__)


def load_yaml_resource(resource: str) -> Tuple[Any, str]:
    content = pkg_resources.read_text(resources, resource)
    return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}"


readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml")

FILLER_TEXT = [
    "[Needs More Information]",
    "[More Information Needed]",
    "(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)",
]

# Dictionary representation of section/readme, error_list, warning_list
ReadmeValidatorOutput = Tuple[dict, List[str], List[str]]


class Section:
    def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False):
        self.name = name
        self.level = level
        self.lines = lines
        self.text = ""
        self.is_empty_text = True
        self.content = {}
        self.parsing_error_list = []
        self.parsing_warning_list = []
        if self.lines is not None:
            self.parse(suppress_parsing_errors=suppress_parsing_errors)

    def parse(self, suppress_parsing_errors: bool = False):
        current_sub_level = ""
        current_lines = []
        code_start = False
        for line in self.lines:
            if line.strip(" \n") == "":
                continue
            elif line.strip(" \n")[:3] == "```":
                code_start = not code_start
            elif line.split()[0] == self.level + "#" and not code_start:
                if current_sub_level != "":
                    self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
                    current_lines = []
                else:
                    if current_lines != []:
                        self.text += "".join(current_lines).strip()
                        if self.text != "" and self.text not in FILLER_TEXT:
                            self.is_empty_text = False
                        current_lines = []

                current_sub_level = " ".join(line.split()[1:]).strip(" \n")
            else:
                current_lines.append(line)
        else:
            if current_sub_level != "":
                if current_sub_level in self.content:
                    self.parsing_error_list.append(
                        f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections."
                    )
                self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
            else:
                if current_lines != []:
                    self.text += "".join(current_lines).strip()
                    if self.text != "" and self.text not in FILLER_TEXT:
                        self.is_empty_text = False

        if self.level == "" and not suppress_parsing_errors:
            if self.parsing_error_list != [] or self.parsing_warning_list != []:
                errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list)
                error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors
                raise ValueError(error_string)

    def validate(self, structure: dict) -> ReadmeValidatorOutput:
        """Validates a Section class object recursively using the structure provided as a dictionary.

        Args:
            structure (:obj: `dict`): The dictionary representing the expected structure.

        Returns:
            :obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors.
        """
        # Header text validation
        error_list = []
        warning_list = []
        if structure["allow_empty"] is False:
            # If content is expected
            if self.is_empty_text and self.content == {}:
                # If no content is found, mention it in the error_list
                error_list.append(f"Expected some content in section `{self.name}` but it is empty.")

        if structure["allow_empty_text"] is False:
            # If some text is expected
            if self.is_empty_text:
                # If no text is found, mention it in the error_list
                error_list.append(
                    f"Expected some text in section `{self.name}` but it is empty (text in subsections is ignored)."
                )
        # Subsections Validation
        if structure["subsections"] is not None:
            # If subsections are expected
            if self.content == {}:
                # If no subsections are present
                values = [subsection["name"] for subsection in structure["subsections"]]
                # Mention the expected values in the error_list
                error_list.append(
                    f"Section `{self.name}` expected the following subsections: {', '.join(['`' + x + '`' for x in values])}. Found 'None'."
                )
            else:
                # If some subsections are present
                structure_names = [subsection["name"] for subsection in structure["subsections"]]
                has_missing_subsections = False
                for idx, name in enumerate(structure_names):
                    if name not in self.content:
                        # If the expected subsection is not present
                        error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.")
                        has_missing_subsections = True
                    else:
                        # If the subsection is present, validate it and concat its
                        # errors and warnings into the section's lists.
                        # Skip sublevel validation if current level is `###`
                        if self.level == "###":
                            continue
                        else:
                            _, subsec_error_list, subsec_warning_list = self.content[name].validate(
                                structure["subsections"][idx]
                            )
                            error_list += subsec_error_list
                            warning_list += subsec_warning_list

                if has_missing_subsections:
                    # we only allow extra subsections if all the expected ones are here
                    for name in self.content:
                        if name not in structure_names:
                            # If an extra subsection is present
                            warning_list.append(
                                f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown."
                            )
        if error_list:
            # If there are errors, do not return the dictionary as it is invalid
            return {}, error_list, warning_list
        else:
            return self.to_dict(), error_list, warning_list

    def to_dict(self) -> dict:
        """Returns the dictionary representation of a section."""
        return {
            "name": self.name,
            "text": self.text,
            "is_empty_text": self.is_empty_text,
            "subsections": [value.to_dict() for value in self.content.values()],
        }


@deprecated("Use `huggingface_hub.DatasetCard` instead.")
class ReadMe(Section):  # Level 0
    def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False):
        super().__init__(name=name, level="")  # Not using lines here as we need to use a child class parse
        self.structure = structure
        self.yaml_tags_line_count = -2
        self.tag_count = 0
        self.lines = lines
        if self.lines is not None:
            self.parse(suppress_parsing_errors=suppress_parsing_errors)

    def validate(self):
        if self.structure is None:
            content, error_list, warning_list = self._validate(readme_structure)
        else:
            content, error_list, warning_list = self._validate(self.structure)
        if error_list != [] or warning_list != []:
            errors = "\n".join(["-\t" + x for x in error_list + warning_list])
            error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors
            raise ValueError(error_string)

    @classmethod
    def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False):
        with open(path, encoding="utf-8") as f:
            lines = f.readlines()
        return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors)

    @classmethod
    def from_string(
        cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False
    ):
        lines = string.split("\n")
        return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors)

    def parse(self, suppress_parsing_errors: bool = False):
        # Skip Tags
        line_count = 0
        for line in self.lines:
            self.yaml_tags_line_count += 1
            if line.strip(" \n") == "---":
                self.tag_count += 1
                if self.tag_count == 2:
                    break
            line_count += 1
        if self.tag_count == 2:
            self.lines = self.lines[line_count + 1 :]  # Get the last + 1 th item.
        else:
            self.lines = self.lines[self.tag_count :]
        super().parse(suppress_parsing_errors=suppress_parsing_errors)

    def __str__(self):
        """Returns the string of dictionary representation of the ReadMe."""
        return str(self.to_dict())

    def _validate(self, readme_structure):
        error_list = []
        warning_list = []
        if self.yaml_tags_line_count == 0:
            warning_list.append("Empty YAML markers are present in the README.")
        elif self.tag_count == 0:
            warning_list.append("No YAML markers are present in the README.")
        elif self.tag_count == 1:
            warning_list.append("Only the start of the YAML tags is present in the README.")
        # Check how many first level sections are present.
        num_first_level_keys = len(self.content.keys())
        if num_first_level_keys > 1:
            # If more than one, add to the error list, continue
            error_list.append(
                f"The README has several first-level headings: {', '.join(['`' + x + '`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README."
            )
        elif num_first_level_keys < 1:
            # If less than one, append error.
            error_list.append(
                "The README has no first-level headings. One heading is expected. Skipping further validation for this README."
            )
        else:
            # If one exactly
            start_key = list(self.content.keys())[0]  # Get the key
            if start_key.startswith("Dataset Card for"):  # Check correct start
                # If the starting is correct, validate all the sections
                _, sec_error_list, sec_warning_list = self.content[start_key].validate(
                    readme_structure["subsections"][0]
                )
                error_list += sec_error_list
                warning_list += sec_warning_list
            else:
                # If not found, append error
                error_list.append(
                    "No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
                )
        if error_list:
            # If there are errors, do not return the dictionary as it is invalid
            return {}, error_list, warning_list
        else:
            return self.to_dict(), error_list, warning_list


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    readme = ReadMe.from_readme(readme_filepath)
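A sketch of validating a README body with the (deprecated) `ReadMe` class. The toy card below intentionally lacks the expected section tree, so `validate()` raises and lists the missing subsections:

```python
from datasets.utils.readme import ReadMe

toy_card = """---
language: en
---
# Dataset Card for Demo
Some description.
"""

readme = ReadMe.from_string(toy_card, suppress_parsing_errors=True)
try:
    readme.validate()  # raises: the toy card is missing the expected subsections
except ValueError as err:
    print(err)
```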
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/sharding.py
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    # until we decide how to define sharding without ambiguity for users
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """
    Get the range of shard indices per job.
    If num_shards < max_num_jobs, then num_shards jobs are given a range of one shard.
    The order of the shard indices is preserved: e.g. all the first shards are given the first job.
    Moreover all the jobs are given approximately the same number of shards.

    Example:

    ```python
    >>> _distribute_shards(2, max_num_jobs=4)
    [range(0, 1), range(1, 2)]
    >>> _distribute_shards(10, max_num_jobs=3)
    [range(0, 4), range(4, 7), range(7, 10)]
    ```
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs"""
    # We must shuffle all the lists, and lists of the same size must have the same shuffling.
    # This way entangled lists of (shard, shard_metadata) are still in the right order.
    # First, let's generate the shuffled indices per list size
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
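A short sketch of how a generator's `gen_kwargs` get split across jobs (the file names are made up):

```python
from datasets.utils.sharding import _distribute_shards, _split_gen_kwargs

gen_kwargs = {"files": ["f0", "f1", "f2", "f3"], "encoding": "utf-8"}
print(_distribute_shards(num_shards=4, max_num_jobs=2))
# [range(0, 2), range(2, 4)]
print(_split_gen_kwargs(gen_kwargs, max_num_jobs=2))
# [{'files': ['f0', 'f1'], 'encoding': 'utf-8'}, {'files': ['f2', 'f3'], 'encoding': 'utf-8'}]
```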
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/filelock.py
# deprecated, please use the `filelock` package instead

from filelock import (  # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0
    BaseFileLock,
    SoftFileLock,
    Timeout,
    UnixFileLock,
    WindowsFileLock,
)

from ._filelock import FileLock  # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
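A usage sketch of the re-exported lock (the file path is arbitrary):

```python
from datasets.utils.filelock import FileLock

# Only one process at a time enters this block; others wait on the .lock file.
with FileLock("demo.txt.lock"):
    with open("demo.txt", "a") as f:
        f.write("exclusive write\n")
```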
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/deprecation_utils.py
import enum
import inspect
import warnings
from functools import wraps
from typing import Callable, Optional

from .logging import get_logger


_emitted_deprecation_warnings = set()

logger = get_logger(__name__)


def deprecated(help_message: Optional[str] = None):
    """Decorator to mark a class or a function as deprecated.

    Args:
        help_message (:obj:`str`, optional): An optional message to guide the user on how to
            switch to non-deprecated usage of the library.
    """

    def decorator(deprecated_class_or_function: Callable):
        global _emitted_deprecation_warnings
        if inspect.isclass(deprecated_class_or_function):
            deprecated_function = deprecated_class_or_function.__init__
            name = deprecated_class_or_function.__name__
        else:
            deprecated_function = deprecated_class_or_function
            name = deprecated_function.__name__
        # Support deprecating __init__ class method: use the class name instead
        name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
        # The conditional only covers the optional help message, so the base
        # deprecation message is always emitted.
        warning_msg = f"{name} is deprecated and will be removed in the next major version of datasets." + (
            f" {help_message}" if help_message else ""
        )

        @wraps(deprecated_function)
        def wrapper(*args, **kwargs):
            func_hash = hash(deprecated_function)
            if func_hash not in _emitted_deprecation_warnings:
                warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
                _emitted_deprecation_warnings.add(func_hash)
            return deprecated_function(*args, **kwargs)

        wrapper._decorator_name_ = "deprecated"
        if inspect.isclass(deprecated_class_or_function):
            deprecated_class_or_function.__init__ = wrapper
            return deprecated_class_or_function
        else:
            return wrapper

    return decorator


class OnAccess(enum.EnumMeta):
    """
    Enum metaclass that calls a user-specified function whenever a member is accessed.
    """

    def __getattribute__(cls, name):
        obj = super().__getattribute__(name)
        if isinstance(obj, enum.Enum) and obj._on_access:
            obj._on_access()
        return obj

    def __getitem__(cls, name):
        member = super().__getitem__(name)
        if member._on_access:
            member._on_access()
        return member

    def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
        obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
        if isinstance(obj, enum.Enum) and obj._on_access:
            obj._on_access()
        return obj


class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
    """
    Enum class that calls `deprecate` method whenever a member is accessed.
    """

    def __new__(cls, value):
        member = object.__new__(cls)
        member._value_ = value
        member._on_access = member.deprecate
        return member

    @property
    def help_message(self):
        return ""

    def deprecate(self):
        help_message = f" {self.help_message}" if self.help_message else ""
        warnings.warn(
            f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
            + help_message,
            FutureWarning,
            stacklevel=3,
        )
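A sketch of the decorator on a hypothetical helper; the `FutureWarning` fires only on the first call:

```python
from datasets.utils.deprecation_utils import deprecated


@deprecated(help_message="Use `new_helper` instead.")
def old_helper(x):
    return x * 2


old_helper(3)  # emits a FutureWarning
old_helper(3)  # silent: this function's warning was already emitted once
```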
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/logging.py
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging utilities."""

import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from .tqdm import (  # noqa: F401 # imported for backward compatibility
    disable_progress_bar,
    enable_progress_bar,
    is_progress_bar_enabled,
    tqdm,
)


log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    If it is not, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.addHandler(logging.StreamHandler())
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name.
    This function can be used in dataset scripts.
    """
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the Hugging Face Datasets library's root logger.

    Returns:
        Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.

    <Tip>

    The Hugging Face Datasets library has the following logging levels:

    - `datasets.logging.CRITICAL`, `datasets.logging.FATAL`
    - `datasets.logging.ERROR`
    - `datasets.logging.WARNING`, `datasets.logging.WARN`
    - `datasets.logging.INFO`
    - `datasets.logging.DEBUG`

    </Tip>
    """
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the Hugging Face Datasets library's root logger.

    Args:
        verbosity:
            Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
    """
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the level for the Hugging Face Datasets library's root logger to `INFO`.

    This will display most of the logging information and tqdm bars.

    Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.
    """
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the level for the Hugging Face Datasets library's root logger to `WARNING`.

    This will display only warnings and errors, and tqdm bars.

    Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.
    """
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the level for the Hugging Face Datasets library's root logger to `DEBUG`.

    This will display all the logging information and tqdm bars.

    Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.
    """
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the level for the Hugging Face Datasets library's root logger to `ERROR`.

    This will display only errors and tqdm bars.

    Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.
    """
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs.
    Note that log propagation is disabled by default.
    """
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs.
    Please disable the Hugging Face Datasets library's default handler to prevent double logging if the root logger
    has been configured.
    """
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/file_utils.py
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import copy import io import json import os import posixpath import re import shutil import sys import time import urllib import warnings from contextlib import closing, contextmanager from functools import partial from pathlib import Path from typing import List, Optional, Type, TypeVar, Union from urllib.parse import urljoin, urlparse import fsspec import huggingface_hub import requests from fsspec.core import strip_protocol from fsspec.utils import can_be_local from huggingface_hub import HfFolder from huggingface_hub.utils import insecure_hashlib from packaging import version from .. import __version__, config from ..download.download_config import DownloadConfig from . import _tqdm, logging from . import tqdm as hf_tqdm from ._filelock import FileLock from .extract import ExtractManager logger = logging.get_logger(__name__) # pylint: disable=invalid-name INCOMPLETE_SUFFIX = ".incomplete" T = TypeVar("T", str, Path) def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str: """ Add hf_modules_cache to the python path. By default hf_modules_cache='~/.cache/huggingface/modules'. It can also be set with the environment variable HF_MODULES_CACHE. This is used to add modules such as `datasets_modules` """ hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE hf_modules_cache = str(hf_modules_cache) if hf_modules_cache not in sys.path: sys.path.append(hf_modules_cache) os.makedirs(hf_modules_cache, exist_ok=True) if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): pass return hf_modules_cache def is_remote_url(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_local_path(url_or_filename: str) -> bool: # On unix the scheme of a local path is empty (for both absolute and relative), # while on windows the scheme is the drive name (ex: "c") for absolute paths. 
# for details on the windows behavior, see https://bugs.python.org/issue42215 return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_relative_path(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) def relative_to_absolute_path(path: T) -> T: """Convert relative path to absolute path.""" abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) return Path(abs_path_str) if isinstance(path, Path) else abs_path_str def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str: if dataset: endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX else: endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX return "/".join((endpoint, identifier, filename)) def head_hf_s3( identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0 ) -> Union[requests.Response, Exception]: return http_head( hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset), max_retries=max_retries, ) def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str: default_revision = "main" if version.parse(__version__).is_devrelease else __version__ revision = revision or default_revision if dataset: return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name) else: return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name) def url_or_path_join(base_name: str, *pathnames: str) -> str: if is_remote_url(base_name): return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) else: return Path(base_name, *pathnames).as_posix() def url_or_path_parent(url_or_path: str) -> str: if is_remote_url(url_or_path): return url_or_path[: url_or_path.rindex("/")] else: return os.path.dirname(url_or_path) def hash_url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".py"): filename += ".py" return filename def cached_path( url_or_filename, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. 
Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk ValueError: if it couldn't parse the url or filename correctly requests.exceptions.ConnectionError: in case of internet connection issue """ if download_config is None: download_config = DownloadConfig(**download_kwargs) cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) # Convert fsspec URL in the format "file://local/path" to "local/path" if can_be_local(url_or_filename): url_or_filename = strip_protocol(url_or_filename) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=download_config.force_download, proxies=download_config.proxies, resume_download=download_config.resume_download, user_agent=download_config.user_agent, local_files_only=download_config.local_files_only, use_etag=download_config.use_etag, max_retries=download_config.max_retries, token=download_config.token, ignore_url_params=download_config.ignore_url_params, storage_options=download_config.storage_options, download_desc=download_config.download_desc, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif is_local_path(url_or_filename): # File, but it doesn't exist. raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if output_path is None: return output_path if download_config.extract_compressed_file: output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( output_path, force_extract=download_config.force_extract ) return output_path def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: ua = f"datasets/{__version__}" ua += f"; python/{config.PY_VERSION}" ua += f"; huggingface_hub/{huggingface_hub.__version__}" ua += f"; pyarrow/{config.PYARROW_VERSION}" if config.TORCH_AVAILABLE: ua += f"; torch/{config.TORCH_VERSION}" if config.TF_AVAILABLE: ua += f"; tensorflow/{config.TF_VERSION}" if config.JAX_AVAILABLE: ua += f"; jax/{config.JAX_VERSION}" if config.BEAM_AVAILABLE: ua += f"; apache_beam/{config.BEAM_VERSION}" if isinstance(user_agent, dict): ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" elif isinstance(user_agent, str): ua += "; " + user_agent return ua def get_authentication_headers_for_url( url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" ) -> dict: """Handle the HF authentication""" if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token headers = {} if url.startswith(config.HF_ENDPOINT): if token is False: token = None elif isinstance(token, str): token = token else: token = HfFolder.get_token() if token: headers["authorization"] = f"Bearer {token}" return headers class OfflineModeIsEnabled(ConnectionError): pass def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) 
if HF_DATASETS_OFFLINE is True.""" if config.HF_DATASETS_OFFLINE: raise OfflineModeIsEnabled( "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg) ) def _retry( func, func_args: Optional[tuple] = None, func_kwargs: Optional[dict] = None, exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException, status_codes: Optional[List[int]] = None, max_retries: int = 0, base_wait_time: float = 0.5, max_wait_time: float = 2, ): func_args = func_args or () func_kwargs = func_kwargs or {} retry = 0 while True: try: return func(*func_args, **func_kwargs) except exceptions as err: if retry >= max_retries or (status_codes and err.response.status_code not in status_codes): raise err else: sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry/max_retries}]") time.sleep(sleep_time) retry += 1 def _request_with_retry( method: str, url: str, max_retries: int = 0, base_wait_time: float = 0.5, max_wait_time: float = 2, timeout: float = 10.0, **params, ) -> requests.Response: """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff. Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised. Args: method (str): HTTP method, such as 'GET' or 'HEAD'. url (str): The URL of the resource to fetch. max_retries (int): Maximum number of retries, defaults to 0 (no retries). base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between retries then grows exponentially, capped by max_wait_time. max_wait_time (float): Maximum amount of time between two retries, in seconds. **params (additional keyword arguments): Params to pass to :obj:`requests.request`. """ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") tries, success = 0, False while not success: tries += 1 try: response = requests.request(method=method.upper(), url=url, timeout=timeout, **params) success = True except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err: if tries > max_retries: raise err else: logger.info(f"{method} request to {url} timed out, retrying... 
[{tries/max_retries}]") sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff time.sleep(sleep_time) return response def fsspec_head(url, storage_options=None): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) if len(paths) > 1: raise ValueError(f"HEAD can be called with at most one path but was called with {paths}") return fs.info(paths[0]) class TqdmCallback(fsspec.callbacks.TqdmCallback): def __init__(self, tqdm_kwargs=None, *args, **kwargs): super().__init__(tqdm_kwargs, *args, **kwargs) self._tqdm = _tqdm # replace tqdm.tqdm by datasets.tqdm.tqdm def fsspec_get(url, temp_file, storage_options=None, desc=None): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) if len(paths) > 1: raise ValueError(f"GET can be called with at most one path but was called with {paths}") callback = TqdmCallback( tqdm_kwargs={ "desc": desc or "Downloading", "unit": "B", "unit_scale": True, } ) fs.get_file(paths[0], temp_file.name, callback=callback) def ftp_head(url, timeout=10.0): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") try: with closing(urllib.request.urlopen(url, timeout=timeout)) as r: r.read(1) except Exception: return False return True def ftp_get(url, temp_file, timeout=10.0): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") try: logger.info(f"Getting through FTP {url} into {temp_file.name}") with closing(urllib.request.urlopen(url, timeout=timeout)) as r: shutil.copyfileobj(r, temp_file) except urllib.error.URLError as e: raise ConnectionError(e) from None def http_get( url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None ): headers = copy.deepcopy(headers) or {} headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) if resume_size > 0: headers["Range"] = f"bytes={resume_size:d}-" response = _request_with_retry( method="GET", url=url, stream=True, proxies=proxies, headers=headers, cookies=cookies, max_retries=max_retries, timeout=timeout, ) if response.status_code == 416: # Range not satisfiable return content_length = response.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None with hf_tqdm( unit="B", unit_scale=True, total=total, initial=resume_size, desc=desc or "Downloading", ) as progress: for chunk in response.iter_content(chunk_size=1024): progress.update(len(chunk)) temp_file.write(chunk) def http_head( url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0 ) -> requests.Response: headers = copy.deepcopy(headers) or {} headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) response = _request_with_retry( method="HEAD", url=url, proxies=proxies, headers=headers, cookies=cookies, allow_redirects=allow_redirects, timeout=timeout, max_retries=max_retries, ) return response def request_etag( url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" ) -> Optional[str]: if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if urlparse(url).scheme not in ("http", "https"): return 
None headers = get_authentication_headers_for_url(url, token=token) response = http_head(url, headers=headers, max_retries=3) response.raise_for_status() etag = response.headers.get("ETag") if response.ok else None return etag def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=100, resume_download=False, user_agent=None, local_files_only=False, use_etag=True, max_retries=0, token=None, use_auth_token="deprecated", ignore_url_params=False, storage_options=None, download_desc=None, ) -> str: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk """ if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if cache_dir is None: cache_dir = config.HF_DATASETS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) if ignore_url_params: # strip all query parameters and #fragments from the URL cached_url = urljoin(url, urlparse(url).path) else: cached_url = url # additional parameters may be added to the given URL connected = False response = None cookies = None etag = None head_error = None scheme = None # Try a first time to file the file on the local file system without eTag (None) # if we don't ask for 'force_download' then we spare a request filename = hash_url_to_filename(cached_url, etag=None) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download and not use_etag: return cache_path # Prepare headers for authentication headers = get_authentication_headers_for_url(url, token=token) if user_agent is not None: headers["user-agent"] = user_agent # We don't have the file locally or we need an eTag if not local_files_only: scheme = urlparse(url).scheme if scheme == "ftp": connected = ftp_head(url) elif scheme not in ("http", "https"): response = fsspec_head(url, storage_options=storage_options) # s3fs uses "ETag", gcsfs uses "etag" etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None connected = True try: response = http_head( url, allow_redirects=True, proxies=proxies, timeout=etag_timeout, max_retries=max_retries, headers=headers, ) if response.status_code == 200: # ok etag = response.headers.get("ETag") if use_etag else None for k, v in response.cookies.items(): # In some edge cases, we need to get a confirmation token if k.startswith("download_warning") and "drive.google.com" in url: url += "&confirm=" + v cookies = response.cookies connected = True # Fix Google Drive URL to avoid Virus scan warning if "drive.google.com" in url and "confirm=" not in url: url += "&confirm=t" # In some edge cases, head request returns 400 but the connection is actually ok elif ( (response.status_code == 400 and "firebasestorage.googleapis.com" in url) or (response.status_code == 405 and "drive.google.com" in url) or ( response.status_code == 403 and ( re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url) or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url) ) ) or (response.status_code == 403 and "ndownloader.figstatic.com" in url) ): 
connected = True logger.info(f"Couldn't get ETag version for url {url}") elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None: raise ConnectionError( f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`" ) except (OSError, requests.exceptions.Timeout) as e: # not connected head_error = e pass # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if not connected: if os.path.exists(cache_path) and not force_download: return cache_path if local_files_only: raise FileNotFoundError( f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" " disabled. To enable file online look-ups, set 'local_files_only' to False." ) elif response is not None and response.status_code == 404: raise FileNotFoundError(f"Couldn't find file at {url}") _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") if head_error is not None: raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") elif response is not None: raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") else: raise ConnectionError(f"Couldn't reach {url}") # Try a second time filename = hash_url_to_filename(cached_url, etag) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download: return cache_path # From now on, connected is True. # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # Retry in case previously locked processes just enter after the precedent process releases the lock if os.path.exists(cache_path) and not force_download: return cache_path incomplete_path = cache_path + ".incomplete" @contextmanager def temp_file_manager(mode="w+b"): with open(incomplete_path, mode) as f: yield f resume_size = 0 if resume_download: temp_file_manager = partial(temp_file_manager, mode="a+b") if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size # Download to temporary file, then copy to cache path once finished. # Otherwise, you get corrupt cache entries if the download gets interrupted. 
with temp_file_manager() as temp_file: logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") # GET file object if scheme == "ftp": ftp_get(url, temp_file) elif scheme not in ("http", "https"): fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc) else: http_get( url, temp_file, proxies=proxies, resume_size=resume_size, headers=headers, cookies=cookies, max_retries=max_retries, desc=download_desc, ) logger.info(f"storing {url} in cache at {cache_path}") shutil.move(temp_file.name, cache_path) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_path, 0o666 & ~umask) logger.info(f"creating metadata file for {cache_path}") meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w", encoding="utf-8") as meta_file: json.dump(meta, meta_file) return cache_path def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) return fn return docstring_decorator def estimate_dataset_size(paths): return sum(path.stat().st_size for path in paths) def readline(f: io.RawIOBase): # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 res = bytearray() while True: b = f.read(1) if not b: break res += b if res.endswith(b"\n"): break return bytes(res)
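A sketch of `cached_path` on a remote file (the URL is illustrative; any reachable HTTP URL works), assuming default cache settings:

```python
from datasets.utils.file_utils import cached_path

# Downloads on the first call, then serves the cached copy on later calls.
local_path = cached_path("https://huggingface.co/datasets/glue/resolve/main/README.md")
print(local_path)  # e.g. ~/.cache/huggingface/datasets/downloads/<sha256>
```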
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/info_utils.py
import enum
import os
from typing import Optional

from huggingface_hub.utils import insecure_hashlib

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run.

    The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
    when generating/downloading a dataset for the first time.

    The verification modes:

    |                           | Verification checks                                                          |
    |---------------------------|------------------------------------------------------------------------------|
    | `ALL_CHECKS`              | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
    |                           | and the validity (number of files, checksums, etc.) of downloaded files      |
    | `BASIC_CHECKS` (default)  | Same as `ALL_CHECKS` but without checking downloaded files                   |
    | `NO_CHECKS`               | None                                                                         |
    """

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file's checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """Some recorded splits were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits are missing from the recorded splits."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = insecure_hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.

    Args:
        dataset_size (int): Dataset size in bytes.

    Returns:
        bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/extract.py
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from ._filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives:
        # we extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """
        Fix for CVE-2007-4559

        Desc:
            Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
            module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
            sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.

        See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559
        From: https://stackoverflow.com/a/10077309
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"Rar!\x1a\x07\x00",  # RAR_ID
        b"Rar!\x1a\x07\x01\x00",  # RAR5_ID
    ]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    # Keep the zip extractor last: other formats (e.g. tar or gzip) can be wrongly detected as zip,
    # so zip should only be tried once every other extractor has been ruled out.
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/download_manager.py
# deprecated, please use datasets.download.download_manager
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/patching.py
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """
    Patch a submodule attribute of an object, while keeping all the other submodules intact at all levels.

    Example::

        >>> import importlib
        >>> from datasets.load import dataset_module_factory
        >>> from datasets.streaming import patch_submodule, xjoin
        >>>
        >>> dataset_module = dataset_module_factory("snli")
        >>> snli_module = importlib.import_module(dataset_module.module_path)
        >>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)
        >>> patcher.start()
        >>> assert snli_module.os.path.join is xjoin
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This makes it possible to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This makes it possible to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
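A minimal sketch of `patch_submodule` used as a context manager; the module here is fabricated with `types.ModuleType` purely for illustration, whereas the docstring above shows the real dataset-script use case:

import os
import types

from datasets.utils.patching import patch_submodule

# Stand-in for a dataset script module that did `import os`.
fake_module = types.ModuleType("fake_module")
fake_module.os = os


def upper_join(a, b):
    return os.path.join(a, b).upper()


with patch_submodule(fake_module, "os.path.join", upper_join):
    # Inside the context, the module sees the patched join...
    assert fake_module.os.path.join("a", "b") == os.path.join("a", "b").upper()
# ...and the original module is restored afterwards.
assert fake_module.os.path.join("a", "b") == os.path.join("a", "b")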
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/typing.py
import os from typing import Dict, List, Tuple, TypeVar, Union T = TypeVar("T") ListLike = Union[List[T], Tuple[T, ...]] NestedDataStructureLike = Union[T, List[T], Dict[str, T]] PathLike = Union[str, bytes, os.PathLike]
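These aliases mostly appear in signatures that accept a single value, a list/tuple of values, or a dict of values. A small sketch of how they read in practice; `as_list` is a hypothetical helper, not part of the library:

from typing import List

from datasets.utils.typing import NestedDataStructureLike, PathLike


def as_list(paths: NestedDataStructureLike[PathLike]) -> List[PathLike]:
    # Hypothetical helper: normalize a single path, a list/tuple of paths,
    # or a dict mapping split names to paths, into a flat list of paths.
    if isinstance(paths, dict):
        return list(paths.values())
    if isinstance(paths, (list, tuple)):
        return list(paths)
    return [paths]


assert as_list("train.csv") == ["train.csv"]
assert as_list({"train": "a.csv", "test": "b.csv"}) == ["a.csv", "b.csv"]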
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/__init__.py
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa # Lint as: python3 from . import tqdm as _tqdm # _tqdm is the module from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental from .tqdm import ( disable_progress_bars, enable_progress_bars, are_progress_bars_disabled, tqdm, )
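A quick sketch of the progress-bar toggles re-exported here:

from datasets.utils import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars

disable_progress_bars()  # silence all `datasets` progress bars globally
assert are_progress_bars_disabled()
enable_progress_bars()  # restore the default behavior
assert not are_progress_bars_disabled()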
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/_filelock.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """Utilities to handle file locking in `datasets`.""" import os from filelock import FileLock as FileLock_ class FileLock(FileLock_): """ A `filelock.FileLock` initializer that handles long paths. """ MAX_FILENAME_LENGTH = 255 def __init__(self, lock_file, *args, **kwargs): lock_file = self.hash_filename_if_too_long(lock_file) super().__init__(lock_file, *args, **kwargs) @classmethod def hash_filename_if_too_long(cls, path: str) -> str: filename = os.path.basename(path) if len(filename) > cls.MAX_FILENAME_LENGTH: dirname = os.path.dirname(path) hashed_filename = str(hash(filename)) new_filename = ( filename[: cls.MAX_FILENAME_LENGTH - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock" ) return os.path.join(dirname, new_filename) else: return path
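A small sketch of the long-filename handling above; the 300-character lock name is an arbitrary illustration, and `lock_file` is the attribute exposed by `filelock.FileLock`:

import os
import tempfile

from datasets.utils._filelock import FileLock

# A lock filename longer than MAX_FILENAME_LENGTH (255) is truncated and suffixed
# with a hash so that the path stays valid on common filesystems.
long_lock_path = os.path.join(tempfile.gettempdir(), "x" * 300 + ".lock")
lock = FileLock(long_lock_path)
assert len(os.path.basename(lock.lock_file)) <= FileLock.MAX_FILENAME_LENGTH

with lock:  # acquires and releases the lock like a regular `filelock.FileLock`
    pass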
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/utils/version.py
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Version utils."""

import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`.

    Args:
        version_str (`str`):
            The dataset version.
        description (`str`):
            A description of what is new in this version.
        major (`str` or `int`, *optional*):
            The major version number; derived from `version_str` in `__post_init__`.
        minor (`str` or `int`, *optional*):
            The minor version number; derived from `version_str` in `__post_init__`.
        patch (`str` or `int`, *optional*):
            The patch version number; derived from `version_str` in `__post_init__`.

    Example:

    ```py
    >>> VERSION = datasets.Version("1.0.0")
    ```
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the str version from the version tuple (major, minor, patch)."""
    return ".".join(str(v) for v in version_tuple)
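A short sketch of the comparison semantics defined above:

from datasets.utils.version import Version

v = Version("1.2.0", description="Initial release")
assert v.tuple == (1, 2, 0)
assert v == "1.2.0"  # strings are coerced to Version via _validate_operand
assert v < "1.10.0"  # comparison is numeric per component, not lexicographic
assert repr(v) == "1.2.0"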
0
hf_public_repos/datasets/src/datasets/utils
hf_public_repos/datasets/src/datasets/utils/resources/readme_structure.yaml
name: "" # Filename comes here allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null # meaning it should not be checked. - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: true allow_empty_text: true subsections: null - name: "Dataset Structure" allow_empty: false allow_empty_text: true subsections: - name: "Data Instances" allow_empty: false allow_empty_text: true subsections: null - name: "Data Fields" allow_empty: false allow_empty_text: true subsections: null - name: "Data Splits" allow_empty: false allow_empty_text: true subsections: null - name: "Dataset Creation" allow_empty: false allow_empty_text: true subsections: - name: "Curation Rationale" allow_empty: true allow_empty_text: true subsections: null - name: "Source Data" allow_empty: false allow_empty_text: true subsections: - name: "Initial Data Collection and Normalization" allow_empty: true allow_empty_text: true subsections: null - name: "Who are the source language producers?" allow_empty: true allow_empty_text: true subsections: null - name: "Annotations" allow_empty: false allow_empty_text: true subsections: - name: "Annotation process" allow_empty: true allow_empty_text: true subsections: null - name: "Who are the annotators?" allow_empty: true allow_empty_text: true subsections: null - name: "Personal and Sensitive Information" allow_empty: true allow_empty_text: true subsections: null - name: "Considerations for Using the Data" allow_empty: true allow_empty_text: true subsections: - name: "Social Impact of Dataset" allow_empty: true allow_empty_text: true subsections: null - name: "Discussion of Biases" allow_empty: true allow_empty_text: true subsections: null - name: "Other Known Limitations" allow_empty: true allow_empty_text: true subsections: null - name: "Additional Information" allow_empty: true allow_empty_text: true subsections: - name: "Dataset Curators" allow_empty: true allow_empty_text: true subsections: null - name: "Licensing Information" allow_empty: true allow_empty_text: true subsections: null - name: "Citation Information" allow_empty: false allow_empty_text: true subsections: null - name: "Contributions" allow_empty: false allow_empty_text: false subsections: null
0
hf_public_repos/datasets/src/datasets/utils
hf_public_repos/datasets/src/datasets/utils/resources/multilingualities.json
{ "monolingual": "contains a single language", "multilingual": "contains multiple languages", "translation": "contains translated or aligned text", "other": "other type of language distribution" }
0
hf_public_repos/datasets/src/datasets/utils
hf_public_repos/datasets/src/datasets/utils/resources/size_categories.json
[ "unknown", "n<1K", "1K<n<10K", "10K<n<100K", "100K<n<1M", "1M<n<10M", "10M<n<100M", "100M<n<1B", "1B<n<10B", "10B<n<100B", "100B<n<1T", "n>1T" ]
0
hf_public_repos/datasets/src/datasets/utils
hf_public_repos/datasets/src/datasets/utils/resources/creators.json
{ "language": [ "found", "crowdsourced", "expert-generated", "machine-generated", "other" ], "annotations": [ "found", "crowdsourced", "expert-generated", "machine-generated", "no-annotation", "other" ] }
0
hf_public_repos/datasets/src/datasets/utils
hf_public_repos/datasets/src/datasets/utils/resources/languages.json
{ "code": "Programming language (C++, Java, Javascript, Python, etc.)", "aa": "Afar", "aaa": "Ghotuo", "aab": "Alumu-Tesu", "aac": "Ari", "aad": "Amal", "aae": "Arbëreshë Albanian", "aaf": "Aranadan", "aag": "Ambrak", "aah": "Abu' Arapesh", "aai": "Arifama-Miniafia", "aak": "Ankave", "aal": "Afade", "aan": "Anambé", "aao": "Algerian Saharan Arabic", "aap": "Pará Arára", "aaq": "Eastern Abnaki", "aas": "Aasáx", "aat": "Arvanitika Albanian", "aau": "Abau", "aav": "Austro-Asiatic languages", "aaw": "Solong", "aax": "Mandobo Atas", "aaz": "Amarasi", "ab": "Abkhazian", "aba": "Abé", "abb": "Bankon", "abc": "Ambala Ayta", "abd": "Manide", "abe": "Western Abnaki", "abf": "Abai Sungai", "abg": "Abaga", "abh": "Tajiki Arabic", "abi": "Abidji", "abj": "Aka-Bea", "abl": "Lampung Nyo", "abm": "Abanyom", "abn": "Abua", "abo": "Abon", "abp": "Abellen Ayta", "abq": "Abaza", "abr": "Abron", "abs": "Ambonese Malay", "abt": "Ambulas", "abu": "Abure", "abv": "Baharna Arabic", "abw": "Pal", "abx": "Inabaknon", "aby": "Aneme Wake", "abz": "Abui", "aca": "Achagua", "acb": "Áncá", "acd": "Gikyode", "ace": "Achinese", "acf": "Saint Lucian Creole French", "ach": "Acoli", "aci": "Aka-Cari", "ack": "Aka-Kora", "acl": "Akar-Bale", "acm": "Mesopotamian Arabic", "acn": "Achang", "acp": "Eastern Acipa", "acq": "Ta'izzi-Adeni Arabic", "acr": "Achi", "acs": "Acroá", "act": "Achterhoeks", "acu": "Achuar-Shiwiar", "acv": "Achumawi", "acw": "Hijazi Arabic", "acx": "Omani Arabic", "acy": "Cypriot Arabic", "acz": "Acheron", "ada": "Adangme", "adb": "Atauran", "add": "Lidzonka; Dzodinka", "ade": "Adele", "adf": "Dhofari Arabic", "adg": "Andegerebinha", "adh": "Adhola", "adi": "Adi", "adj": "Adioukrou", "adl": "Galo", "adn": "Adang", "ado": "Abu", "adq": "Adangbe", "adr": "Adonara", "ads": "Adamorobe Sign Language", "adt": "Adnyamathanha", "adu": "Aduge", "adw": "Amundava", "adx": "Amdo Tibetan", "ady": "Adyghe; Adygei", "adz": "Adzera", "ae": "Avestan", "aea": "Areba", "aeb": "Tunisian Arabic", "aec": "Saidi Arabic", "aed": "Argentine Sign Language", "aee": "Northeast Pashai; Northeast Pashayi", "aek": "Haeke", "ael": "Ambele", "aem": "Arem", "aen": "Armenian Sign Language", "aeq": "Aer", "aer": "Eastern Arrernte", "aes": "Alsea", "aeu": "Akeu", "aew": "Ambakich", "aey": "Amele", "aez": "Aeka", "af": "Afrikaans", "afa": "Afro-Asiatic languages", "afb": "Gulf Arabic", "afd": "Andai", "afe": "Putukwam", "afg": "Afghan Sign Language", "afh": "Afrihili", "afi": "Akrukay; Chini", "afk": "Nanubae", "afn": "Defaka", "afo": "Eloyi", "afp": "Tapei", "afs": "Afro-Seminole Creole", "aft": "Afitti", "afu": "Awutu", "afz": "Obokuitai", "aga": "Aguano", "agb": "Legbo", "agc": "Agatu", "agd": "Agarabi", "age": "Angal", "agf": "Arguni", "agg": "Angor", "agh": "Ngelima", "agi": "Agariya", "agj": "Argobba", "agk": "Isarog Agta", "agl": "Fembe", "agm": "Angaataha", "agn": "Agutaynen", "ago": "Tainae", "agq": "Aghem", "agr": "Aguaruna", "ags": "Esimbi", "agt": "Central Cagayan Agta", "agu": "Aguacateco", "agv": "Remontado Dumagat", "agw": "Kahua", "agx": "Aghul", "agy": "Southern Alta", "agz": "Mt. 
Iriga Agta", "aha": "Ahanta", "ahb": "Axamb", "ahg": "Qimant", "ahh": "Aghu", "ahi": "Tiagbamrin Aizi", "ahk": "Akha", "ahl": "Igo", "ahm": "Mobumrin Aizi", "ahn": "Àhàn", "aho": "Ahom", "ahp": "Aproumu Aizi", "ahr": "Ahirani", "ahs": "Ashe", "aht": "Ahtena", "aia": "Arosi", "aib": "Ainu (China)", "aic": "Ainbai", "aid": "Alngith", "aie": "Amara", "aif": "Agi", "aig": "Antigua and Barbuda Creole English", "aih": "Ai-Cham", "aii": "Assyrian Neo-Aramaic", "aij": "Lishanid Noshan", "aik": "Ake", "ail": "Aimele", "aim": "Aimol", "ain": "Ainu (Japan)", "aio": "Aiton", "aip": "Burumakok", "aiq": "Aimaq", "air": "Airoran", "ait": "Arikem", "aiw": "Aari", "aix": "Aighon", "aiy": "Ali", "aja": "Aja (South Sudan)", "ajg": "Aja (Benin)", "aji": "Ajië", "ajn": "Andajin", "ajp": "South Levantine Arabic", "ajs": "Algerian Jewish Sign Language", "aju": "Judeo-Moroccan Arabic", "ajw": "Ajawa", "ajz": "Amri Karbi", "ak": "Akan", "akb": "Batak Angkola", "akc": "Mpur", "akd": "Ukpet-Ehom", "ake": "Akawaio", "akf": "Akpa", "akg": "Anakalangu", "akh": "Angal Heneng", "aki": "Aiome", "akj": "Aka-Jeru", "akk": "Akkadian", "akl": "Aklanon", "akm": "Aka-Bo", "ako": "Akurio", "akp": "Siwu", "akq": "Ak", "akr": "Araki", "aks": "Akaselem", "akt": "Akolet", "aku": "Akum", "akv": "Akhvakh", "akw": "Akwa", "akx": "Aka-Kede", "aky": "Aka-Kol", "akz": "Alabama", "ala": "Alago", "alc": "Qawasqar", "ald": "Alladian", "ale": "Aleut", "alf": "Alege", "alg": "Algonquian languages", "alh": "Alawa", "ali": "Amaimon", "alj": "Alangan", "alk": "Alak", "all": "Allar", "alm": "Amblong", "aln": "Gheg Albanian", "alo": "Larike-Wakasihu", "alp": "Alune", "alq": "Algonquin", "alr": "Alutor", "als": "Tosk Albanian", "alt": "Southern Altai", "alu": "'Are'are", "alv": "Atlantic-Congo languages", "alw": "Alaba-K’abeena; Wanbasana", "alx": "Amol", "aly": "Alyawarr", "alz": "Alur", "am": "Amharic", "ama": "Amanayé", "amb": "Ambo", "amc": "Amahuaca", "ame": "Yanesha'", "amf": "Hamer-Banna", "amg": "Amurdak", "ami": "Amis", "amj": "Amdang", "amk": "Ambai", "aml": "War-Jaintia", "amm": "Ama (Papua New Guinea)", "amn": "Amanab", "amo": "Amo", "amp": "Alamblak", "amq": "Amahai", "amr": "Amarakaeri", "ams": "Southern Amami-Oshima", "amt": "Amto", "amu": "Guerrero Amuzgo", "amv": "Ambelau", "amw": "Western Neo-Aramaic", "amx": "Anmatyerre", "amy": "Ami", "amz": "Atampaya", "an": "Aragonese", "ana": "Andaqui", "anb": "Andoa", "anc": "Ngas", "and": "Ansus", "ane": "Xârâcùù", "anf": "Animere", "ang": "Old English (ca. 
450-1100)", "anh": "Nend", "ani": "Andi", "anj": "Anor", "ank": "Goemai", "anl": "Anu-Hkongso Chin", "anm": "Anal", "ann": "Obolo", "ano": "Andoque", "anp": "Angika", "anq": "Jarawa (India)", "anr": "Andh", "ans": "Anserma", "ant": "Antakarinya; Antikarinya", "anu": "Anuak", "anv": "Denya", "anw": "Anaang", "anx": "Andra-Hus", "any": "Anyin", "anz": "Anem", "aoa": "Angolar", "aob": "Abom", "aoc": "Pemon", "aod": "Andarum", "aoe": "Angal Enen", "aof": "Bragat", "aog": "Angoram", "aoi": "Anindilyakwa", "aoj": "Mufian", "aok": "Arhö", "aol": "Alor", "aom": "Ömie", "aon": "Bumbita Arapesh", "aor": "Aore", "aos": "Taikat", "aot": "Atong (India); A'tong", "aou": "A'ou", "aox": "Atorada", "aoz": "Uab Meto", "apa": "Apache languages", "apb": "Sa'a", "apc": "North Levantine Arabic", "apd": "Sudanese Arabic", "ape": "Bukiyip", "apf": "Pahanan Agta", "apg": "Ampanang", "aph": "Athpariya", "api": "Apiaká", "apj": "Jicarilla Apache", "apk": "Kiowa Apache", "apl": "Lipan Apache", "apm": "Mescalero-Chiricahua Apache", "apn": "Apinayé", "apo": "Ambul", "app": "Apma", "apq": "A-Pucikwar", "apr": "Arop-Lokep", "aps": "Arop-Sissano", "apt": "Apatani", "apu": "Apurinã", "apv": "Alapmunte", "apw": "Western Apache", "apx": "Aputai", "apy": "Apalaí", "apz": "Safeyoka", "aqa": "Alacalufan languages", "aqc": "Archi", "aqd": "Ampari Dogon", "aqg": "Arigidi", "aqk": "Aninka", "aql": "Algic languages", "aqm": "Atohwaim", "aqn": "Northern Alta", "aqp": "Atakapa", "aqr": "Arhâ", "aqt": "Angaité", "aqz": "Akuntsu", "ar": "Arabic", "arb": "Standard Arabic", "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", "ard": "Arabana", "are": "Western Arrarnta", "arh": "Arhuaco", "ari": "Arikara", "arj": "Arapaso", "ark": "Arikapú", "arl": "Arabela", "arn": "Mapudungun; Mapuche", "aro": "Araona", "arp": "Arapaho", "arq": "Algerian Arabic", "arr": "Karo (Brazil)", "ars": "Najdi Arabic", "art": "Artificial languages", "aru": "Aruá (Amazonas State); Arawá", "arv": "Arbore", "arw": "Arawak", "arx": "Aruá (Rodonia State)", "ary": "Moroccan Arabic", "arz": "Egyptian Arabic", "as": "Assamese", "asa": "Asu (Tanzania)", "asb": "Assiniboine", "asc": "Casuarina Coast Asmat", "ase": "American Sign Language", "asf": "Auslan; Australian Sign Language", "asg": "Cishingini", "ash": "Abishira", "asi": "Buruwai", "asj": "Sari", "ask": "Ashkun", "asl": "Asilulu", "asn": "Xingú Asuriní", "aso": "Dano", "asp": "Algerian Sign Language", "asq": "Austrian Sign Language", "asr": "Asuri", "ass": "Ipulo", "ast": "Asturian; Asturleonese; Bable; Leonese", "asu": "Tocantins Asurini", "asv": "Asoa", "asw": "Australian Aborigines Sign Language", "asx": "Muratayak", "asy": "Yaosakor Asmat", "asz": "As", "ata": "Pele-Ata", "atb": "Zaiwa", "atc": "Atsahuaca", "atd": "Ata Manobo", "ate": "Atemble", "atg": "Ivbie North-Okpela-Arhe", "ath": "Athapascan languages", "ati": "Attié", "atj": "Atikamekw", "atk": "Ati", "atl": "Mt. 
Iraya Agta", "atm": "Ata", "atn": "Ashtiani", "ato": "Atong (Cameroon)", "atp": "Pudtol Atta", "atq": "Aralle-Tabulahan", "atr": "Waimiri-Atroari", "ats": "Gros Ventre", "att": "Pamplona Atta", "atu": "Reel", "atv": "Northern Altai", "atw": "Atsugewi", "atx": "Arutani", "aty": "Aneityum", "atz": "Arta", "aua": "Asumboa", "aub": "Alugu", "auc": "Waorani", "aud": "Anuta", "auf": "Arauan languages", "aug": "Aguna", "auh": "Aushi", "aui": "Anuki", "auj": "Awjilah", "auk": "Heyo", "aul": "Aulua", "aum": "Asu (Nigeria)", "aun": "Molmo One", "auo": "Auyokawa", "aup": "Makayam", "auq": "Anus; Korur", "aur": "Aruek", "aus": "Australian languages", "aut": "Austral", "auu": "Auye", "auw": "Awyi", "aux": "Aurá", "auy": "Awiyaana", "auz": "Uzbeki Arabic", "av": "Avaric", "avb": "Avau", "avd": "Alviri-Vidari", "avi": "Avikam", "avk": "Kotava", "avl": "Eastern Egyptian Bedawi Arabic", "avm": "Angkamuthi", "avn": "Avatime", "avo": "Agavotaguerra", "avs": "Aushiri", "avt": "Au", "avu": "Avokaya", "avv": "Avá-Canoeiro", "awa": "Awadhi", "awb": "Awa (Papua New Guinea)", "awc": "Cicipu", "awd": "Arawakan languages", "awe": "Awetí", "awg": "Anguthimri", "awh": "Awbono", "awi": "Aekyom", "awk": "Awabakal", "awm": "Arawum", "awn": "Awngi", "awo": "Awak", "awr": "Awera", "aws": "South Awyu", "awt": "Araweté", "awu": "Central Awyu", "awv": "Jair Awyu", "aww": "Awun", "awx": "Awara", "awy": "Edera Awyu", "axb": "Abipon", "axe": "Ayerrerenge", "axg": "Mato Grosso Arára", "axk": "Yaka (Central African Republic)", "axl": "Lower Southern Aranda", "axm": "Middle Armenian", "axx": "Xârâgurè", "ay": "Aymara", "aya": "Awar", "ayb": "Ayizo Gbe", "ayc": "Southern Aymara", "ayd": "Ayabadhu", "aye": "Ayere", "ayg": "Ginyanga", "ayh": "Hadrami Arabic", "ayi": "Leyigha", "ayk": "Akuku", "ayl": "Libyan Arabic", "ayn": "Sanaani Arabic", "ayo": "Ayoreo", "ayp": "North Mesopotamian Arabic", "ayq": "Ayi (Papua New Guinea)", "ayr": "Central Aymara", "ays": "Sorsogon Ayta", "ayt": "Magbukun Ayta", "ayu": "Ayu", "ayz": "Mai Brat", "az": "Azerbaijani", "aza": "Azha", "azb": "South Azerbaijani", "azc": "Uto-Aztecan languages", "azd": "Eastern Durango Nahuatl", "azg": "San Pedro Amuzgos Amuzgo", "azj": "North Azerbaijani", "azm": "Ipalapa Amuzgo", "azn": "Western Durango Nahuatl", "azo": "Awing", "azt": "Faire Atta", "azz": "Highland Puebla Nahuatl", "ba": "Bashkir", "baa": "Babatana", "bab": "Bainouk-Gunyuño", "bac": "Badui", "bad": "Banda languages", "bae": "Baré", "baf": "Nubaca", "bag": "Tuki", "bah": "Bahamas Creole English", "bai": "Bamileke languages", "baj": "Barakai", "bal": "Baluchi", "ban": "Balinese", "bao": "Waimaha", "bap": "Bantawa", "bar": "Bavarian", "bas": "Basa (Cameroon)", "bat": "Baltic languages", "bau": "Bada (Nigeria)", "bav": "Vengo", "baw": "Bambili-Bambui", "bax": "Bamun", "bay": "Batuley", "bba": "Baatonum", "bbb": "Barai", "bbc": "Batak Toba", "bbd": "Bau", "bbe": "Bangba", "bbf": "Baibai", "bbg": "Barama", "bbh": "Bugan", "bbi": "Barombi", "bbj": "Ghomálá'", "bbk": "Babanki", "bbl": "Bats", "bbm": "Babango", "bbn": "Uneapa", "bbo": "Northern Bobo Madaré; Konabéré", "bbp": "West Central Banda", "bbq": "Bamali", "bbr": "Girawa", "bbs": "Bakpinka", "bbt": "Mburku", "bbu": "Kulung (Nigeria)", "bbv": "Karnai", "bbw": "Baba", "bbx": "Bubia", "bby": "Befang", "bca": "Central Bai", "bcb": "Bainouk-Samik", "bcc": "Southern Balochi", "bcd": "North Babar", "bce": "Bamenyam", "bcf": "Bamu", "bcg": "Baga Pokur", "bch": "Bariai", "bci": "Baoulé", "bcj": "Bardi", "bck": "Bunuba", "bcl": "Central Bikol", "bcm": "Bannoni", 
"bcn": "Bali (Nigeria)", "bco": "Kaluli", "bcp": "Bali (Democratic Republic of Congo)", "bcq": "Bench", "bcr": "Babine", "bcs": "Kohumono", "bct": "Bendi", "bcu": "Awad Bing", "bcv": "Shoo-Minda-Nye", "bcw": "Bana", "bcy": "Bacama", "bcz": "Bainouk-Gunyaamolo", "bda": "Bayot", "bdb": "Basap", "bdc": "Emberá-Baudó", "bdd": "Bunama", "bde": "Bade", "bdf": "Biage", "bdg": "Bonggi", "bdh": "Baka (South Sudan)", "bdi": "Burun", "bdj": "Bai (South Sudan); Bai", "bdk": "Budukh", "bdl": "Indonesian Bajau", "bdm": "Buduma", "bdn": "Baldemu", "bdo": "Morom", "bdp": "Bende", "bdq": "Bahnar", "bdr": "West Coast Bajau", "bds": "Burunge", "bdt": "Bokoto", "bdu": "Oroko", "bdv": "Bodo Parja", "bdw": "Baham", "bdx": "Budong-Budong", "bdy": "Bandjalang", "bdz": "Badeshi", "be": "Belarusian", "bea": "Beaver", "beb": "Bebele", "bec": "Iceve-Maci", "bed": "Bedoanas", "bee": "Byangsi", "bef": "Benabena", "beg": "Belait", "beh": "Biali", "bei": "Bekati'", "bej": "Beja; Bedawiyet", "bek": "Bebeli", "bem": "Bemba (Zambia)", "beo": "Beami", "bep": "Besoa", "beq": "Beembe", "ber": "Berber languages", "bes": "Besme", "bet": "Guiberoua Béte", "beu": "Blagar", "bev": "Daloa Bété", "bew": "Betawi", "bex": "Jur Modo", "bey": "Beli (Papua New Guinea)", "bez": "Bena (Tanzania)", "bfa": "Bari", "bfb": "Pauri Bareli", "bfc": "Panyi Bai; Northern Bai", "bfd": "Bafut", "bfe": "Betaf; Tena", "bff": "Bofi", "bfg": "Busang Kayan", "bfh": "Blafe", "bfi": "British Sign Language", "bfj": "Bafanji", "bfk": "Ban Khor Sign Language", "bfl": "Banda-Ndélé", "bfm": "Mmen", "bfn": "Bunak", "bfo": "Malba Birifor", "bfp": "Beba", "bfq": "Badaga", "bfr": "Bazigar", "bfs": "Southern Bai", "bft": "Balti", "bfu": "Gahri", "bfw": "Bondo", "bfx": "Bantayanon", "bfy": "Bagheli", "bfz": "Mahasu Pahari", "bg": "Bulgarian", "bga": "Gwamhi-Wuri", "bgb": "Bobongko", "bgc": "Haryanvi", "bgd": "Rathwi Bareli", "bge": "Bauria", "bgf": "Bangandu", "bgg": "Bugun", "bgi": "Giangan", "bgj": "Bangolan", "bgk": "Bit; Buxinhua", "bgl": "Bo (Laos)", "bgn": "Western Balochi", "bgo": "Baga Koga", "bgp": "Eastern Balochi", "bgq": "Bagri", "bgr": "Bawm Chin", "bgs": "Tagabawa", "bgt": "Bughotu", "bgu": "Mbongno", "bgv": "Warkay-Bipim", "bgw": "Bhatri", "bgx": "Balkan Gagauz Turkish", "bgy": "Benggoi", "bgz": "Banggai", "bh": "Bihari languages", "bha": "Bharia", "bhb": "Bhili", "bhc": "Biga", "bhd": "Bhadrawahi", "bhe": "Bhaya", "bhf": "Odiai", "bhg": "Binandere", "bhh": "Bukharic", "bhi": "Bhilali", "bhj": "Bahing", "bhl": "Bimin", "bhm": "Bathari", "bhn": "Bohtan Neo-Aramaic", "bho": "Bhojpuri", "bhp": "Bima", "bhq": "Tukang Besi South", "bhr": "Bara Malagasy", "bhs": "Buwal", "bht": "Bhattiyali", "bhu": "Bhunjia", "bhv": "Bahau", "bhw": "Biak", "bhx": "Bhalay", "bhy": "Bhele", "bhz": "Bada (Indonesia)", "bi": "Bislama", "bia": "Badimaya", "bib": "Bissa; Bisa", "bid": "Bidiyo", "bie": "Bepour", "bif": "Biafada", "big": "Biangai", "bik": "Bikol", "bil": "Bile", "bim": "Bimoba", "bin": "Bini; Edo", "bio": "Nai", "bip": "Bila", "biq": "Bipi", "bir": "Bisorio", "bit": "Berinomo", "biu": "Biete", "biv": "Southern Birifor", "biw": "Kol (Cameroon)", "bix": "Bijori", "biy": "Birhor", "biz": "Baloi", "bja": "Budza", "bjb": "Banggarla", "bjc": "Bariji", "bje": "Biao-Jiao Mien", "bjf": "Barzani Jewish Neo-Aramaic", "bjg": "Bidyogo", "bjh": "Bahinemo", "bji": "Burji", "bjj": "Kanauji", "bjk": "Barok", "bjl": "Bulu (Papua New Guinea)", "bjm": "Bajelani", "bjn": "Banjar", "bjo": "Mid-Southern Banda", "bjp": "Fanamaket", "bjr": "Binumarien", "bjs": "Bajan", "bjt": "Balanta-Ganja", 
"bju": "Busuu", "bjv": "Bedjond", "bjw": "Bakwé", "bjx": "Banao Itneg", "bjy": "Bayali", "bjz": "Baruga", "bka": "Kyak", "bkc": "Baka (Cameroon)", "bkd": "Binukid; Talaandig", "bkf": "Beeke", "bkg": "Buraka", "bkh": "Bakoko", "bki": "Baki", "bkj": "Pande", "bkk": "Brokskat", "bkl": "Berik", "bkm": "Kom (Cameroon)", "bkn": "Bukitan", "bko": "Kwa'", "bkp": "Boko (Democratic Republic of Congo)", "bkq": "Bakairí", "bkr": "Bakumpai", "bks": "Northern Sorsoganon", "bkt": "Boloki", "bku": "Buhid", "bkv": "Bekwarra", "bkw": "Bekwel", "bkx": "Baikeno", "bky": "Bokyi", "bkz": "Bungku", "bla": "Siksika", "blb": "Bilua", "blc": "Bella Coola", "bld": "Bolango", "ble": "Balanta-Kentohe", "blf": "Buol", "blh": "Kuwaa", "bli": "Bolia", "blj": "Bolongan", "blk": "Pa'o Karen; Pa'O", "bll": "Biloxi", "blm": "Beli (South Sudan)", "bln": "Southern Catanduanes Bikol", "blo": "Anii", "blp": "Blablanga", "blq": "Baluan-Pam", "blr": "Blang", "bls": "Balaesang", "blt": "Tai Dam", "blv": "Kibala; Bolo", "blw": "Balangao", "blx": "Mag-Indi Ayta", "bly": "Notre", "blz": "Balantak", "bm": "Bambara", "bma": "Lame", "bmb": "Bembe", "bmc": "Biem", "bmd": "Baga Manduri", "bme": "Limassa", "bmf": "Bom-Kim", "bmg": "Bamwe", "bmh": "Kein", "bmi": "Bagirmi", "bmj": "Bote-Majhi", "bmk": "Ghayavi", "bml": "Bomboli", "bmm": "Northern Betsimisaraka Malagasy", "bmn": "Bina (Papua New Guinea)", "bmo": "Bambalang", "bmp": "Bulgebi", "bmq": "Bomu", "bmr": "Muinane", "bms": "Bilma Kanuri", "bmt": "Biao Mon", "bmu": "Somba-Siawari", "bmv": "Bum", "bmw": "Bomwali", "bmx": "Baimak", "bmz": "Baramu", "bn": "Bengali; Bangla", "bna": "Bonerate", "bnb": "Bookan", "bnc": "Bontok", "bnd": "Banda (Indonesia)", "bne": "Bintauna", "bnf": "Masiwang", "bng": "Benga", "bni": "Bangi", "bnj": "Eastern Tawbuid", "bnk": "Bierebo", "bnl": "Boon", "bnm": "Batanga", "bnn": "Bunun", "bno": "Bantoanon", "bnp": "Bola", "bnq": "Bantik", "bnr": "Butmas-Tur", "bns": "Bundeli", "bnt": "Bantu languages", "bnu": "Bentong", "bnv": "Bonerif; Beneraf; Edwas", "bnw": "Bisis", "bnx": "Bangubangu", "bny": "Bintulu", "bnz": "Beezen", "bo": "Tibetan", "boa": "Bora", "bob": "Aweer", "boe": "Mundabli", "bof": "Bolon", "bog": "Bamako Sign Language", "boh": "Boma", "boi": "Barbareño", "boj": "Anjam", "bok": "Bonjo", "bol": "Bole", "bom": "Berom", "bon": "Bine", "boo": "Tiemacèwè Bozo", "bop": "Bonkiman", "boq": "Bogaya", "bor": "Borôro", "bot": "Bongo", "bou": "Bondei", "bov": "Tuwuli", "bow": "Rema", "box": "Buamu", "boy": "Bodo (Central African Republic)", "boz": "Tiéyaxo Bozo", "bpa": "Daakaka", "bpc": "Mbuk", "bpd": "Banda-Banda", "bpe": "Bauni", "bpg": "Bonggo", "bph": "Botlikh", "bpi": "Bagupi", "bpj": "Binji", "bpk": "Orowe; 'Ôrôê", "bpl": "Broome Pearling Lugger Pidgin", "bpm": "Biyom", "bpn": "Dzao Min", "bpo": "Anasi", "bpp": "Kaure", "bpq": "Banda Malay", "bpr": "Koronadal Blaan", "bps": "Sarangani Blaan", "bpt": "Barrow Point", "bpu": "Bongu", "bpv": "Bian Marind", "bpw": "Bo (Papua New Guinea)", "bpx": "Palya Bareli", "bpy": "Bishnupriya", "bpz": "Bilba", "bqa": "Tchumbuli", "bqb": "Bagusa", "bqc": "Boko (Benin); Boo", "bqd": "Bung", "bqf": "Baga Kaloum", "bqg": "Bago-Kusuntu", "bqh": "Baima", "bqi": "Bakhtiari", "bqj": "Bandial", "bqk": "Banda-Mbrès", "bql": "Bilakura", "bqm": "Wumboko", "bqn": "Bulgarian Sign Language", "bqo": "Balo", "bqp": "Busa", "bqq": "Biritai", "bqr": "Burusu", "bqs": "Bosngun", "bqt": "Bamukumbit", "bqu": "Boguru", "bqv": "Koro Wachi; Begbere-Ejar", "bqw": "Buru (Nigeria)", "bqx": "Baangi", "bqy": "Bengkala Sign Language", "bqz": "Bakaka", 
"br": "Breton", "bra": "Braj", "brb": "Brao; Lave", "brc": "Berbice Creole Dutch", "brd": "Baraamu", "brf": "Bira", "brg": "Baure", "brh": "Brahui", "bri": "Mokpwe", "brj": "Bieria", "brk": "Birked", "brl": "Birwa", "brm": "Barambu", "brn": "Boruca", "bro": "Brokkat", "brp": "Barapasi", "brq": "Breri", "brr": "Birao", "brs": "Baras", "brt": "Bitare", "bru": "Eastern Bru", "brv": "Western Bru", "brw": "Bellari", "brx": "Bodo (India)", "bry": "Burui", "brz": "Bilbil", "bs": "Bosnian", "bsa": "Abinomn", "bsb": "Brunei Bisaya", "bsc": "Bassari; Oniyan", "bse": "Wushi", "bsf": "Bauchi", "bsg": "Bashkardi", "bsh": "Kati", "bsi": "Bassossi", "bsj": "Bangwinji", "bsk": "Burushaski", "bsl": "Basa-Gumna", "bsm": "Busami", "bsn": "Barasana-Eduria", "bso": "Buso", "bsp": "Baga Sitemu", "bsq": "Bassa", "bsr": "Bassa-Kontagora", "bss": "Akoose", "bst": "Basketo", "bsu": "Bahonsuai", "bsv": "Baga Sobané", "bsw": "Baiso", "bsx": "Yangkam", "bsy": "Sabah Bisaya", "bta": "Bata", "btc": "Bati (Cameroon)", "btd": "Batak Dairi", "bte": "Gamo-Ningi", "btf": "Birgit", "btg": "Gagnoa Bété", "bth": "Biatah Bidayuh", "bti": "Burate", "btj": "Bacanese Malay", "btk": "Batak languages", "btm": "Batak Mandailing", "btn": "Ratagnon", "bto": "Rinconada Bikol", "btp": "Budibud", "btq": "Batek", "btr": "Baetora", "bts": "Batak Simalungun", "btt": "Bete-Bendi", "btu": "Batu", "btv": "Bateri", "btw": "Butuanon", "btx": "Batak Karo", "bty": "Bobot", "btz": "Batak Alas-Kluet", "bua": "Buriat", "bub": "Bua", "buc": "Bushi", "bud": "Ntcham", "bue": "Beothuk", "buf": "Bushoong", "bug": "Buginese", "buh": "Younuo Bunu", "bui": "Bongili", "buj": "Basa-Gurmana", "buk": "Bugawac", "bum": "Bulu (Cameroon)", "bun": "Sherbro", "buo": "Terei", "bup": "Busoa", "buq": "Brem", "bus": "Bokobaru", "but": "Bungain", "buu": "Budu", "buv": "Bun", "buw": "Bubi", "bux": "Boghom", "buy": "Bullom So", "buz": "Bukwen", "bva": "Barein", "bvb": "Bube", "bvc": "Baelelea", "bvd": "Baeggu", "bve": "Berau Malay", "bvf": "Boor", "bvg": "Bonkeng", "bvh": "Bure", "bvi": "Belanda Viri", "bvj": "Baan", "bvk": "Bukat", "bvl": "Bolivian Sign Language", "bvm": "Bamunka", "bvn": "Buna", "bvo": "Bolgo", "bvp": "Bumang", "bvq": "Birri", "bvr": "Burarra", "bvt": "Bati (Indonesia)", "bvu": "Bukit Malay", "bvv": "Baniva", "bvw": "Boga", "bvx": "Dibole", "bvy": "Baybayanon", "bvz": "Bauzi", "bwa": "Bwatoo", "bwb": "Namosi-Naitasiri-Serua", "bwc": "Bwile", "bwd": "Bwaidoka", "bwe": "Bwe Karen", "bwf": "Boselewa", "bwg": "Barwe", "bwh": "Bishuo", "bwi": "Baniwa", "bwj": "Láá Láá Bwamu", "bwk": "Bauwaki", "bwl": "Bwela", "bwm": "Biwat", "bwn": "Wunai Bunu", "bwo": "Boro (Ethiopia); Borna (Ethiopia)", "bwp": "Mandobo Bawah", "bwq": "Southern Bobo Madaré", "bwr": "Bura-Pabir", "bws": "Bomboma", "bwt": "Bafaw-Balong", "bwu": "Buli (Ghana)", "bww": "Bwa", "bwx": "Bu-Nao Bunu", "bwy": "Cwi Bwamu", "bwz": "Bwisi", "bxa": "Tairaha", "bxb": "Belanda Bor", "bxc": "Molengue", "bxd": "Pela", "bxe": "Birale", "bxf": "Bilur; Minigir", "bxg": "Bangala", "bxh": "Buhutu", "bxi": "Pirlatapa", "bxj": "Bayungu", "bxk": "Bukusu; Lubukusu", "bxl": "Jalkunan", "bxm": "Mongolia Buriat", "bxn": "Burduna", "bxo": "Barikanchi", "bxp": "Bebil", "bxq": "Beele", "bxr": "Russia Buriat", "bxs": "Busam", "bxu": "China Buriat", "bxv": "Berakou", "bxw": "Bankagooma", "bxz": "Binahari", "bya": "Batak", "byb": "Bikya", "byc": "Ubaghara", "byd": "Benyadu'", "bye": "Pouye", "byf": "Bete", "byg": "Baygo", "byh": "Bhujel", "byi": "Buyu", "byj": "Bina (Nigeria)", "byk": "Biao", "byl": "Bayono", "bym": "Bidjara", 
"byn": "Bilin; Blin", "byo": "Biyo", "byp": "Bumaji", "byq": "Basay", "byr": "Baruya; Yipma", "bys": "Burak", "byt": "Berti", "byv": "Medumba", "byw": "Belhariya", "byx": "Qaqet", "byz": "Banaro", "bza": "Bandi", "bzb": "Andio", "bzc": "Southern Betsimisaraka Malagasy", "bzd": "Bribri", "bze": "Jenaama Bozo", "bzf": "Boikin", "bzg": "Babuza", "bzh": "Mapos Buang", "bzi": "Bisu", "bzj": "Belize Kriol English", "bzk": "Nicaragua Creole English", "bzl": "Boano (Sulawesi)", "bzm": "Bolondo", "bzn": "Boano (Maluku)", "bzo": "Bozaba", "bzp": "Kemberano", "bzq": "Buli (Indonesia)", "bzr": "Biri", "bzs": "Brazilian Sign Language", "bzt": "Brithenig", "bzu": "Burmeso", "bzv": "Naami", "bzw": "Basa (Nigeria)", "bzx": "Kɛlɛngaxo Bozo", "bzy": "Obanliku", "bzz": "Evant", "ca": "Catalan; Valencian", "caa": "Chortí", "cab": "Garifuna", "cac": "Chuj", "cad": "Caddo", "cae": "Lehar; Laalaa", "caf": "Southern Carrier", "cag": "Nivaclé", "cah": "Cahuarano", "cai": "Central American Indian languages", "caj": "Chané", "cak": "Kaqchikel; Cakchiquel", "cal": "Carolinian", "cam": "Cemuhî", "can": "Chambri", "cao": "Chácobo", "cap": "Chipaya", "caq": "Car Nicobarese", "car": "Galibi Carib", "cas": "Tsimané", "cau": "Caucasian languages", "cav": "Cavineña", "caw": "Callawalla", "cax": "Chiquitano", "cay": "Cayuga", "caz": "Canichana", "cba": "Chibchan languages", "cbb": "Cabiyarí", "cbc": "Carapana", "cbd": "Carijona", "cbg": "Chimila", "cbi": "Chachi", "cbj": "Ede Cabe", "cbk": "Chavacano", "cbl": "Bualkhaw Chin", "cbn": "Nyahkur", "cbo": "Izora", "cbq": "Tsucuba; Cuba", "cbr": "Cashibo-Cacataibo", "cbs": "Cashinahua", "cbt": "Chayahuita", "cbu": "Candoshi-Shapra", "cbv": "Cacua", "cbw": "Kinabalian", "cby": "Carabayo", "ccc": "Chamicuro", "ccd": "Cafundo Creole", "cce": "Chopi", "ccg": "Samba Daka", "cch": "Atsam", "ccj": "Kasanga", "ccl": "Cutchi-Swahili", "ccm": "Malaccan Creole Malay", "ccn": "North Caucasian languages", "cco": "Comaltepec Chinantec", "ccp": "Chakma", "ccr": "Cacaopera", "ccs": "South Caucasian languages", "cda": "Choni", "cdc": "Chadic languages", "cdd": "Caddoan languages", "cde": "Chenchu", "cdf": "Chiru", "cdh": "Chambeali", "cdi": "Chodri", "cdj": "Churahi", "cdm": "Chepang", "cdn": "Chaudangsi", "cdo": "Min Dong Chinese", "cdr": "Cinda-Regi-Tiyal", "cds": "Chadian Sign Language", "cdy": "Chadong", "cdz": "Koda", "ce": "Chechen", "cea": "Lower Chehalis", "ceb": "Cebuano", "ceg": "Chamacoco", "cek": "Eastern Khumi Chin", "cel": "Celtic languages", "cen": "Cen", "cet": "Centúúm", "cey": "Ekai Chin", "cfa": "Dijim-Bwilim", "cfd": "Cara", "cfg": "Como Karim", "cfm": "Falam Chin", "cga": "Changriwa", "cgc": "Kagayanen", "cgg": "Chiga", "cgk": "Chocangacakha", "ch": "Chamorro", "chb": "Chibcha", "chc": "Catawba", "chd": "Highland Oaxaca Chontal", "chf": "Tabasco Chontal", "chg": "Chagatai", "chh": "Chinook", "chj": "Ojitlán Chinantec", "chk": "Chuukese", "chl": "Cahuilla", "chm": "Mari (Russia)", "chn": "Chinook jargon", "cho": "Choctaw", "chp": "Chipewyan; Dene Suline", "chq": "Quiotepec Chinantec", "chr": "Cherokee", "cht": "Cholón", "chw": "Chuwabu", "chx": "Chantyal", "chy": "Cheyenne", "chz": "Ozumacín Chinantec", "cia": "Cia-Cia", "cib": "Ci Gbe", "cic": "Chickasaw", "cid": "Chimariko", "cie": "Cineni", "cih": "Chinali", "cik": "Chitkuli Kinnauri", "cim": "Cimbrian", "cin": "Cinta Larga", "cip": "Chiapanec", "cir": "Tiri; Haméa; Méa", "ciw": "Chippewa", "ciy": "Chaima", "cja": "Western Cham", "cje": "Chru", "cjh": "Upper Chehalis", "cji": "Chamalal", "cjk": "Chokwe", "cjm": "Eastern 
Cham", "cjn": "Chenapian", "cjo": "Ashéninka Pajonal", "cjp": "Cabécar", "cjs": "Shor", "cjv": "Chuave", "cjy": "Jinyu Chinese", "ckb": "Central Kurdish", "ckh": "Chak", "ckl": "Cibak", "ckm": "Chakavian", "ckn": "Kaang Chin", "cko": "Anufo", "ckq": "Kajakse", "ckr": "Kairak", "cks": "Tayo", "ckt": "Chukot", "cku": "Koasati", "ckv": "Kavalan", "ckx": "Caka", "cky": "Cakfem-Mushere", "ckz": "Cakchiquel-Quiché Mixed Language", "cla": "Ron", "clc": "Chilcotin", "cld": "Chaldean Neo-Aramaic", "cle": "Lealao Chinantec", "clh": "Chilisso", "cli": "Chakali", "clj": "Laitu Chin", "clk": "Idu-Mishmi", "cll": "Chala", "clm": "Clallam", "clo": "Lowland Oaxaca Chontal", "clt": "Lautu Chin", "clu": "Caluyanun", "clw": "Chulym", "cly": "Eastern Highland Chatino", "cma": "Maa", "cmc": "Chamic languages", "cme": "Cerma", "cmg": "Classical Mongolian", "cmi": "Emberá-Chamí", "cml": "Campalagian", "cmm": "Michigamea", "cmn": "Mandarin Chinese", "cmo": "Central Mnong", "cmr": "Mro-Khimi Chin", "cms": "Messapic", "cmt": "Camtho", "cna": "Changthang", "cnb": "Chinbon Chin", "cnc": "Côông", "cng": "Northern Qiang", "cnh": "Hakha Chin; Haka Chin", "cni": "Asháninka", "cnk": "Khumi Chin", "cnl": "Lalana Chinantec", "cno": "Con", "cnp": "Northern Ping Chinese; Northern Pinghua", "cnq": "Chung", "cnr": "Montenegrin", "cns": "Central Asmat", "cnt": "Tepetotutla Chinantec", "cnu": "Chenoua", "cnw": "Ngawn Chin", "cnx": "Middle Cornish", "co": "Corsican", "coa": "Cocos Islands Malay", "cob": "Chicomuceltec", "coc": "Cocopa", "cod": "Cocama-Cocamilla", "coe": "Koreguaje", "cof": "Colorado", "cog": "Chong", "coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma", "coj": "Cochimi", "cok": "Santa Teresa Cora", "col": "Columbia-Wenatchi", "com": "Comanche", "con": "Cofán", "coo": "Comox", "cop": "Coptic", "coq": "Coquille", "cot": "Caquinte", "cou": "Wamey", "cov": "Cao Miao", "cow": "Cowlitz", "cox": "Nanti", "coz": "Chochotec", "cpa": "Palantla Chinantec", "cpb": "Ucayali-Yurúa Ashéninka", "cpc": "Ajyíninka Apurucayali", "cpe": "English-based creoles and pidgins", "cpf": "French-based creoles and pidgins", "cpg": "Cappadocian Greek", "cpi": "Chinese Pidgin English", "cpn": "Cherepon", "cpo": "Kpeego", "cpp": "Portuguese-based creoles and pidgins", "cps": "Capiznon", "cpu": "Pichis Ashéninka", "cpx": "Pu-Xian Chinese", "cpy": "South Ucayali Ashéninka", "cqd": "Chuanqiandian Cluster Miao", "cr": "Cree", "cra": "Chara", "crb": "Island Carib", "crc": "Lonwolwol", "crd": "Coeur d'Alene", "crf": "Caramanta", "crg": "Michif", "crh": "Crimean Tatar; Crimean Turkish", "cri": "Sãotomense", "crj": "Southern East Cree", "crk": "Plains Cree", "crl": "Northern East Cree", "crm": "Moose Cree", "crn": "El Nayar Cora", "cro": "Crow", "crp": "Creoles and pidgins", "crq": "Iyo'wujwa Chorote", "crr": "Carolina Algonquian", "crs": "Seselwa Creole French", "crt": "Iyojwa'ja Chorote", "crv": "Chaura", "crw": "Chrau", "crx": "Carrier", "cry": "Cori", "crz": "Cruzeño", "cs": "Czech", "csa": "Chiltepec Chinantec", "csb": "Kashubian", "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana", "csd": "Chiangmai Sign Language", "cse": "Czech Sign Language", "csf": "Cuba Sign Language", "csg": "Chilean Sign Language", "csh": "Asho Chin", "csi": "Coast Miwok", "csj": "Songlai Chin", "csk": "Jola-Kasa", "csl": "Chinese Sign Language", "csm": "Central Sierra Miwok", "csn": "Colombian Sign Language", "cso": "Sochiapam Chinantec; Sochiapan Chinantec", "csp": "Southern Ping Chinese; Southern Pinghua", "csq": "Croatia Sign 
Language", "csr": "Costa Rican Sign Language", "css": "Southern Ohlone", "cst": "Northern Ohlone", "csu": "Central Sudanic languages", "csv": "Sumtu Chin", "csw": "Swampy Cree", "csx": "Cambodian Sign Language", "csy": "Siyin Chin", "csz": "Coos", "cta": "Tataltepec Chatino", "ctc": "Chetco", "ctd": "Tedim Chin", "cte": "Tepinapa Chinantec", "ctg": "Chittagonian", "cth": "Thaiphum Chin", "ctl": "Tlacoatzintepec Chinantec", "ctm": "Chitimacha", "ctn": "Chhintange", "cto": "Emberá-Catío", "ctp": "Western Highland Chatino", "cts": "Northern Catanduanes Bikol", "ctt": "Wayanad Chetti", "ctu": "Chol", "cty": "Moundadan Chetty", "ctz": "Zacatepec Chatino", "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic", "cua": "Cua", "cub": "Cubeo", "cuc": "Usila Chinantec", "cuh": "Chuka; Gichuka", "cui": "Cuiba", "cuj": "Mashco Piro", "cuk": "San Blas Kuna", "cul": "Culina; Kulina", "cuo": "Cumanagoto", "cup": "Cupeño", "cuq": "Cun", "cur": "Chhulung", "cus": "Cushitic languages", "cut": "Teutila Cuicatec", "cuu": "Tai Ya", "cuv": "Cuvok", "cuw": "Chukwa", "cux": "Tepeuxila Cuicatec", "cuy": "Cuitlatec", "cv": "Chuvash", "cvg": "Chug", "cvn": "Valle Nacional Chinantec", "cwa": "Kabwa", "cwb": "Maindo", "cwd": "Woods Cree", "cwe": "Kwere", "cwg": "Chewong; Cheq Wong", "cwt": "Kuwaataay", "cy": "Welsh", "cya": "Nopala Chatino", "cyb": "Cayubaba", "cyo": "Cuyonon", "czh": "Huizhou Chinese", "czk": "Knaanic", "czn": "Zenzontepec Chatino", "czo": "Min Zhong Chinese", "czt": "Zotung Chin", "da": "Danish", "daa": "Dangaléat", "dac": "Dambi", "dad": "Marik", "dae": "Duupa", "dag": "Dagbani", "dah": "Gwahatike", "dai": "Day", "daj": "Dar Fur Daju", "dak": "Dakota", "dal": "Dahalo", "dam": "Damakawa", "dao": "Daai Chin", "daq": "Dandami Maria", "dar": "Dargwa", "das": "Daho-Doo", "dau": "Dar Sila Daju", "dav": "Taita; Dawida", "daw": "Davawenyo", "dax": "Dayi", "day": "Land Dayak languages", "daz": "Dao", "dba": "Bangime", "dbb": "Deno", "dbd": "Dadiya", "dbe": "Dabe", "dbf": "Edopi", "dbg": "Dogul Dom Dogon", "dbi": "Doka", "dbj": "Ida'an", "dbl": "Dyirbal", "dbm": "Duguri", "dbn": "Duriankere", "dbo": "Dulbu", "dbp": "Duwai", "dbq": "Daba", "dbr": "Dabarre", "dbt": "Ben Tey Dogon", "dbu": "Bondum Dom Dogon", "dbv": "Dungu", "dbw": "Bankan Tey Dogon", "dby": "Dibiyaso", "dcc": "Deccan", "dcr": "Negerhollands", "dda": "Dadi Dadi", "ddd": "Dongotono", "dde": "Doondo", "ddg": "Fataluku", "ddi": "West Goodenough", "ddj": "Jaru", "ddn": "Dendi (Benin)", "ddo": "Dido", "ddr": "Dhudhuroa", "dds": "Donno So Dogon", "ddw": "Dawera-Daweloor", "de": "German", "dec": "Dagik", "ded": "Dedua", "dee": "Dewoin", "def": "Dezfuli", "deg": "Degema", "deh": "Dehwari", "dei": "Demisa", "dek": "Dek", "del": "Delaware", "dem": "Dem", "den": "Slave (Athapascan)", "dep": "Pidgin Delaware", "deq": "Dendi (Central African Republic)", "der": "Deori", "des": "Desano", "dev": "Domung", "dez": "Dengese", "dga": "Southern Dagaare", "dgb": "Bunoge Dogon", "dgc": "Casiguran Dumagat Agta", "dgd": "Dagaari Dioula", "dge": "Degenan", "dgg": "Doga", "dgh": "Dghwede", "dgi": "Northern Dagara", "dgk": "Dagba", "dgl": "Andaandi; Dongolawi", "dgn": "Dagoman", "dgo": "Dogri (individual language)", "dgr": "Dogrib; Tłı̨chǫ", "dgs": "Dogoso", "dgt": "Ndra'ngith", "dgw": "Daungwurrung", "dgx": "Doghoro", "dgz": "Daga", "dhd": "Dhundari", "dhg": "Dhangu-Djangu; Dhangu; Djangu", "dhi": "Dhimal", "dhl": "Dhalandji", "dhm": "Zemba", "dhn": "Dhanki", "dho": "Dhodia", "dhr": "Dhargari", "dhs": "Dhaiso", "dhu": "Dhurga", "dhv": 
"Dehu; Drehu", "dhw": "Dhanwar (Nepal)", "dhx": "Dhungaloo", "dia": "Dia", "dib": "South Central Dinka", "dic": "Lakota Dida", "did": "Didinga", "dif": "Dieri; Diyari", "dig": "Digo; Chidigo", "dih": "Kumiai", "dii": "Dimbong", "dij": "Dai", "dik": "Southwestern Dinka", "dil": "Dilling", "dim": "Dime", "din": "Dinka", "dio": "Dibo", "dip": "Northeastern Dinka", "diq": "Dimli (individual language)", "dir": "Dirim", "dis": "Dimasa", "diu": "Diriku", "diw": "Northwestern Dinka", "dix": "Dixon Reef", "diy": "Diuwe", "diz": "Ding", "dja": "Djadjawurrung", "djb": "Djinba", "djc": "Dar Daju Daju", "djd": "Djamindjung; Ngaliwurru", "dje": "Zarma", "djf": "Djangun", "dji": "Djinang", "djj": "Djeebbana", "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge", "djm": "Jamsay Dogon", "djn": "Jawoyn; Djauan", "djo": "Jangkang", "djr": "Djambarrpuyngu", "dju": "Kapriman", "djw": "Djawi", "dka": "Dakpakha", "dkg": "Kadung", "dkk": "Dakka", "dkr": "Kuijau", "dks": "Southeastern Dinka", "dkx": "Mazagway", "dlg": "Dolgan", "dlk": "Dahalik", "dlm": "Dalmatian", "dln": "Darlong", "dma": "Duma", "dmb": "Mombo Dogon", "dmc": "Gavak", "dmd": "Madhi Madhi", "dme": "Dugwor", "dmf": "Medefaidrin", "dmg": "Upper Kinabatangan", "dmk": "Domaaki", "dml": "Dameli", "dmm": "Dama", "dmn": "Mande languages", "dmo": "Kemedzung", "dmr": "East Damar", "dms": "Dampelas", "dmu": "Dubu; Tebi", "dmv": "Dumpas", "dmw": "Mudburra", "dmx": "Dema", "dmy": "Demta; Sowari", "dna": "Upper Grand Valley Dani", "dnd": "Daonda", "dne": "Ndendeule", "dng": "Dungan", "dni": "Lower Grand Valley Dani", "dnj": "Dan", "dnk": "Dengka", "dnn": "Dzùùngoo", "dno": "Ndrulo; Northern Lendu", "dnr": "Danaru", "dnt": "Mid Grand Valley Dani", "dnu": "Danau", "dnv": "Danu", "dnw": "Western Dani", "dny": "Dení", "doa": "Dom", "dob": "Dobu", "doc": "Northern Dong", "doe": "Doe", "dof": "Domu", "doh": "Dong", "doi": "Dogri (macrolanguage)", "dok": "Dondo", "dol": "Doso", "don": "Toura (Papua New Guinea)", "doo": "Dongo", "dop": "Lukpa", "doq": "Dominican Sign Language", "dor": "Dori'o", "dos": "Dogosé", "dot": "Dass", "dov": "Dombe", "dow": "Doyayo", "dox": "Bussa", "doy": "Dompo", "doz": "Dorze", "dpp": "Papar", "dra": "Dravidian languages", "drb": "Dair", "drc": "Minderico", "drd": "Darmiya", "dre": "Dolpo", "drg": "Rungus", "dri": "C'Lela", "drl": "Paakantyi", "drn": "West Damar", "dro": "Daro-Matu Melanau", "drq": "Dura", "drs": "Gedeo", "drt": "Drents", "dru": "Rukai", "dry": "Darai", "dsb": "Lower Sorbian", "dse": "Dutch Sign Language", "dsh": "Daasanach", "dsi": "Disa", "dsl": "Danish Sign Language", "dsn": "Dusner", "dso": "Desiya", "dsq": "Tadaksahak", "dsz": "Mardin Sign Language", "dta": "Daur", "dtb": "Labuk-Kinabatangan Kadazan", "dtd": "Ditidaht", "dth": "Adithinngithigh", "dti": "Ana Tinga Dogon", "dtk": "Tene Kan Dogon", "dtm": "Tomo Kan Dogon", "dtn": "Daatsʼíin", "dto": "Tommo So Dogon", "dtp": "Kadazan Dusun; Central Dusun", "dtr": "Lotud", "dts": "Toro So Dogon", "dtt": "Toro Tegu Dogon", "dtu": "Tebul Ure Dogon", "dty": "Dotyali", "dua": "Duala", "dub": "Dubli", "duc": "Duna", "due": "Umiray Dumaget Agta", "duf": "Dumbea; Drubea", "dug": "Duruma; Chiduruma", "duh": "Dungra Bhil", "dui": "Dumun", "duk": "Uyajitaya", "dul": "Alabat Island Agta", "dum": "Middle Dutch (ca. 
1050-1350)", "dun": "Dusun Deyah", "duo": "Dupaninan Agta", "dup": "Duano", "duq": "Dusun Malang", "dur": "Dii", "dus": "Dumi", "duu": "Drung", "duv": "Duvle", "duw": "Dusun Witu", "dux": "Duungooma", "duy": "Dicamay Agta", "duz": "Duli-Gey", "dv": "Dhivehi; Divehi; Maldivian", "dva": "Duau", "dwa": "Diri", "dwk": "Dawik Kui", "dwr": "Dawro", "dws": "Dutton World Speedwords", "dwu": "Dhuwal", "dww": "Dawawa", "dwy": "Dhuwaya", "dwz": "Dewas Rai", "dya": "Dyan", "dyb": "Dyaberdyaber", "dyd": "Dyugun", "dyg": "Villa Viciosa Agta", "dyi": "Djimini Senoufo", "dym": "Yanda Dom Dogon", "dyn": "Dyangadi; Dhanggatti", "dyo": "Jola-Fonyi", "dyu": "Dyula", "dyy": "Djabugay; Dyaabugay", "dz": "Dzongkha", "dza": "Tunzu", "dze": "Djiwarli", "dzg": "Dazaga", "dzl": "Dzalakha", "dzn": "Dzando", "eaa": "Karenggapa", "ebc": "Beginci", "ebg": "Ebughu", "ebk": "Eastern Bontok", "ebo": "Teke-Ebo", "ebr": "Ebrié", "ebu": "Embu; Kiembu", "ecr": "Eteocretan", "ecs": "Ecuadorian Sign Language", "ecy": "Eteocypriot", "ee": "Ewe", "eee": "E", "efa": "Efai", "efe": "Efe", "efi": "Efik", "ega": "Ega", "egl": "Emilian", "egm": "Benamanga", "ego": "Eggon", "egx": "Egyptian languages", "egy": "Egyptian (Ancient)", "ehs": "Miyakubo Sign Language", "ehu": "Ehueun", "eip": "Eipomek", "eit": "Eitiep", "eiv": "Askopan", "eja": "Ejamat", "eka": "Ekajuk", "eke": "Ekit", "ekg": "Ekari", "eki": "Eki", "ekk": "Standard Estonian", "ekl": "Kol (Bangladesh); Kol", "ekm": "Elip", "eko": "Koti", "ekp": "Ekpeye", "ekr": "Yace", "eky": "Eastern Kayah", "el": "Modern Greek (1453-)", "ele": "Elepi", "elh": "El Hugeirat", "eli": "Nding", "elk": "Elkei", "elm": "Eleme", "elo": "El Molo", "elu": "Elu", "elx": "Elamite", "ema": "Emai-Iuleha-Ora", "emb": "Embaloh", "eme": "Emerillon", "emg": "Eastern Meohang", "emi": "Mussau-Emira", "emk": "Eastern Maninkakan", "emm": "Mamulique", "emn": "Eman", "emp": "Northern Emberá", "emq": "Eastern Minyag", "ems": "Pacific Gulf Yupik", "emu": "Eastern Muria", "emw": "Emplawas", "emx": "Erromintxela", "emy": "Epigraphic Mayan", "emz": "Mbessa", "en": "English", "ena": "Apali", "enb": "Markweeta", "enc": "En", "end": "Ende", "enf": "Forest Enets", "enh": "Tundra Enets", "enl": "Enlhet", "enm": "Middle English (1100-1500)", "enn": "Engenni", "eno": "Enggano", "enq": "Enga", "enr": "Emumu; Emem", "enu": "Enu", "env": "Enwan (Edo State)", "enw": "Enwan (Akwa Ibom State)", "enx": "Enxet", "eo": "Esperanto", "eot": "Beti (Côte d'Ivoire)", "epi": "Epie", "era": "Eravallan", "erg": "Sie", "erh": "Eruwa", "eri": "Ogea", "erk": "South Efate", "ero": "Horpa", "err": "Erre", "ers": "Ersu", "ert": "Eritai", "erw": "Erokwanas", "es": "Spanish; Castilian", "ese": "Ese Ejja", "esg": "Aheri Gondi", "esh": "Eshtehardi", "esi": "North Alaskan Inupiatun", "esk": "Northwest Alaska Inupiatun", "esl": "Egypt Sign Language", "esm": "Esuma", "esn": "Salvadoran Sign Language", "eso": "Estonian Sign Language", "esq": "Esselen", "ess": "Central Siberian Yupik", "esu": "Central Yupik", "esx": "Eskimo-Aleut languages", "esy": "Eskayan", "et": "Estonian", "etb": "Etebi", "etc": "Etchemin", "eth": "Ethiopian Sign Language", "etn": "Eton (Vanuatu)", "eto": "Eton (Cameroon)", "etr": "Edolo", "ets": "Yekhee", "ett": "Etruscan", "etu": "Ejagham", "etx": "Eten", "etz": "Semimi", "eu": "Basque", "euq": "Basque (family)", "eve": "Even", "evh": "Uvbie", "evn": "Evenki", "ewo": "Ewondo", "ext": "Extremaduran", "eya": "Eyak", "eyo": "Keiyo", "eza": "Ezaa", "eze": "Uzekwe", "fa": "Persian", "faa": "Fasu", "fab": "Fa d'Ambu", "fad": "Wagi", "faf": 
"Fagani", "fag": "Finongan", "fah": "Baissa Fali", "fai": "Faiwol", "faj": "Faita", "fak": "Fang (Cameroon)", "fal": "South Fali", "fam": "Fam", "fan": "Fang (Equatorial Guinea)", "fap": "Paloor", "far": "Fataleka", "fat": "Fanti", "fau": "Fayu", "fax": "Fala", "fay": "Southwestern Fars", "faz": "Northwestern Fars", "fbl": "West Albay Bikol", "fcs": "Quebec Sign Language", "fer": "Feroge", "ff": "Fulah", "ffi": "Foia Foia", "ffm": "Maasina Fulfulde", "fgr": "Fongoro", "fi": "Finnish", "fia": "Nobiin", "fie": "Fyer", "fif": "Faifi", "fil": "Filipino; Pilipino", "fip": "Fipa", "fir": "Firan", "fit": "Tornedalen Finnish; Meänkieli", "fiu": "Finno-Ugrian languages", "fiw": "Fiwaga", "fj": "Fijian", "fkk": "Kirya-Konzəl", "fkv": "Kven Finnish", "fla": "Kalispel-Pend d'Oreille", "flh": "Foau", "fli": "Fali", "fll": "North Fali", "fln": "Flinders Island", "flr": "Fuliiru", "fly": "Flaaitaal; Tsotsitaal", "fmp": "Fe'fe'", "fmu": "Far Western Muria", "fnb": "Fanbak", "fng": "Fanagalo", "fni": "Fania", "fo": "Faroese", "fod": "Foodo", "foi": "Foi", "fom": "Foma", "fon": "Fon", "for": "Fore", "fos": "Siraya", "fox": "Formosan languages", "fpe": "Fernando Po Creole English", "fqs": "Fas", "fr": "French", "frc": "Cajun French", "frd": "Fordata", "frk": "Frankish", "frm": "Middle French (ca. 1400-1600)", "fro": "Old French (842-ca. 1400)", "frp": "Arpitan; Francoprovençal", "frq": "Forak", "frr": "Northern Frisian", "frs": "Eastern Frisian", "frt": "Fortsenal", "fse": "Finnish Sign Language", "fsl": "French Sign Language", "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli", "fub": "Adamawa Fulfulde", "fuc": "Pulaar", "fud": "East Futuna", "fue": "Borgu Fulfulde", "fuf": "Pular", "fuh": "Western Niger Fulfulde", "fui": "Bagirmi Fulfulde", "fuj": "Ko", "fum": "Fum", "fun": "Fulniô", "fuq": "Central-Eastern Niger Fulfulde", "fur": "Friulian", "fut": "Futuna-Aniwa", "fuu": "Furu", "fuv": "Nigerian Fulfulde", "fuy": "Fuyug", "fvr": "Fur", "fwa": "Fwâi", "fwe": "Fwe", "fy": "Western Frisian", "ga": "Irish", "gaa": "Ga", "gab": "Gabri", "gac": "Mixed Great Andamanese", "gad": "Gaddang", "gae": "Guarequena", "gaf": "Gende", "gag": "Gagauz", "gah": "Alekano", "gai": "Borei", "gaj": "Gadsup", "gak": "Gamkonora", "gal": "Galolen", "gam": "Kandawo", "gan": "Gan Chinese", "gao": "Gants", "gap": "Gal", "gaq": "Gata'", "gar": "Galeya", "gas": "Adiwasi Garasia", "gat": "Kenati", "gau": "Mudhili Gadaba", "gaw": "Nobonob", "gax": "Borana-Arsi-Guji Oromo", "gay": "Gayo", "gaz": "West Central Oromo", "gba": "Gbaya (Central African Republic)", "gbb": "Kaytetye", "gbd": "Karajarri", "gbe": "Niksek", "gbf": "Gaikundi", "gbg": "Gbanziri", "gbh": "Defi Gbe", "gbi": "Galela", "gbj": "Bodo Gadaba", "gbk": "Gaddi", "gbl": "Gamit", "gbm": "Garhwali", "gbn": "Mo'da", "gbo": "Northern Grebo", "gbp": "Gbaya-Bossangoa", "gbq": "Gbaya-Bozoum", "gbr": "Gbagyi", "gbs": "Gbesi Gbe", "gbu": "Gagadu", "gbv": "Gbanu", "gbw": "Gabi-Gabi", "gbx": "Eastern Xwla Gbe", "gby": "Gbari", "gbz": "Zoroastrian Dari", "gcc": "Mali", "gcd": "Ganggalida", "gce": "Galice", "gcf": "Guadeloupean Creole French", "gcl": "Grenadian Creole English", "gcn": "Gaina", "gcr": "Guianese Creole French", "gct": "Colonia Tovar German", "gd": "Scottish Gaelic; Gaelic", "gda": "Gade Lohar", "gdb": "Pottangi Ollar Gadaba", "gdc": "Gugu Badhun", "gdd": "Gedaged", "gde": "Gude", "gdf": "Guduf-Gava", "gdg": "Ga'dang", "gdh": "Gadjerawang; Gajirrabeng", "gdi": "Gundi", "gdj": "Gurdjar", "gdk": "Gadang", "gdl": "Dirasha", "gdm": 
"Laal", "gdn": "Umanakaina", "gdo": "Ghodoberi", "gdq": "Mehri", "gdr": "Wipi", "gds": "Ghandruk Sign Language", "gdt": "Kungardutyi", "gdu": "Gudu", "gdx": "Godwari", "gea": "Geruma", "geb": "Kire", "gec": "Gboloo Grebo", "ged": "Gade", "gef": "Gerai", "geg": "Gengle", "geh": "Hutterite German; Hutterisch", "gei": "Gebe", "gej": "Gen", "gek": "Ywom", "gel": "ut-Ma'in", "gem": "Germanic languages", "geq": "Geme", "ges": "Geser-Gorom", "gev": "Eviya", "gew": "Gera", "gex": "Garre", "gey": "Enya", "gez": "Geez", "gfk": "Patpatar", "gft": "Gafat", "gga": "Gao", "ggb": "Gbii", "ggd": "Gugadj", "gge": "Gurr-goni", "ggg": "Gurgula", "ggk": "Kungarakany", "ggl": "Ganglau", "ggt": "Gitua", "ggu": "Gagu; Gban", "ggw": "Gogodala", "gha": "Ghadamès", "ghc": "Hiberno-Scottish Gaelic", "ghe": "Southern Ghale", "ghh": "Northern Ghale", "ghk": "Geko Karen", "ghl": "Ghulfan", "ghn": "Ghanongga", "gho": "Ghomara", "ghr": "Ghera", "ghs": "Guhu-Samane", "ght": "Kuke; Kutang Ghale", "gia": "Kija", "gib": "Gibanawa", "gic": "Gail", "gid": "Gidar", "gie": "Gaɓogbo; Guébie", "gig": "Goaria", "gih": "Githabul", "gii": "Girirra", "gil": "Gilbertese", "gim": "Gimi (Eastern Highlands)", "gin": "Hinukh", "gip": "Gimi (West New Britain)", "giq": "Green Gelao", "gir": "Red Gelao", "gis": "North Giziga", "git": "Gitxsan", "giu": "Mulao", "giw": "White Gelao", "gix": "Gilima", "giy": "Giyug", "giz": "South Giziga", "gjk": "Kachi Koli", "gjm": "Gunditjmara", "gjn": "Gonja", "gjr": "Gurindji Kriol", "gju": "Gujari", "gka": "Guya", "gkd": "Magɨ (Madang Province)", "gke": "Ndai", "gkn": "Gokana", "gko": "Kok-Nar", "gkp": "Guinea Kpelle", "gku": "ǂUngkue", "gl": "Galician", "glb": "Belning", "glc": "Bon Gula", "gld": "Nanai", "glh": "Northwest Pashai; Northwest Pashayi", "glj": "Gula Iro", "glk": "Gilaki", "gll": "Garlali", "glo": "Galambu", "glr": "Glaro-Twabo", "glu": "Gula (Chad)", "glw": "Glavda", "gly": "Gule", "gma": "Gambera", "gmb": "Gula'alaa", "gmd": "Mághdì", "gme": "East Germanic languages", "gmg": "Magɨyi", "gmh": "Middle High German (ca. 1050-1500)", "gml": "Middle Low German", "gmm": "Gbaya-Mbodomo", "gmn": "Gimnime", "gmq": "North Germanic languages", "gmr": "Mirning; Mirniny", "gmu": "Gumalu", "gmv": "Gamo", "gmw": "West Germanic languages", "gmx": "Magoma", "gmy": "Mycenaean Greek", "gmz": "Mgbolizhia", "gn": "Guarani", "gna": "Kaansa", "gnb": "Gangte", "gnc": "Guanche", "gnd": "Zulgo-Gemzek", "gne": "Ganang", "gng": "Ngangam", "gnh": "Lere", "gni": "Gooniyandi", "gnj": "Ngen", "gnk": "ǁGana", "gnl": "Gangulu", "gnm": "Ginuman", "gnn": "Gumatj", "gno": "Northern Gondi", "gnq": "Gana", "gnr": "Gureng Gureng", "gnt": "Guntai", "gnu": "Gnau", "gnw": "Western Bolivian Guaraní", "gnz": "Ganzi", "goa": "Guro", "gob": "Playero", "goc": "Gorakor", "god": "Godié", "goe": "Gongduk", "gof": "Gofa", "gog": "Gogo", "goh": "Old High German (ca. 
750-1050)", "goi": "Gobasi", "goj": "Gowlan", "gok": "Gowli", "gol": "Gola", "gom": "Goan Konkani", "gon": "Gondi", "goo": "Gone Dau", "gop": "Yeretuar", "goq": "Gorap", "gor": "Gorontalo", "gos": "Gronings", "got": "Gothic", "gou": "Gavar", "gov": "Goo", "gow": "Gorowa", "gox": "Gobu", "goy": "Goundo", "goz": "Gozarkhani", "gpa": "Gupa-Abawa", "gpe": "Ghanaian Pidgin English", "gpn": "Taiap", "gqa": "Ga'anda", "gqi": "Guiqiong", "gqn": "Guana (Brazil)", "gqr": "Gor", "gqu": "Qau", "gra": "Rajput Garasia", "grb": "Grebo", "grc": "Ancient Greek (to 1453)", "grd": "Guruntum-Mbaaru", "grg": "Madi", "grh": "Gbiri-Niragu", "gri": "Ghari", "grj": "Southern Grebo", "grk": "Greek languages", "grm": "Kota Marudu Talantang", "gro": "Groma", "grq": "Gorovu", "grr": "Taznatit", "grs": "Gresi", "grt": "Garo", "gru": "Kistane", "grv": "Central Grebo", "grw": "Gweda", "grx": "Guriaso", "gry": "Barclayville Grebo", "grz": "Guramalum", "gse": "Ghanaian Sign Language", "gsg": "German Sign Language", "gsl": "Gusilay", "gsm": "Guatemalan Sign Language", "gsn": "Nema; Gusan", "gso": "Southwest Gbaya", "gsp": "Wasembo", "gss": "Greek Sign Language", "gsw": "Swiss German; Alemannic; Alsatian", "gta": "Guató", "gtu": "Aghu-Tharnggala", "gu": "Gujarati", "gua": "Shiki", "gub": "Guajajára", "guc": "Wayuu", "gud": "Yocoboué Dida", "gue": "Gurindji", "guf": "Gupapuyngu", "gug": "Paraguayan Guaraní", "guh": "Guahibo", "gui": "Eastern Bolivian Guaraní", "guk": "Gumuz", "gul": "Sea Island Creole English", "gum": "Guambiano", "gun": "Mbyá Guaraní", "guo": "Guayabero", "gup": "Gunwinggu", "guq": "Aché", "gur": "Farefare", "gus": "Guinean Sign Language", "gut": "Maléku Jaíka", "guu": "Yanomamö", "guw": "Gun", "gux": "Gourmanchéma", "guz": "Gusii; Ekegusii", "gv": "Manx", "gva": "Guana (Paraguay)", "gvc": "Guanano", "gve": "Duwet", "gvf": "Golin", "gvj": "Guajá", "gvl": "Gulay", "gvm": "Gurmana", "gvn": "Kuku-Yalanji", "gvo": "Gavião Do Jiparaná", "gvp": "Pará Gavião", "gvr": "Gurung", "gvs": "Gumawana", "gvy": "Guyani", "gwa": "Mbato", "gwb": "Gwa", "gwc": "Gawri; Kalami", "gwd": "Gawwada", "gwe": "Gweno", "gwf": "Gowro", "gwg": "Moo", "gwi": "Gwichʼin", "gwj": "ǀGwi", "gwm": "Awngthim", "gwn": "Gwandara", "gwr": "Gwere", "gwt": "Gawar-Bati", "gwu": "Guwamu", "gww": "Kwini", "gwx": "Gua", "gxx": "Wè Southern", "gya": "Northwest Gbaya", "gyb": "Garus", "gyd": "Kayardild", "gye": "Gyem", "gyf": "Gungabula", "gyg": "Gbayi", "gyi": "Gyele", "gyl": "Gayil", "gym": "Ngäbere", "gyn": "Guyanese Creole English", "gyo": "Gyalsumdo", "gyr": "Guarayu", "gyy": "Gunya", "gyz": "Geji; Gyaazi", "gza": "Ganza", "gzi": "Gazi", "gzn": "Gane", "ha": "Hausa", "haa": "Han", "hab": "Hanoi Sign Language", "hac": "Gurani", "had": "Hatam", "hae": "Eastern Oromo", "haf": "Haiphong Sign Language", "hag": "Hanga", "hah": "Hahon", "hai": "Haida", "haj": "Hajong", "hak": "Hakka Chinese", "hal": "Halang", "ham": "Hewa", "han": "Hangaza", "hao": "Hakö", "hap": "Hupla", "haq": "Ha", "har": "Harari", "has": "Haisla", "hav": "Havu", "haw": "Hawaiian", "hax": "Southern Haida", "hay": "Haya", "haz": "Hazaragi", "hba": "Hamba", "hbb": "Huba", "hbn": "Heiban", "hbo": "Ancient Hebrew", "hbu": "Habu", "hca": "Andaman Creole Hindi", "hch": "Huichol", "hdn": "Northern Haida", "hds": "Honduras Sign Language", "hdy": "Hadiyya", "he": "Hebrew", "hea": "Northern Qiandong Miao", "hed": "Herdé", "heg": "Helong", "heh": "Hehe", "hei": "Heiltsuk", "hem": "Hemba", "hgm": "Haiǁom", "hgw": "Haigwai", "hhi": "Hoia Hoia", "hhr": "Kerak", "hhy": "Hoyahoya", "hi": "Hindi", "hia": 
"Lamang", "hib": "Hibito", "hid": "Hidatsa", "hif": "Fiji Hindi", "hig": "Kamwe", "hih": "Pamosu", "hii": "Hinduri", "hij": "Hijuk", "hik": "Seit-Kaitetu", "hil": "Hiligaynon", "him": "Himachali languages; Western Pahari languages", "hio": "Tsoa", "hir": "Himarimã", "hit": "Hittite", "hiw": "Hiw", "hix": "Hixkaryána", "hji": "Haji", "hka": "Kahe", "hke": "Hunde", "hkh": "Khah; Poguli", "hkk": "Hunjara-Kaina Ke", "hkn": "Mel-Khaonh", "hks": "Hong Kong Sign Language; Heung Kong Sau Yue", "hla": "Halia", "hlb": "Halbi", "hld": "Halang Doan", "hle": "Hlersu", "hlt": "Matu Chin", "hlu": "Hieroglyphic Luwian", "hma": "Southern Mashan Hmong; Southern Mashan Miao", "hmb": "Humburi Senni Songhay", "hmc": "Central Huishui Hmong; Central Huishui Miao", "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao", "hme": "Eastern Huishui Hmong; Eastern Huishui Miao", "hmf": "Hmong Don", "hmg": "Southwestern Guiyang Hmong", "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao", "hmi": "Northern Huishui Hmong; Northern Huishui Miao", "hmj": "Ge; Gejia", "hmk": "Maek", "hml": "Luopohe Hmong; Luopohe Miao", "hmm": "Central Mashan Hmong; Central Mashan Miao", "hmn": "Hmong; Mong", "hmp": "Northern Mashan Hmong; Northern Mashan Miao", "hmq": "Eastern Qiandong Miao", "hmr": "Hmar", "hms": "Southern Qiandong Miao", "hmt": "Hamtai", "hmu": "Hamap", "hmv": "Hmong Dô", "hmw": "Western Mashan Hmong; Western Mashan Miao", "hmx": "Hmong-Mien languages", "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao", "hmz": "Hmong Shua; Sinicized Miao", "hna": "Mina (Cameroon)", "hnd": "Southern Hindko", "hne": "Chhattisgarhi", "hng": "Hungu", "hnh": "ǁAni", "hni": "Hani", "hnj": "Hmong Njua; Mong Leng; Mong Njua", "hnn": "Hanunoo", "hno": "Northern Hindko", "hns": "Caribbean Hindustani", "hnu": "Hung", "ho": "Hiri Motu", "hoa": "Hoava", "hob": "Mari (Madang Province)", "hoc": "Ho", "hod": "Holma", "hoe": "Horom", "hoh": "Hobyót", "hoi": "Holikachuk", "hoj": "Hadothi; Haroti", "hok": "Hokan languages", "hol": "Holu", "hom": "Homa", "hoo": "Holoholo", "hop": "Hopi", "hor": "Horo", "hos": "Ho Chi Minh City Sign Language", "hot": "Hote; Malê", "hov": "Hovongan", "how": "Honi", "hoy": "Holiya", "hoz": "Hozo", "hpo": "Hpon", "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language", "hr": "Croatian", "hra": "Hrangkhol", "hrc": "Niwer Mil", "hre": "Hre", "hrk": "Haruku", "hrm": "Horned Miao", "hro": "Haroi", "hrp": "Nhirrpi", "hrt": "Hértevin", "hru": "Hruso", "hrw": "Warwar Feni", "hrx": "Hunsrik", "hrz": "Harzani", "hsb": "Upper Sorbian", "hsh": "Hungarian Sign Language", "hsl": "Hausa Sign Language", "hsn": "Xiang Chinese", "hss": "Harsusi", "ht": "Haitian; Haitian Creole", "hti": "Hoti", "hto": "Minica Huitoto", "hts": "Hadza", "htu": "Hitu", "htx": "Middle Hittite", "hu": "Hungarian", "hub": "Huambisa", "huc": "ǂHua; ǂʼAmkhoe", "hud": "Huaulu", "hue": "San Francisco Del Mar Huave", "huf": "Humene", "hug": "Huachipaeri", "huh": "Huilliche", "hui": "Huli", "huj": "Northern Guiyang Hmong; Northern Guiyang Miao", "huk": "Hulung", "hul": "Hula", "hum": "Hungana", "huo": "Hu", "hup": "Hupa", "huq": "Tsat", "hur": "Halkomelem", "hus": "Huastec", "hut": "Humla", "huu": "Murui Huitoto", "huv": "San Mateo Del Mar Huave", "huw": "Hukumina", "hux": "Nüpode Huitoto", "huy": "Hulaulá", "huz": "Hunzib", "hvc": "Haitian Vodoun Culture Language", "hve": "San Dionisio Del Mar Huave", "hvk": "Haveke", "hvn": "Sabu", "hvv": "Santa María Del Mar Huave", "hwa": "Wané", "hwc": "Hawai'i Creole English; Hawai'i Pidgin", "hwo": "Hwana", "hy": 
"Armenian", "hya": "Hya", "hyw": "Western Armenian", "hyx": "Armenian (family)", "hz": "Herero", "ia": "Interlingua (International Auxiliary Language Association)", "iai": "Iaai", "ian": "Iatmul", "iar": "Purari", "iba": "Iban", "ibb": "Ibibio", "ibd": "Iwaidja", "ibe": "Akpes", "ibg": "Ibanag", "ibh": "Bih", "ibl": "Ibaloi", "ibm": "Agoi", "ibn": "Ibino", "ibr": "Ibuoro", "ibu": "Ibu", "iby": "Ibani", "ica": "Ede Ica", "ich": "Etkywan", "icl": "Icelandic Sign Language", "icr": "Islander Creole English", "id": "Indonesian", "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi", "idb": "Indo-Portuguese", "idc": "Idon; Ajiya", "idd": "Ede Idaca", "ide": "Idere", "idi": "Idi", "idr": "Indri", "ids": "Idesa", "idt": "Idaté", "idu": "Idoma", "ie": "Interlingue; Occidental", "ifa": "Amganad Ifugao", "ifb": "Batad Ifugao; Ayangan Ifugao", "ife": "Ifè", "iff": "Ifo", "ifk": "Tuwali Ifugao", "ifm": "Teke-Fuumu", "ifu": "Mayoyao Ifugao", "ify": "Keley-I Kallahan", "ig": "Igbo", "igb": "Ebira", "ige": "Igede", "igg": "Igana", "igl": "Igala", "igm": "Kanggape", "ign": "Ignaciano", "igo": "Isebe", "igs": "Interglossa", "igw": "Igwe", "ihb": "Iha Based Pidgin", "ihi": "Ihievbe", "ihp": "Iha", "ihw": "Bidhawal", "ii": "Sichuan Yi; Nuosu", "iin": "Thiin", "iir": "Indo-Iranian languages", "ijc": "Izon", "ije": "Biseni", "ijj": "Ede Ije", "ijn": "Kalabari", "ijo": "Ijo languages", "ijs": "Southeast Ijo", "ik": "Inupiaq", "ike": "Eastern Canadian Inuktitut", "iki": "Iko", "ikk": "Ika", "ikl": "Ikulu", "iko": "Olulumo-Ikom", "ikp": "Ikpeshi", "ikr": "Ikaranggal", "iks": "Inuit Sign Language", "ikt": "Inuinnaqtun; Western Canadian Inuktitut", "ikv": "Iku-Gora-Ankwa", "ikw": "Ikwere", "ikx": "Ik", "ikz": "Ikizu", "ila": "Ile Ape", "ilb": "Ila", "ilg": "Garig-Ilgar", "ili": "Ili Turki", "ilk": "Ilongot", "ilm": "Iranun (Malaysia)", "ilo": "Iloko", "ilp": "Iranun (Philippines)", "ils": "International Sign", "ilu": "Ili'uun", "ilv": "Ilue", "ima": "Mala Malasar", "imi": "Anamgura", "iml": "Miluk", "imn": "Imonda", "imo": "Imbongu", "imr": "Imroing", "ims": "Marsian", "imt": "Imotong", "imy": "Milyan", "inb": "Inga", "inc": "Indic languages", "ine": "Indo-European languages", "ing": "Degexit'an", "inh": "Ingush", "inj": "Jungle Inga", "inl": "Indonesian Sign Language", "inm": "Minaean", "inn": "Isinai", "ino": "Inoke-Yate", "inp": "Iñapari", "ins": "Indian Sign Language", "int": "Intha", "inz": "Ineseño", "io": "Ido", "ior": "Inor", "iou": "Tuma-Irumu", "iow": "Iowa-Oto", "ipi": "Ipili", "ipo": "Ipiko", "iqu": "Iquito", "iqw": "Ikwo", "ira": "Iranian languages", "ire": "Iresim", "irh": "Irarutu", "iri": "Rigwe; Irigwe", "irk": "Iraqw", "irn": "Irántxe", "iro": "Iroquoian languages", "irr": "Ir", "iru": "Irula", "irx": "Kamberau", "iry": "Iraya", "is": "Icelandic", "isa": "Isabi", "isc": "Isconahua", "isd": "Isnag", "ise": "Italian Sign Language", "isg": "Irish Sign Language", "ish": "Esan", "isi": "Nkem-Nkum", "isk": "Ishkashimi", "ism": "Masimasi", "isn": "Isanzu", "iso": "Isoko", "isr": "Israeli Sign Language", "ist": "Istriot", "isu": "Isu (Menchum Division)", "it": "Italian", "itb": "Binongan Itneg", "itc": "Italic languages", "itd": "Southern Tidung", "ite": "Itene", "iti": "Inlaod Itneg", "itk": "Judeo-Italian", "itl": "Itelmen", "itm": "Itu Mbon Uzo", "ito": "Itonama", "itr": "Iteri", "its": "Isekiri", "itt": "Maeng Itneg", "itv": "Itawit", "itw": "Ito", "itx": "Itik", "ity": "Moyadan Itneg", "itz": "Itzá", "iu": "Inuktitut", "ium": "Iu Mien", "ivb": "Ibatan", "ivv": "Ivatan", "iwk": "I-Wak", "iwm": 
"Iwam", "iwo": "Iwur", "iws": "Sepik Iwam", "ixc": "Ixcatec", "ixl": "Ixil", "iya": "Iyayu", "iyo": "Mesaka", "iyx": "Yaka (Congo)", "izh": "Ingrian", "izr": "Izere", "izz": "Izii", "ja": "Japanese", "jaa": "Jamamadí", "jab": "Hyam", "jac": "Popti'; Jakalteko", "jad": "Jahanka", "jae": "Yabem", "jaf": "Jara", "jah": "Jah Hut", "jaj": "Zazao", "jak": "Jakun", "jal": "Yalahatan", "jam": "Jamaican Creole English", "jan": "Jandai", "jao": "Yanyuwa", "jaq": "Yaqay", "jas": "New Caledonian Javanese", "jat": "Jakati", "jau": "Yaur", "jax": "Jambi Malay", "jay": "Yan-nhangu; Nhangu", "jaz": "Jawe", "jbe": "Judeo-Berber", "jbi": "Badjiri", "jbj": "Arandai", "jbk": "Barikewa", "jbm": "Bijim", "jbn": "Nafusi", "jbo": "Lojban", "jbr": "Jofotek-Bromnya", "jbt": "Jabutí", "jbu": "Jukun Takum", "jbw": "Yawijibaya", "jcs": "Jamaican Country Sign Language", "jct": "Krymchak", "jda": "Jad", "jdg": "Jadgali", "jdt": "Judeo-Tat", "jeb": "Jebero", "jee": "Jerung", "jeh": "Jeh", "jei": "Yei", "jek": "Jeri Kuo", "jel": "Yelmek", "jen": "Dza", "jer": "Jere", "jet": "Manem", "jeu": "Jonkor Bourmataguil", "jgb": "Ngbee", "jge": "Judeo-Georgian", "jgk": "Gwak", "jgo": "Ngomba", "jhi": "Jehai", "jhs": "Jhankot Sign Language", "jia": "Jina", "jib": "Jibu", "jic": "Tol", "jid": "Bu (Kaduna State)", "jie": "Jilbe", "jig": "Jingulu; Djingili", "jih": "sTodsde; Shangzhai", "jii": "Jiiddu", "jil": "Jilim", "jim": "Jimi (Cameroon)", "jio": "Jiamao", "jiq": "Guanyinqiao; Lavrung", "jit": "Jita", "jiu": "Youle Jinuo", "jiv": "Shuar", "jiy": "Buyuan Jinuo", "jje": "Jejueo", "jjr": "Bankal", "jka": "Kaera", "jkm": "Mobwa Karen", "jko": "Kubo", "jkp": "Paku Karen", "jkr": "Koro (India)", "jks": "Amami Koniya Sign Language", "jku": "Labir", "jle": "Ngile", "jls": "Jamaican Sign Language", "jma": "Dima", "jmb": "Zumbun", "jmc": "Machame", "jmd": "Yamdena", "jmi": "Jimi (Nigeria)", "jml": "Jumli", "jmn": "Makuri Naga", "jmr": "Kamara", "jms": "Mashi (Nigeria)", "jmw": "Mouwase", "jmx": "Western Juxtlahuaca Mixtec", "jna": "Jangshung", "jnd": "Jandavra", "jng": "Yangman", "jni": "Janji", "jnj": "Yemsa", "jnl": "Rawat", "jns": "Jaunsari", "job": "Joba", "jod": "Wojenaka", "jog": "Jogi", "jor": "Jorá", "jos": "Jordanian Sign Language", "jow": "Jowulu", "jpa": "Jewish Palestinian Aramaic", "jpr": "Judeo-Persian", "jpx": "Japanese (family)", "jqr": "Jaqaru", "jra": "Jarai", "jrb": "Judeo-Arabic", "jrr": "Jiru", "jrt": "Jakattoe", "jru": "Japrería", "jsl": "Japanese Sign Language", "jua": "Júma", "jub": "Wannu", "juc": "Jurchen", "jud": "Worodougou", "juh": "Hõne", "jui": "Ngadjuri", "juk": "Wapan", "jul": "Jirel", "jum": "Jumjum", "jun": "Juang", "juo": "Jiba", "jup": "Hupdë", "jur": "Jurúna", "jus": "Jumla Sign Language", "jut": "Jutish", "juu": "Ju", "juw": "Wãpha", "juy": "Juray", "jv": "Javanese", "jvd": "Javindo", "jvn": "Caribbean Javanese", "jwi": "Jwira-Pepesa", "jya": "Jiarong", "jye": "Judeo-Yemeni Arabic", "jyy": "Jaya", "ka": "Georgian", "kaa": "Kara-Kalpak; Karakalpak", "kab": "Kabyle", "kac": "Kachin; Jingpho", "kad": "Adara", "kae": "Ketangalan", "kaf": "Katso", "kag": "Kajaman", "kah": "Kara (Central African Republic)", "kai": "Karekare", "kaj": "Jju", "kak": "Kalanguya; Kayapa Kallahan", "kam": "Kamba (Kenya)", "kao": "Xaasongaxango", "kap": "Bezhta", "kaq": "Capanahua", "kar": "Karen languages", "kav": "Katukína", "kaw": "Kawi", "kax": "Kao", "kay": "Kamayurá", "kba": "Kalarko", "kbb": "Kaxuiâna", "kbc": "Kadiwéu", "kbd": "Kabardian", "kbe": "Kanju", "kbg": "Khamba", "kbh": "Camsá", "kbi": "Kaptiau", "kbj": "Kari", 
"kbk": "Grass Koiari", "kbl": "Kanembu", "kbm": "Iwal", "kbn": "Kare (Central African Republic)", "kbo": "Keliko", "kbp": "Kabiyè", "kbq": "Kamano", "kbr": "Kafa", "kbs": "Kande", "kbt": "Abadi", "kbu": "Kabutra", "kbv": "Dera (Indonesia)", "kbw": "Kaiep", "kbx": "Ap Ma", "kby": "Manga Kanuri", "kbz": "Duhwa", "kca": "Khanty", "kcb": "Kawacha", "kcc": "Lubila", "kcd": "Ngkâlmpw Kanum", "kce": "Kaivi", "kcf": "Ukaan", "kcg": "Tyap", "kch": "Vono", "kci": "Kamantan", "kcj": "Kobiana", "kck": "Kalanga", "kcl": "Kela (Papua New Guinea); Kala", "kcm": "Gula (Central African Republic)", "kcn": "Nubi", "kco": "Kinalakna", "kcp": "Kanga", "kcq": "Kamo", "kcr": "Katla", "kcs": "Koenoem", "kct": "Kaian", "kcu": "Kami (Tanzania)", "kcv": "Kete", "kcw": "Kabwari", "kcx": "Kachama-Ganjule", "kcy": "Korandje", "kcz": "Konongo", "kda": "Worimi", "kdc": "Kutu", "kdd": "Yankunytjatjara", "kde": "Makonde", "kdf": "Mamusi", "kdg": "Seba", "kdh": "Tem", "kdi": "Kumam", "kdj": "Karamojong", "kdk": "Numèè; Kwényi", "kdl": "Tsikimba", "kdm": "Kagoma", "kdn": "Kunda", "kdo": "Kordofanian languages", "kdp": "Kaningdon-Nindem", "kdq": "Koch", "kdr": "Karaim", "kdt": "Kuy", "kdu": "Kadaru", "kdw": "Koneraw", "kdx": "Kam", "kdy": "Keder; Keijar", "kdz": "Kwaja", "kea": "Kabuverdianu", "keb": "Kélé", "kec": "Keiga", "ked": "Kerewe", "kee": "Eastern Keres", "kef": "Kpessi", "keg": "Tese", "keh": "Keak", "kei": "Kei", "kej": "Kadar", "kek": "Kekchí", "kel": "Kela (Democratic Republic of Congo)", "kem": "Kemak", "ken": "Kenyang", "keo": "Kakwa", "kep": "Kaikadi", "keq": "Kamar", "ker": "Kera", "kes": "Kugbo", "ket": "Ket", "keu": "Akebu", "kev": "Kanikkaran", "kew": "West Kewa", "kex": "Kukna", "key": "Kupia", "kez": "Kukele", "kfa": "Kodava", "kfb": "Northwestern Kolami", "kfc": "Konda-Dora", "kfd": "Korra Koraga", "kfe": "Kota (India)", "kff": "Koya", "kfg": "Kudiya", "kfh": "Kurichiya", "kfi": "Kannada Kurumba", "kfj": "Kemiehua", "kfk": "Kinnauri", "kfl": "Kung", "kfm": "Khunsari", "kfn": "Kuk", "kfo": "Koro (Côte d'Ivoire)", "kfp": "Korwa", "kfq": "Korku", "kfr": "Kachhi; Kutchi", "kfs": "Bilaspuri", "kft": "Kanjari", "kfu": "Katkari", "kfv": "Kurmukar", "kfw": "Kharam Naga", "kfx": "Kullu Pahari", "kfy": "Kumaoni", "kfz": "Koromfé", "kg": "Kongo", "kga": "Koyaga", "kgb": "Kawe", "kge": "Komering", "kgf": "Kube", "kgg": "Kusunda", "kgi": "Selangor Sign Language", "kgj": "Gamale Kham", "kgk": "Kaiwá", "kgl": "Kunggari", "kgm": "Karipúna", "kgn": "Karingani", "kgo": "Krongo", "kgp": "Kaingang", "kgq": "Kamoro", "kgr": "Abun", "kgs": "Kumbainggar", "kgt": "Somyev", "kgu": "Kobol", "kgv": "Karas", "kgw": "Karon Dori", "kgx": "Kamaru", "kgy": "Kyerung", "kha": "Khasi", "khb": "Lü", "khc": "Tukang Besi North", "khd": "Bädi Kanum", "khe": "Korowai", "khf": "Khuen", "khg": "Khams Tibetan", "khh": "Kehu", "khi": "Khoisan languages", "khj": "Kuturmi", "khk": "Halh Mongolian", "khl": "Lusi", "khn": "Khandesi", "kho": "Khotanese; Sakan", "khp": "Kapori; Kapauri", "khq": "Koyra Chiini Songhay", "khr": "Kharia", "khs": "Kasua", "kht": "Khamti", "khu": "Nkhumbi", "khv": "Khvarshi", "khw": "Khowar", "khx": "Kanu", "khy": "Kele (Democratic Republic of Congo)", "khz": "Keapara", "ki": "Kikuyu; Gikuyu", "kia": "Kim", "kib": "Koalib", "kic": "Kickapoo", "kid": "Koshin", "kie": "Kibet", "kif": "Eastern Parbate Kham", "kig": "Kimaama; Kimaghima", "kih": "Kilmeri", "kii": "Kitsai", "kij": "Kilivila", "kil": "Kariya", "kim": "Karagas", "kio": "Kiowa", "kip": "Sheshi Kham", "kiq": "Kosadle; Kosare", "kis": "Kis", "kit": "Agob", "kiu": 
"Kirmanjki (individual language)", "kiv": "Kimbu", "kiw": "Northeast Kiwai", "kix": "Khiamniungan Naga", "kiy": "Kirikiri", "kiz": "Kisi", "kj": "Kuanyama; Kwanyama", "kja": "Mlap", "kjb": "Q'anjob'al; Kanjobal", "kjc": "Coastal Konjo", "kjd": "Southern Kiwai", "kje": "Kisar", "kjg": "Khmu", "kjh": "Khakas", "kji": "Zabana", "kjj": "Khinalugh", "kjk": "Highland Konjo", "kjl": "Western Parbate Kham", "kjm": "Kháng", "kjn": "Kunjen", "kjo": "Harijan Kinnauri", "kjp": "Pwo Eastern Karen", "kjq": "Western Keres", "kjr": "Kurudu", "kjs": "East Kewa", "kjt": "Phrae Pwo Karen", "kju": "Kashaya", "kjv": "Kaikavian Literary Language", "kjx": "Ramopa", "kjy": "Erave", "kjz": "Bumthangkha", "kk": "Kazakh", "kka": "Kakanda", "kkb": "Kwerisa", "kkc": "Odoodee", "kkd": "Kinuku", "kke": "Kakabe", "kkf": "Kalaktang Monpa", "kkg": "Mabaka Valley Kalinga", "kkh": "Khün", "kki": "Kagulu", "kkj": "Kako", "kkk": "Kokota", "kkl": "Kosarek Yale", "kkm": "Kiong", "kkn": "Kon Keu", "kko": "Karko", "kkp": "Gugubera; Koko-Bera", "kkq": "Kaeku", "kkr": "Kir-Balar", "kks": "Giiwo", "kkt": "Koi", "kku": "Tumi", "kkv": "Kangean", "kkw": "Teke-Kukuya", "kkx": "Kohin", "kky": "Guugu Yimidhirr; Guguyimidjir", "kkz": "Kaska", "kl": "Kalaallisut; Greenlandic", "kla": "Klamath-Modoc", "klb": "Kiliwa", "klc": "Kolbila", "kld": "Gamilaraay", "kle": "Kulung (Nepal)", "klf": "Kendeje", "klg": "Tagakaulo", "klh": "Weliki", "kli": "Kalumpang", "klj": "Khalaj", "klk": "Kono (Nigeria)", "kll": "Kagan Kalagan", "klm": "Migum", "kln": "Kalenjin", "klo": "Kapya", "klp": "Kamasa", "klq": "Rumu", "klr": "Khaling", "kls": "Kalasha", "klt": "Nukna", "klu": "Klao", "klv": "Maskelynes", "klw": "Tado; Lindu", "klx": "Koluwawa", "kly": "Kalao", "klz": "Kabola", "km": "Khmer; Central Khmer", "kma": "Konni", "kmb": "Kimbundu", "kmc": "Southern Dong", "kmd": "Majukayang Kalinga", "kme": "Bakole", "kmf": "Kare (Papua New Guinea)", "kmg": "Kâte", "kmh": "Kalam", "kmi": "Kami (Nigeria)", "kmj": "Kumarbhag Paharia", "kmk": "Limos Kalinga", "kml": "Tanudan Kalinga", "kmm": "Kom (India)", "kmn": "Awtuw", "kmo": "Kwoma", "kmp": "Gimme", "kmq": "Kwama", "kmr": "Northern Kurdish", "kms": "Kamasau", "kmt": "Kemtuik", "kmu": "Kanite", "kmv": "Karipúna Creole French", "kmw": "Komo (Democratic Republic of Congo)", "kmx": "Waboda", "kmy": "Koma", "kmz": "Khorasani Turkish", "kn": "Kannada", "kna": "Dera (Nigeria)", "knb": "Lubuagan Kalinga", "knc": "Central Kanuri", "knd": "Konda", "kne": "Kankanaey", "knf": "Mankanya", "kng": "Koongo", "kni": "Kanufi", "knj": "Western Kanjobal", "knk": "Kuranko", "knl": "Keninjal", "knm": "Kanamarí", "knn": "Konkani (individual language)", "kno": "Kono (Sierra Leone)", "knp": "Kwanja", "knq": "Kintaq", "knr": "Kaningra", "kns": "Kensiu", "knt": "Panoan Katukína", "knu": "Kono (Guinea)", "knv": "Tabo", "knw": "Kung-Ekoka", "knx": "Kendayan; Salako", "kny": "Kanyok", "knz": "Kalamsé", "ko": "Korean", "koa": "Konomala", "koc": "Kpati", "kod": "Kodi", "koe": "Kacipo-Bale Suri", "kof": "Kubi", "kog": "Cogui; Kogi", "koh": "Koyo", "koi": "Komi-Permyak", "kok": "Konkani (macrolanguage)", "kol": "Kol (Papua New Guinea)", "koo": "Konzo", "kop": "Waube", "koq": "Kota (Gabon)", "kos": "Kosraean", "kot": "Lagwan", "kou": "Koke", "kov": "Kudu-Camo", "kow": "Kugama", "koy": "Koyukon", "koz": "Korak", "kpa": "Kutto", "kpb": "Mullu Kurumba", "kpc": "Curripaco", "kpd": "Koba", "kpe": "Kpelle", "kpf": "Komba", "kpg": "Kapingamarangi", "kph": "Kplang", "kpi": "Kofei", "kpj": "Karajá", "kpk": "Kpan", "kpl": "Kpala", "kpm": "Koho", "kpn": 
"Kepkiriwát", "kpo": "Ikposo", "kpq": "Korupun-Sela", "kpr": "Korafe-Yegha", "kps": "Tehit", "kpt": "Karata", "kpu": "Kafoa", "kpv": "Komi-Zyrian", "kpw": "Kobon", "kpx": "Mountain Koiali", "kpy": "Koryak", "kpz": "Kupsabiny", "kqa": "Mum", "kqb": "Kovai", "kqc": "Doromu-Koki", "kqd": "Koy Sanjaq Surat", "kqe": "Kalagan", "kqf": "Kakabai", "kqg": "Khe", "kqh": "Kisankasa", "kqi": "Koitabu", "kqj": "Koromira", "kqk": "Kotafon Gbe", "kql": "Kyenele", "kqm": "Khisa", "kqn": "Kaonde", "kqo": "Eastern Krahn", "kqp": "Kimré", "kqq": "Krenak", "kqr": "Kimaragang", "kqs": "Northern Kissi", "kqt": "Klias River Kadazan", "kqu": "Seroa", "kqv": "Okolod", "kqw": "Kandas", "kqx": "Mser", "kqy": "Koorete", "kqz": "Korana", "kr": "Kanuri", "kra": "Kumhali", "krb": "Karkin", "krc": "Karachay-Balkar", "krd": "Kairui-Midiki", "kre": "Panará", "krf": "Koro (Vanuatu)", "krh": "Kurama", "kri": "Krio", "krj": "Kinaray-A", "krk": "Kerek", "krl": "Karelian", "krn": "Sapo", "kro": "Kru languages", "krp": "Korop", "krr": "Krung", "krs": "Gbaya (Sudan)", "krt": "Tumari Kanuri", "kru": "Kurukh", "krv": "Kavet", "krw": "Western Krahn", "krx": "Karon", "kry": "Kryts", "krz": "Sota Kanum", "ks": "Kashmiri", "ksa": "Shuwa-Zamani", "ksb": "Shambala", "ksc": "Southern Kalinga", "ksd": "Kuanua", "kse": "Kuni", "ksf": "Bafia", "ksg": "Kusaghe", "ksh": "Kölsch", "ksi": "Krisa; I'saka", "ksj": "Uare", "ksk": "Kansa", "ksl": "Kumalu", "ksm": "Kumba", "ksn": "Kasiguranin", "kso": "Kofa", "ksp": "Kaba", "ksq": "Kwaami", "ksr": "Borong", "kss": "Southern Kisi", "kst": "Winyé", "ksu": "Khamyang", "ksv": "Kusu", "ksw": "S'gaw Karen", "ksx": "Kedang", "ksy": "Kharia Thar", "ksz": "Kodaku", "kta": "Katua", "ktb": "Kambaata", "ktc": "Kholok", "ktd": "Kokata; Kukatha", "kte": "Nubri", "ktf": "Kwami", "ktg": "Kalkutung", "kth": "Karanga", "kti": "North Muyu", "ktj": "Plapo Krumen", "ktk": "Kaniet", "ktl": "Koroshi", "ktm": "Kurti", "ktn": "Karitiâna", "kto": "Kuot", "ktp": "Kaduo", "ktq": "Katabaga", "kts": "South Muyu", "ktt": "Ketum", "ktu": "Kituba (Democratic Republic of Congo)", "ktv": "Eastern Katu", "ktw": "Kato", "ktx": "Kaxararí", "kty": "Kango (Bas-Uélé District)", "ktz": "Juǀʼhoan; Juǀʼhoansi", "ku": "Kurdish", "kub": "Kutep", "kuc": "Kwinsu", "kud": "'Auhelawa", "kue": "Kuman (Papua New Guinea)", "kuf": "Western Katu", "kug": "Kupa", "kuh": "Kushi", "kui": "Kuikúro-Kalapálo; Kalapalo", "kuj": "Kuria", "kuk": "Kepo'", "kul": "Kulere", "kum": "Kumyk", "kun": "Kunama", "kuo": "Kumukio", "kup": "Kunimaipa", "kuq": "Karipuna", "kus": "Kusaal", "kut": "Kutenai", "kuu": "Upper Kuskokwim", "kuv": "Kur", "kuw": "Kpagua", "kux": "Kukatja", "kuy": "Kuuku-Ya'u", "kuz": "Kunza", "kv": "Komi", "kva": "Bagvalal", "kvb": "Kubu", "kvc": "Kove", "kvd": "Kui (Indonesia)", "kve": "Kalabakan", "kvf": "Kabalai", "kvg": "Kuni-Boazi", "kvh": "Komodo", "kvi": "Kwang", "kvj": "Psikye", "kvk": "Korean Sign Language", "kvl": "Kayaw", "kvm": "Kendem", "kvn": "Border Kuna", "kvo": "Dobel", "kvp": "Kompane", "kvq": "Geba Karen", "kvr": "Kerinci", "kvt": "Lahta Karen; Lahta", "kvu": "Yinbaw Karen", "kvv": "Kola", "kvw": "Wersing", "kvx": "Parkari Koli", "kvy": "Yintale Karen; Yintale", "kvz": "Tsakwambo; Tsaukambo", "kw": "Cornish", "kwa": "Dâw", "kwb": "Kwa", "kwc": "Likwala", "kwd": "Kwaio", "kwe": "Kwerba", "kwf": "Kwara'ae", "kwg": "Sara Kaba Deme", "kwh": "Kowiai", "kwi": "Awa-Cuaiquer", "kwj": "Kwanga", "kwk": "Kwakiutl", "kwl": "Kofyar", "kwm": "Kwambi", "kwn": "Kwangali", "kwo": "Kwomtari", "kwp": "Kodia", "kwr": "Kwer", "kws": "Kwese", "kwt": 
"Kwesten", "kwu": "Kwakum", "kwv": "Sara Kaba Náà", "kww": "Kwinti", "kwx": "Khirwar", "kwy": "San Salvador Kongo", "kwz": "Kwadi", "kxa": "Kairiru", "kxb": "Krobu", "kxc": "Konso; Khonso", "kxd": "Brunei", "kxf": "Manumanaw Karen; Manumanaw", "kxh": "Karo (Ethiopia)", "kxi": "Keningau Murut", "kxj": "Kulfa", "kxk": "Zayein Karen", "kxm": "Northern Khmer", "kxn": "Kanowit-Tanjong Melanau", "kxo": "Kanoé", "kxp": "Wadiyara Koli", "kxq": "Smärky Kanum", "kxr": "Koro (Papua New Guinea)", "kxs": "Kangjia", "kxt": "Koiwat", "kxv": "Kuvi", "kxw": "Konai", "kxx": "Likuba", "kxy": "Kayong", "kxz": "Kerewo", "ky": "Kirghiz; Kyrgyz", "kya": "Kwaya", "kyb": "Butbut Kalinga", "kyc": "Kyaka", "kyd": "Karey", "kye": "Krache", "kyf": "Kouya", "kyg": "Keyagana", "kyh": "Karok", "kyi": "Kiput", "kyj": "Karao", "kyk": "Kamayo", "kyl": "Kalapuya", "kym": "Kpatili", "kyn": "Northern Binukidnon", "kyo": "Kelon", "kyp": "Kang", "kyq": "Kenga", "kyr": "Kuruáya", "kys": "Baram Kayan", "kyt": "Kayagar", "kyu": "Western Kayah", "kyv": "Kayort", "kyw": "Kudmali", "kyx": "Rapoisi", "kyy": "Kambaira", "kyz": "Kayabí", "kza": "Western Karaboro", "kzb": "Kaibobo", "kzc": "Bondoukou Kulango", "kzd": "Kadai", "kze": "Kosena", "kzf": "Da'a Kaili", "kzg": "Kikai", "kzi": "Kelabit", "kzk": "Kazukuru", "kzl": "Kayeli", "kzm": "Kais", "kzn": "Kokola", "kzo": "Kaningi", "kzp": "Kaidipang", "kzq": "Kaike", "kzr": "Karang", "kzs": "Sugut Dusun", "kzu": "Kayupulau", "kzv": "Komyandaret", "kzw": "Karirí-Xocó", "kzx": "Kamarian", "kzy": "Kango (Tshopo District)", "kzz": "Kalabra", "la": "Latin", "laa": "Southern Subanen", "lab": "Linear A", "lac": "Lacandon", "lad": "Ladino", "lae": "Pattani", "laf": "Lafofa", "lag": "Langi", "lah": "Lahnda", "lai": "Lambya", "laj": "Lango (Uganda)", "lal": "Lalia", "lam": "Lamba", "lan": "Laru", "lap": "Laka (Chad)", "laq": "Qabiao", "lar": "Larteh", "las": "Lama (Togo)", "lau": "Laba", "law": "Lauje", "lax": "Tiwa", "lay": "Lama Bai", "laz": "Aribwatsa", "lb": "Luxembourgish; Letzeburgesch", "lbb": "Label", "lbc": "Lakkia", "lbe": "Lak", "lbf": "Tinani", "lbg": "Laopang", "lbi": "La'bi", "lbj": "Ladakhi", "lbk": "Central Bontok", "lbl": "Libon Bikol", "lbm": "Lodhi", "lbn": "Rmeet", "lbo": "Laven", "lbq": "Wampar", "lbr": "Lohorung", "lbs": "Libyan Sign Language", "lbt": "Lachi", "lbu": "Labu", "lbv": "Lavatbura-Lamusong", "lbw": "Tolaki", "lbx": "Lawangan", "lby": "Lamalama; Lamu-Lamu", "lbz": "Lardil", "lcc": "Legenyem", "lcd": "Lola", "lce": "Loncong; Sekak", "lcf": "Lubu", "lch": "Luchazi", "lcl": "Lisela", "lcm": "Tungag", "lcp": "Western Lawa", "lcq": "Luhu", "lcs": "Lisabata-Nuniali", "lda": "Kla-Dan", "ldb": "Dũya", "ldd": "Luri", "ldg": "Lenyima", "ldh": "Lamja-Dengsa-Tola", "ldi": "Laari", "ldj": "Lemoro", "ldk": "Leelau", "ldl": "Kaan", "ldm": "Landoma", "ldn": "Láadan", "ldo": "Loo", "ldp": "Tso", "ldq": "Lufu", "lea": "Lega-Shabunda", "leb": "Lala-Bisa", "lec": "Leco", "led": "Lendu", "lee": "Lyélé", "lef": "Lelemi", "leh": "Lenje", "lei": "Lemio", "lej": "Lengola", "lek": "Leipon", "lel": "Lele (Democratic Republic of Congo)", "lem": "Nomaande", "len": "Lenca", "leo": "Leti (Cameroon)", "lep": "Lepcha", "leq": "Lembena", "ler": "Lenkau", "les": "Lese", "let": "Lesing-Gelimi; Amio-Gelimi", "leu": "Kara (Papua New Guinea)", "lev": "Lamma", "lew": "Ledo Kaili", "lex": "Luang", "ley": "Lemolang", "lez": "Lezghian", "lfa": "Lefa", "lfn": "Lingua Franca Nova", "lg": "Ganda; Luganda", "lga": "Lungga", "lgb": "Laghu", "lgg": "Lugbara", "lgh": "Laghuu", "lgi": "Lengilu", "lgk": "Lingarak; 
Neverver", "lgl": "Wala", "lgm": "Lega-Mwenga", "lgn": "T'apo; Opuuo", "lgo": "Lango (South Sudan)", "lgq": "Logba", "lgr": "Lengo", "lgt": "Pahi", "lgu": "Longgu", "lgz": "Ligenza", "lha": "Laha (Viet Nam)", "lhh": "Laha (Indonesia)", "lhi": "Lahu Shi", "lhl": "Lahul Lohar", "lhm": "Lhomi", "lhn": "Lahanan", "lhp": "Lhokpu", "lhs": "Mlahsö", "lht": "Lo-Toga", "lhu": "Lahu", "li": "Limburgan; Limburger; Limburgish", "lia": "West-Central Limba", "lib": "Likum", "lic": "Hlai", "lid": "Nyindrou", "lie": "Likila", "lif": "Limbu", "lig": "Ligbi", "lih": "Lihir", "lij": "Ligurian", "lik": "Lika", "lil": "Lillooet", "lio": "Liki", "lip": "Sekpele", "liq": "Libido", "lir": "Liberian English", "lis": "Lisu", "liu": "Logorik", "liv": "Liv", "liw": "Col", "lix": "Liabuku", "liy": "Banda-Bambari", "liz": "Libinza", "lja": "Golpa", "lje": "Rampi", "lji": "Laiyolo", "ljl": "Li'o", "ljp": "Lampung Api", "ljw": "Yirandali", "ljx": "Yuru", "lka": "Lakalei", "lkb": "Kabras; Lukabaras", "lkc": "Kucong", "lkd": "Lakondê", "lke": "Kenyi", "lkh": "Lakha", "lki": "Laki", "lkj": "Remun", "lkl": "Laeko-Libuat", "lkm": "Kalaamaya", "lkn": "Lakon; Vure", "lko": "Khayo; Olukhayo", "lkr": "Päri", "lks": "Kisa; Olushisa", "lkt": "Lakota", "lku": "Kungkari", "lky": "Lokoya", "lla": "Lala-Roba", "llb": "Lolo", "llc": "Lele (Guinea)", "lld": "Ladin", "lle": "Lele (Papua New Guinea)", "llf": "Hermit", "llg": "Lole", "llh": "Lamu", "lli": "Teke-Laali", "llj": "Ladji Ladji", "llk": "Lelak", "lll": "Lilau", "llm": "Lasalimu", "lln": "Lele (Chad)", "llp": "North Efate", "llq": "Lolak", "lls": "Lithuanian Sign Language", "llu": "Lau", "llx": "Lauan", "lma": "East Limba", "lmb": "Merei", "lmc": "Limilngan", "lmd": "Lumun", "lme": "Pévé", "lmf": "South Lembata", "lmg": "Lamogai", "lmh": "Lambichhong", "lmi": "Lombi", "lmj": "West Lembata", "lmk": "Lamkang", "lml": "Hano", "lmn": "Lambadi", "lmo": "Lombard", "lmp": "Limbum", "lmq": "Lamatuka", "lmr": "Lamalera", "lmu": "Lamenu", "lmv": "Lomaiviti", "lmw": "Lake Miwok", "lmx": "Laimbue", "lmy": "Lamboya", "ln": "Lingala", "lna": "Langbashe", "lnb": "Mbalanhu", "lnd": "Lundayeh; Lun Bawang", "lng": "Langobardic", "lnh": "Lanoh", "lni": "Daantanai'", "lnj": "Leningitij", "lnl": "South Central Banda", "lnm": "Langam", "lnn": "Lorediakarkar", "lns": "Lamnso'", "lnu": "Longuda", "lnw": "Lanima", "lnz": "Lonzo", "lo": "Lao", "loa": "Loloda", "lob": "Lobi", "loc": "Inonhan", "loe": "Saluan", "lof": "Logol", "log": "Logo", "loh": "Narim", "loi": "Loma (Côte d'Ivoire)", "loj": "Lou", "lok": "Loko", "lol": "Mongo", "lom": "Loma (Liberia)", "lon": "Malawi Lomwe", "loo": "Lombo", "lop": "Lopa", "loq": "Lobala", "lor": "Téén", "los": "Loniu", "lot": "Otuho", "lou": "Louisiana Creole", "lov": "Lopi", "low": "Tampias Lobu", "lox": "Loun", "loy": "Loke", "loz": "Lozi", "lpa": "Lelepa", "lpe": "Lepki", "lpn": "Long Phuri Naga", "lpo": "Lipo", "lpx": "Lopit", "lqr": "Logir", "lra": "Rara Bakati'", "lrc": "Northern Luri", "lre": "Laurentian", "lrg": "Laragia", "lri": "Marachi; Olumarachi", "lrk": "Loarki", "lrl": "Lari", "lrm": "Marama; Olumarama", "lrn": "Lorang", "lro": "Laro", "lrr": "Southern Yamphu", "lrt": "Larantuka Malay", "lrv": "Larevat", "lrz": "Lemerig", "lsa": "Lasgerdi", "lsb": "Burundian Sign Language; Langue des Signes Burundaise", "lsc": "Albarradas Sign Language; Lengua de señas Albarradas", "lsd": "Lishana Deni", "lse": "Lusengo", "lsh": "Lish", "lsi": "Lashi", "lsl": "Latvian Sign Language", "lsm": "Saamia; Olusamia", "lsn": "Tibetan Sign Language", "lso": "Laos Sign Language", 
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas", "lsr": "Aruop", "lss": "Lasi", "lst": "Trinidad and Tobago Sign Language", "lsv": "Sivia Sign Language", "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise", "lsy": "Mauritian Sign Language", "lt": "Lithuanian", "ltc": "Late Middle Chinese", "ltg": "Latgalian", "lth": "Thur", "lti": "Leti (Indonesia)", "ltn": "Latundê", "lto": "Tsotso; Olutsotso", "lts": "Tachoni; Lutachoni", "ltu": "Latu", "lu": "Luba-Katanga", "lua": "Luba-Lulua", "luc": "Aringa", "lud": "Ludian", "lue": "Luvale", "luf": "Laua", "lui": "Luiseno", "luj": "Luna", "luk": "Lunanakha", "lul": "Olu'bo", "lum": "Luimbi", "lun": "Lunda", "luo": "Luo (Kenya and Tanzania); Dholuo", "lup": "Lumbu", "luq": "Lucumi", "lur": "Laura", "lus": "Lushai", "lut": "Lushootseed", "luu": "Lumba-Yakkha", "luv": "Luwati", "luw": "Luo (Cameroon)", "luy": "Luyia; Oluluyia", "luz": "Southern Luri", "lv": "Latvian", "lva": "Maku'a", "lvi": "Lavi", "lvk": "Lavukaleve", "lvs": "Standard Latvian", "lvu": "Levuka", "lwa": "Lwalu", "lwe": "Lewo Eleng", "lwg": "Wanga; Oluwanga", "lwh": "White Lachi", "lwl": "Eastern Lawa", "lwm": "Laomian", "lwo": "Luwo", "lws": "Malawian Sign Language", "lwt": "Lewotobi", "lwu": "Lawu", "lww": "Lewo", "lxm": "Lakurumau", "lya": "Layakha", "lyg": "Lyngngam", "lyn": "Luyana", "lzh": "Literary Chinese", "lzl": "Litzlitz", "lzn": "Leinong Naga", "lzz": "Laz", "maa": "San Jerónimo Tecóatl Mazatec", "mab": "Yutanduchi Mixtec", "mad": "Madurese", "mae": "Bo-Rukul", "maf": "Mafa", "mag": "Magahi", "mai": "Maithili", "maj": "Jalapa De Díaz Mazatec", "mak": "Makasar", "mam": "Mam", "man": "Mandingo; Manding", "map": "Austronesian languages", "maq": "Chiquihuitlán Mazatec", "mas": "Masai", "mat": "San Francisco Matlatzinca", "mau": "Huautla Mazatec", "mav": "Sateré-Mawé", "maw": "Mampruli", "max": "North Moluccan Malay", "maz": "Central Mazahua", "mba": "Higaonon", "mbb": "Western Bukidnon Manobo", "mbc": "Macushi", "mbd": "Dibabawon Manobo", "mbe": "Molale", "mbf": "Baba Malay", "mbh": "Mangseng", "mbi": "Ilianen Manobo", "mbj": "Nadëb", "mbk": "Malol", "mbl": "Maxakalí", "mbm": "Ombamba", "mbn": "Macaguán", "mbo": "Mbo (Cameroon)", "mbp": "Malayo", "mbq": "Maisin", "mbr": "Nukak Makú", "mbs": "Sarangani Manobo", "mbt": "Matigsalug Manobo", "mbu": "Mbula-Bwazza", "mbv": "Mbulungish", "mbw": "Maring", "mbx": "Mari (East Sepik Province)", "mby": "Memoni", "mbz": "Amoltepec Mixtec", "mca": "Maca", "mcb": "Machiguenga", "mcc": "Bitur", "mcd": "Sharanahua", "mce": "Itundujia Mixtec", "mcf": "Matsés", "mcg": "Mapoyo", "mch": "Maquiritari", "mci": "Mese", "mcj": "Mvanip", "mck": "Mbunda", "mcl": "Macaguaje", "mcm": "Malaccan Creole Portuguese", "mcn": "Masana", "mco": "Coatlán Mixe", "mcp": "Makaa", "mcq": "Ese", "mcr": "Menya", "mcs": "Mambai", "mct": "Mengisa", "mcu": "Cameroon Mambila", "mcv": "Minanibai", "mcw": "Mawa (Chad)", "mcx": "Mpiemo", "mcy": "South Watut", "mcz": "Mawan", "mda": "Mada (Nigeria)", "mdb": "Morigi", "mdc": "Male (Papua New Guinea)", "mdd": "Mbum", "mde": "Maba (Chad)", "mdf": "Moksha", "mdg": "Massalat", "mdh": "Maguindanaon", "mdi": "Mamvu", "mdj": "Mangbetu", "mdk": "Mangbutu", "mdl": "Maltese Sign Language", "mdm": "Mayogo", "mdn": "Mbati", "mdp": "Mbala", "mdq": "Mbole", "mdr": "Mandar", "mds": "Maria (Papua New Guinea)", "mdt": "Mbere", "mdu": "Mboko", "mdv": "Santa Lucía Monteverde Mixtec", "mdw": "Mbosi", "mdx": "Dizin", "mdy": "Male (Ethiopia)", "mdz": "Suruí Do Pará", "mea": "Menka", "meb": "Ikobi", "mec": 
"Marra", "med": "Melpa", "mee": "Mengen", "mef": "Megam", "meh": "Southwestern Tlaxiaco Mixtec", "mei": "Midob", "mej": "Meyah", "mek": "Mekeo", "mel": "Central Melanau", "mem": "Mangala", "men": "Mende (Sierra Leone)", "meo": "Kedah Malay", "mep": "Miriwoong", "meq": "Merey", "mer": "Meru", "mes": "Masmaje", "met": "Mato", "meu": "Motu", "mev": "Mano", "mew": "Maaka", "mey": "Hassaniyya", "mez": "Menominee", "mfa": "Pattani Malay", "mfb": "Bangka", "mfc": "Mba", "mfd": "Mendankwe-Nkwen", "mfe": "Morisyen", "mff": "Naki", "mfg": "Mogofin", "mfh": "Matal", "mfi": "Wandala", "mfj": "Mefele", "mfk": "North Mofu", "mfl": "Putai", "mfm": "Marghi South", "mfn": "Cross River Mbembe", "mfo": "Mbe", "mfp": "Makassar Malay", "mfq": "Moba", "mfr": "Marrithiyel", "mfs": "Mexican Sign Language", "mft": "Mokerang", "mfu": "Mbwela", "mfv": "Mandjak", "mfw": "Mulaha", "mfx": "Melo", "mfy": "Mayo", "mfz": "Mabaan", "mg": "Malagasy", "mga": "Middle Irish (900-1200)", "mgb": "Mararit", "mgc": "Morokodo", "mgd": "Moru", "mge": "Mango", "mgf": "Maklew", "mgg": "Mpumpong", "mgh": "Makhuwa-Meetto", "mgi": "Lijili", "mgj": "Abureni", "mgk": "Mawes", "mgl": "Maleu-Kilenge", "mgm": "Mambae", "mgn": "Mbangi", "mgo": "Meta'", "mgp": "Eastern Magar", "mgq": "Malila", "mgr": "Mambwe-Lungu", "mgs": "Manda (Tanzania)", "mgt": "Mongol", "mgu": "Mailu", "mgv": "Matengo", "mgw": "Matumbi", "mgy": "Mbunga", "mgz": "Mbugwe", "mh": "Marshallese", "mha": "Manda (India)", "mhb": "Mahongwe", "mhc": "Mocho", "mhd": "Mbugu", "mhe": "Besisi; Mah Meri", "mhf": "Mamaa", "mhg": "Margu", "mhi": "Ma'di", "mhj": "Mogholi", "mhk": "Mungaka", "mhl": "Mauwake", "mhm": "Makhuwa-Moniga", "mhn": "Mócheno", "mho": "Mashi (Zambia)", "mhp": "Balinese Malay", "mhq": "Mandan", "mhr": "Eastern Mari", "mhs": "Buru (Indonesia)", "mht": "Mandahuaca", "mhu": "Digaro-Mishmi; Darang Deng", "mhw": "Mbukushu", "mhx": "Maru; Lhaovo", "mhy": "Ma'anyan", "mhz": "Mor (Mor Islands)", "mi": "Maori", "mia": "Miami", "mib": "Atatláhuca Mixtec", "mic": "Mi'kmaq; Micmac", "mid": "Mandaic", "mie": "Ocotepec Mixtec", "mif": "Mofu-Gudur", "mig": "San Miguel El Grande Mixtec", "mih": "Chayuco Mixtec", "mii": "Chigmecatitlán Mixtec", "mij": "Abar; Mungbam", "mik": "Mikasuki", "mil": "Peñoles Mixtec", "mim": "Alacatlatzala Mixtec", "min": "Minangkabau", "mio": "Pinotepa Nacional Mixtec", "mip": "Apasco-Apoala Mixtec", "miq": "Mískito", "mir": "Isthmus Mixe", "mit": "Southern Puebla Mixtec", "miu": "Cacaloxtepec Mixtec", "miw": "Akoye", "mix": "Mixtepec Mixtec", "miy": "Ayutla Mixtec", "miz": "Coatzospan Mixtec", "mjb": "Makalero", "mjc": "San Juan Colorado Mixtec", "mjd": "Northwest Maidu", "mje": "Muskum", "mjg": "Tu", "mjh": "Mwera (Nyasa)", "mji": "Kim Mun", "mjj": "Mawak", "mjk": "Matukar", "mjl": "Mandeali", "mjm": "Medebur", "mjn": "Ma (Papua New Guinea)", "mjo": "Malankuravan", "mjp": "Malapandaram", "mjq": "Malaryan", "mjr": "Malavedan", "mjs": "Miship", "mjt": "Sauria Paharia", "mju": "Manna-Dora", "mjv": "Mannan", "mjw": "Karbi", "mjx": "Mahali", "mjy": "Mahican", "mjz": "Majhi", "mk": "Macedonian", "mka": "Mbre", "mkb": "Mal Paharia", "mkc": "Siliput", "mke": "Mawchi", "mkf": "Miya", "mkg": "Mak (China)", "mkh": "Mon-Khmer languages", "mki": "Dhatki", "mkj": "Mokilese", "mkk": "Byep", "mkl": "Mokole", "mkm": "Moklen", "mkn": "Kupang Malay", "mko": "Mingang Doso", "mkp": "Moikodi", "mkq": "Bay Miwok", "mkr": "Malas", "mks": "Silacayoapan Mixtec", "mkt": "Vamale", "mku": "Konyanka Maninka", "mkv": "Mafea", "mkw": "Kituba (Congo)", "mkx": "Kinamiging Manobo", "mky": 
"East Makian", "mkz": "Makasae", "ml": "Malayalam", "mla": "Malo", "mlb": "Mbule", "mlc": "Cao Lan", "mle": "Manambu", "mlf": "Mal", "mlh": "Mape", "mli": "Malimpung", "mlj": "Miltu", "mlk": "Ilwana; Kiwilwana", "mll": "Malua Bay", "mlm": "Mulam", "mln": "Malango", "mlo": "Mlomp", "mlp": "Bargam", "mlq": "Western Maninkakan", "mlr": "Vame", "mls": "Masalit", "mlu": "To'abaita", "mlv": "Motlav; Mwotlap", "mlw": "Moloko", "mlx": "Malfaxal; Naha'ai", "mlz": "Malaynon", "mma": "Mama", "mmb": "Momina", "mmc": "Michoacán Mazahua", "mmd": "Maonan", "mme": "Mae", "mmf": "Mundat", "mmg": "North Ambrym", "mmh": "Mehináku", "mmi": "Musar", "mmj": "Majhwar", "mmk": "Mukha-Dora", "mml": "Man Met", "mmm": "Maii", "mmn": "Mamanwa", "mmo": "Mangga Buang", "mmp": "Siawi", "mmq": "Musak", "mmr": "Western Xiangxi Miao", "mmt": "Malalamai", "mmu": "Mmaala", "mmv": "Miriti", "mmw": "Emae", "mmx": "Madak", "mmy": "Migaama", "mmz": "Mabaale", "mn": "Mongolian", "mna": "Mbula", "mnb": "Muna", "mnc": "Manchu", "mnd": "Mondé", "mne": "Naba", "mnf": "Mundani", "mng": "Eastern Mnong", "mnh": "Mono (Democratic Republic of Congo)", "mni": "Manipuri", "mnj": "Munji", "mnk": "Mandinka", "mnl": "Tiale", "mnm": "Mapena", "mnn": "Southern Mnong", "mno": "Manobo languages", "mnp": "Min Bei Chinese", "mnq": "Minriq", "mnr": "Mono (USA)", "mns": "Mansi", "mnu": "Mer", "mnv": "Rennell-Bellona", "mnw": "Mon", "mnx": "Manikion", "mny": "Manyawa", "mnz": "Moni", "moa": "Mwan", "moc": "Mocoví", "mod": "Mobilian", "moe": "Innu; Montagnais", "mog": "Mongondow", "moh": "Mohawk", "moi": "Mboi", "moj": "Monzombo", "mok": "Morori", "mom": "Mangue", "moo": "Monom", "mop": "Mopán Maya", "moq": "Mor (Bomberai Peninsula)", "mor": "Moro", "mos": "Mossi", "mot": "Barí", "mou": "Mogum", "mov": "Mohave", "mow": "Moi (Congo)", "mox": "Molima", "moy": "Shekkacho", "moz": "Mukulu; Gergiko", "mpa": "Mpoto", "mpb": "Malak Malak; Mullukmulluk", "mpc": "Mangarrayi", "mpd": "Machinere", "mpe": "Majang", "mpg": "Marba", "mph": "Maung", "mpi": "Mpade", "mpj": "Martu Wangka; Wangkajunga", "mpk": "Mbara (Chad)", "mpl": "Middle Watut", "mpm": "Yosondúa Mixtec", "mpn": "Mindiri", "mpo": "Miu", "mpp": "Migabac", "mpq": "Matís", "mpr": "Vangunu", "mps": "Dadibi", "mpt": "Mian", "mpu": "Makuráp", "mpv": "Mungkip", "mpw": "Mapidian", "mpx": "Misima-Panaeati", "mpy": "Mapia", "mpz": "Mpi", "mqa": "Maba (Indonesia)", "mqb": "Mbuko", "mqc": "Mangole", "mqe": "Matepi", "mqf": "Momuna", "mqg": "Kota Bangun Kutai Malay", "mqh": "Tlazoyaltepec Mixtec", "mqi": "Mariri", "mqj": "Mamasa", "mqk": "Rajah Kabunsuwan Manobo", "mql": "Mbelime", "mqm": "South Marquesan", "mqn": "Moronene", "mqo": "Modole", "mqp": "Manipa", "mqq": "Minokok", "mqr": "Mander", "mqs": "West Makian", "mqt": "Mok", "mqu": "Mandari", "mqv": "Mosimo", "mqw": "Murupi", "mqx": "Mamuju", "mqy": "Manggarai", "mqz": "Pano", "mr": "Marathi", "mra": "Mlabri", "mrb": "Marino", "mrc": "Maricopa", "mrd": "Western Magar", "mre": "Martha's Vineyard Sign Language", "mrf": "Elseng", "mrg": "Mising", "mrh": "Mara Chin", "mrj": "Western Mari", "mrk": "Hmwaveke", "mrl": "Mortlockese", "mrm": "Merlav; Mwerlap", "mrn": "Cheke Holo", "mro": "Mru", "mrp": "Morouas", "mrq": "North Marquesan", "mrr": "Maria (India)", "mrs": "Maragus", "mrt": "Marghi Central", "mru": "Mono (Cameroon)", "mrv": "Mangareva", "mrw": "Maranao", "mrx": "Maremgi; Dineor", "mry": "Mandaya", "mrz": "Marind", "ms": "Malay (macrolanguage)", "msb": "Masbatenyo", "msc": "Sankaran Maninka", "msd": "Yucatec Maya Sign Language", "mse": "Musey", "msf": 
"Mekwei", "msg": "Moraid", "msh": "Masikoro Malagasy", "msi": "Sabah Malay", "msj": "Ma (Democratic Republic of Congo)", "msk": "Mansaka", "msl": "Molof; Poule", "msm": "Agusan Manobo", "msn": "Vurës", "mso": "Mombum", "msp": "Maritsauá", "msq": "Caac", "msr": "Mongolian Sign Language", "mss": "West Masela", "msu": "Musom", "msv": "Maslam", "msw": "Mansoanka", "msx": "Moresada", "msy": "Aruamu", "msz": "Momare", "mt": "Maltese", "mta": "Cotabato Manobo", "mtb": "Anyin Morofo", "mtc": "Munit", "mtd": "Mualang", "mte": "Mono (Solomon Islands)", "mtf": "Murik (Papua New Guinea)", "mtg": "Una", "mth": "Munggui", "mti": "Maiwa (Papua New Guinea)", "mtj": "Moskona", "mtk": "Mbe'", "mtl": "Montol", "mtm": "Mator", "mtn": "Matagalpa", "mto": "Totontepec Mixe", "mtp": "Wichí Lhamtés Nocten", "mtq": "Muong", "mtr": "Mewari", "mts": "Yora", "mtt": "Mota", "mtu": "Tututepec Mixtec", "mtv": "Asaro'o", "mtw": "Southern Binukidnon", "mtx": "Tidaá Mixtec", "mty": "Nabi", "mua": "Mundang", "mub": "Mubi", "muc": "Ajumbu", "mud": "Mednyj Aleut", "mue": "Media Lengua", "mug": "Musgu", "muh": "Mündü", "mui": "Musi", "muj": "Mabire", "muk": "Mugom", "mum": "Maiwala", "mun": "Munda languages", "muo": "Nyong", "mup": "Malvi", "muq": "Eastern Xiangxi Miao", "mur": "Murle", "mus": "Creek", "mut": "Western Muria", "muu": "Yaaku", "muv": "Muthuvan", "mux": "Bo-Ung", "muy": "Muyang", "muz": "Mursi", "mva": "Manam", "mvb": "Mattole", "mvd": "Mamboru", "mve": "Marwari (Pakistan)", "mvf": "Peripheral Mongolian", "mvg": "Yucuañe Mixtec", "mvh": "Mulgi", "mvi": "Miyako", "mvk": "Mekmek", "mvl": "Mbara (Australia)", "mvn": "Minaveha", "mvo": "Marovo", "mvp": "Duri", "mvq": "Moere", "mvr": "Marau", "mvs": "Massep", "mvt": "Mpotovoro", "mvu": "Marfa", "mvv": "Tagal Murut", "mvw": "Machinga", "mvx": "Meoswar", "mvy": "Indus Kohistani", "mvz": "Mesqan", "mwa": "Mwatebu", "mwb": "Juwal", "mwc": "Are", "mwe": "Mwera (Chimwera)", "mwf": "Murrinh-Patha", "mwg": "Aiklep", "mwh": "Mouk-Aria", "mwi": "Labo; Ninde", "mwk": "Kita Maninkakan", "mwl": "Mirandese", "mwm": "Sar", "mwn": "Nyamwanga", "mwo": "Central Maewo", "mwp": "Kala Lagaw Ya", "mwq": "Mün Chin", "mwr": "Marwari", "mws": "Mwimbi-Muthambi", "mwt": "Moken", "mwu": "Mittu", "mwv": "Mentawai", "mww": "Hmong Daw", "mwz": "Moingi", "mxa": "Northwest Oaxaca Mixtec", "mxb": "Tezoatlán Mixtec", "mxc": "Manyika", "mxd": "Modang", "mxe": "Mele-Fila", "mxf": "Malgbe", "mxg": "Mbangala", "mxh": "Mvuba", "mxi": "Mozarabic", "mxj": "Miju-Mishmi; Geman Deng", "mxk": "Monumbo", "mxl": "Maxi Gbe", "mxm": "Meramera", "mxn": "Moi (Indonesia)", "mxo": "Mbowe", "mxp": "Tlahuitoltepec Mixe", "mxq": "Juquila Mixe", "mxr": "Murik (Malaysia)", "mxs": "Huitepec Mixtec", "mxt": "Jamiltepec Mixtec", "mxu": "Mada (Cameroon)", "mxv": "Metlatónoc Mixtec", "mxw": "Namo", "mxx": "Mahou; Mawukakan", "mxy": "Southeastern Nochixtlán Mixtec", "mxz": "Central Masela", "my": "Burmese", "myb": "Mbay", "myc": "Mayeka", "mye": "Myene", "myf": "Bambassi", "myg": "Manta", "myh": "Makah", "myj": "Mangayat", "myk": "Mamara Senoufo", "myl": "Moma", "mym": "Me'en", "myn": "Mayan languages", "myo": "Anfillo", "myp": "Pirahã", "myr": "Muniche", "mys": "Mesmes", "myu": "Mundurukú", "myv": "Erzya", "myw": "Muyuw", "myx": "Masaaba", "myy": "Macuna", "myz": "Classical Mandaic", "mza": "Santa María Zacatepec Mixtec", "mzb": "Tumzabt", "mzc": "Madagascar Sign Language", "mzd": "Malimba", "mze": "Morawa", "mzg": "Monastic Sign Language", "mzh": "Wichí Lhamtés Güisnay", "mzi": "Ixcatlán Mazatec", "mzj": "Manya", "mzk": "Nigeria 
Mambila", "mzl": "Mazatlán Mixe", "mzm": "Mumuye", "mzn": "Mazanderani", "mzo": "Matipuhy", "mzp": "Movima", "mzq": "Mori Atas", "mzr": "Marúbo", "mzs": "Macanese", "mzt": "Mintil", "mzu": "Inapang", "mzv": "Manza", "mzw": "Deg", "mzx": "Mawayana", "mzy": "Mozambican Sign Language", "mzz": "Maiadomu", "na": "Nauru", "naa": "Namla", "nab": "Southern Nambikuára", "nac": "Narak", "nae": "Naka'ela", "naf": "Nabak", "nag": "Naga Pidgin", "nah": "Nahuatl languages", "nai": "North American Indian languages", "naj": "Nalu", "nak": "Nakanai", "nal": "Nalik", "nam": "Ngan'gityemerri", "nan": "Min Nan Chinese", "nao": "Naaba", "nap": "Neapolitan", "naq": "Khoekhoe; Nama (Namibia)", "nar": "Iguta", "nas": "Naasioi", "nat": "Ca̱hungwa̱rya̱; Hungworo", "naw": "Nawuri", "nax": "Nakwi", "nay": "Ngarrindjeri", "naz": "Coatepec Nahuatl", "nb": "Norwegian Bokmål", "nba": "Nyemba", "nbb": "Ndoe", "nbc": "Chang Naga", "nbd": "Ngbinda", "nbe": "Konyak Naga", "nbg": "Nagarchal", "nbh": "Ngamo", "nbi": "Mao Naga", "nbj": "Ngarinyman", "nbk": "Nake", "nbm": "Ngbaka Ma'bo", "nbn": "Kuri", "nbo": "Nkukoli", "nbp": "Nnam", "nbq": "Nggem", "nbr": "Numana", "nbs": "Namibian Sign Language", "nbt": "Na", "nbu": "Rongmei Naga", "nbv": "Ngamambo", "nbw": "Southern Ngbandi", "nby": "Ningera", "nca": "Iyo", "ncb": "Central Nicobarese", "ncc": "Ponam", "ncd": "Nachering", "nce": "Yale", "ncf": "Notsi", "ncg": "Nisga'a", "nch": "Central Huasteca Nahuatl", "nci": "Classical Nahuatl", "ncj": "Northern Puebla Nahuatl", "nck": "Na-kara", "ncl": "Michoacán Nahuatl", "ncm": "Nambo", "ncn": "Nauna", "nco": "Sibe", "ncq": "Northern Katang", "ncr": "Ncane", "ncs": "Nicaraguan Sign Language", "nct": "Chothe Naga", "ncu": "Chumburung", "ncx": "Central Puebla Nahuatl", "ncz": "Natchez", "nd": "North Ndebele", "nda": "Ndasa", "ndb": "Kenswei Nsei", "ndc": "Ndau", "ndd": "Nde-Nsele-Nta", "ndf": "Nadruvian", "ndg": "Ndengereko", "ndh": "Ndali", "ndi": "Samba Leko", "ndj": "Ndamba", "ndk": "Ndaka", "ndl": "Ndolo", "ndm": "Ndam", "ndn": "Ngundi", "ndp": "Ndo", "ndq": "Ndombe", "ndr": "Ndoola", "nds": "Low German; Low Saxon", "ndt": "Ndunga", "ndu": "Dugun", "ndv": "Ndut", "ndw": "Ndobo", "ndx": "Nduga", "ndy": "Lutos", "ndz": "Ndogo", "ne": "Nepali (macrolanguage)", "nea": "Eastern Ngad'a", "neb": "Toura (Côte d'Ivoire)", "nec": "Nedebang", "ned": "Nde-Gbite", "nee": "Nêlêmwa-Nixumwak", "nef": "Nefamese", "neg": "Negidal", "neh": "Nyenkha", "nei": "Neo-Hittite", "nej": "Neko", "nek": "Neku", "nem": "Nemi", "nen": "Nengone", "neo": "Ná-Meo", "neq": "North Central Mixe", "ner": "Yahadian", "nes": "Bhoti Kinnauri", "net": "Nete", "neu": "Neo", "nev": "Nyaheun", "new": "Newari; Nepal Bhasa", "nex": "Neme", "ney": "Neyo", "nez": "Nez Perce", "nfa": "Dhao", "nfd": "Ahwai", "nfl": "Ayiwo; Äiwoo", "nfr": "Nafaanra", "nfu": "Mfumte", "ng": "Ndonga", "nga": "Ngbaka", "ngb": "Northern Ngbandi", "ngc": "Ngombe (Democratic Republic of Congo)", "ngd": "Ngando (Central African Republic)", "nge": "Ngemba", "ngf": "Trans-New Guinea languages", "ngg": "Ngbaka Manza", "ngh": "Nǁng", "ngi": "Ngizim", "ngj": "Ngie", "ngk": "Dalabon", "ngl": "Lomwe", "ngm": "Ngatik Men's Creole", "ngn": "Ngwo", "ngp": "Ngulu", "ngq": "Ngurimi; Ngoreme", "ngr": "Engdewu", "ngs": "Gvoko", "ngt": "Kriang; Ngeq", "ngu": "Guerrero Nahuatl", "ngv": "Nagumi", "ngw": "Ngwaba", "ngx": "Nggwahyi", "ngy": "Tibea", "ngz": "Ngungwel", "nha": "Nhanda", "nhb": "Beng", "nhc": "Tabasco Nahuatl", "nhd": "Chiripá; Ava Guaraní", "nhe": "Eastern Huasteca Nahuatl", "nhf": "Nhuwala", "nhg": "Tetelcingo 
Nahuatl", "nhh": "Nahari", "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl", "nhk": "Isthmus-Cosoleacaque Nahuatl", "nhm": "Morelos Nahuatl", "nhn": "Central Nahuatl", "nho": "Takuu", "nhp": "Isthmus-Pajapan Nahuatl", "nhq": "Huaxcaleca Nahuatl", "nhr": "Naro", "nht": "Ometepec Nahuatl", "nhu": "Noone", "nhv": "Temascaltepec Nahuatl", "nhw": "Western Huasteca Nahuatl", "nhx": "Isthmus-Mecayapan Nahuatl", "nhy": "Northern Oaxaca Nahuatl", "nhz": "Santa María La Alta Nahuatl", "nia": "Nias", "nib": "Nakame", "nic": "Niger-Kordofanian languages", "nid": "Ngandi", "nie": "Niellim", "nif": "Nek", "nig": "Ngalakgan", "nih": "Nyiha (Tanzania)", "nii": "Nii", "nij": "Ngaju", "nik": "Southern Nicobarese", "nil": "Nila", "nim": "Nilamba", "nin": "Ninzo", "nio": "Nganasan", "niq": "Nandi", "nir": "Nimboran", "nis": "Nimi", "nit": "Southeastern Kolami", "niu": "Niuean", "niv": "Gilyak", "niw": "Nimo", "nix": "Hema", "niy": "Ngiti", "niz": "Ningil", "nja": "Nzanyi", "njb": "Nocte Naga", "njd": "Ndonde Hamba", "njh": "Lotha Naga", "nji": "Gudanji", "njj": "Njen", "njl": "Njalgulgule", "njm": "Angami Naga", "njn": "Liangmai Naga", "njo": "Ao Naga", "njr": "Njerep", "njs": "Nisa", "njt": "Ndyuka-Trio Pidgin", "nju": "Ngadjunmaya", "njx": "Kunyi", "njy": "Njyem", "njz": "Nyishi", "nka": "Nkoya", "nkb": "Khoibu Naga", "nkc": "Nkongho", "nkd": "Koireng", "nke": "Duke", "nkf": "Inpui Naga", "nkg": "Nekgini", "nkh": "Khezha Naga", "nki": "Thangal Naga", "nkj": "Nakai", "nkk": "Nokuku", "nkm": "Namat", "nkn": "Nkangala", "nko": "Nkonya", "nkp": "Niuatoputapu", "nkq": "Nkami", "nkr": "Nukuoro", "nks": "North Asmat", "nkt": "Nyika (Tanzania)", "nku": "Bouna Kulango", "nkv": "Nyika (Malawi and Zambia)", "nkw": "Nkutu", "nkx": "Nkoroo", "nkz": "Nkari", "nl": "Dutch; Flemish", "nla": "Ngombale", "nlc": "Nalca", "nle": "East Nyala", "nlg": "Gela", "nli": "Grangali", "nlj": "Nyali", "nlk": "Ninia Yali", "nll": "Nihali", "nlm": "Mankiyali", "nlo": "Ngul", "nlq": "Lao Naga", "nlu": "Nchumbulu", "nlv": "Orizaba Nahuatl", "nlw": "Walangama", "nlx": "Nahali", "nly": "Nyamal", "nlz": "Nalögo", "nma": "Maram Naga", "nmb": "Big Nambas; V'ënen Taut", "nmc": "Ngam", "nmd": "Ndumu", "nme": "Mzieme Naga", "nmf": "Tangkhul Naga (India)", "nmg": "Kwasio", "nmh": "Monsang Naga", "nmi": "Nyam", "nmj": "Ngombe (Central African Republic)", "nmk": "Namakura", "nml": "Ndemli", "nmm": "Manangba", "nmn": "ǃXóõ", "nmo": "Moyon Naga", "nmp": "Nimanbur", "nmq": "Nambya", "nmr": "Nimbari", "nms": "Letemboi", "nmt": "Namonuito", "nmu": "Northeast Maidu", "nmv": "Ngamini", "nmw": "Nimoa; Rifao", "nmx": "Nama (Papua New Guinea)", "nmy": "Namuyi", "nmz": "Nawdm", "nn": "Norwegian Nynorsk", "nna": "Nyangumarta", "nnb": "Nande", "nnc": "Nancere", "nnd": "West Ambae", "nne": "Ngandyera", "nnf": "Ngaing", "nng": "Maring Naga", "nnh": "Ngiemboon", "nni": "North Nuaulu", "nnj": "Nyangatom", "nnk": "Nankina", "nnl": "Northern Rengma Naga", "nnm": "Namia", "nnn": "Ngete", "nnp": "Wancho Naga", "nnq": "Ngindo", "nnr": "Narungga", "nnt": "Nanticoke", "nnu": "Dwang", "nnv": "Nugunu (Australia)", "nnw": "Southern Nuni", "nny": "Nyangga", "nnz": "Nda'nda'", "no": "Norwegian", "noa": "Woun Meu", "noc": "Nuk", "nod": "Northern Thai", "noe": "Nimadi", "nof": "Nomane", "nog": "Nogai", "noh": "Nomu", "noi": "Noiri", "noj": "Nonuya", "nok": "Nooksack", "nol": "Nomlaki", "nom": "Nocamán", "non": "Old Norse", "nop": "Numanggang", "noq": "Ngongo", "nos": "Eastern Nisu", "not": "Nomatsiguenga", "nou": "Ewage-Notu", "nov": "Novial", "now": "Nyambo", "noy": "Noy", "noz": 
"Nayi", "npa": "Nar Phu", "npb": "Nupbikha", "npg": "Ponyo-Gongwang Naga", "nph": "Phom Naga", "npi": "Nepali (individual language)", "npl": "Southeastern Puebla Nahuatl", "npn": "Mondropolon", "npo": "Pochuri Naga", "nps": "Nipsan", "npu": "Puimei Naga", "npx": "Noipx", "npy": "Napu", "nqg": "Southern Nago", "nqk": "Kura Ede Nago", "nql": "Ngendelengo", "nqm": "Ndom", "nqn": "Nen", "nqo": "N'Ko; N’Ko", "nqq": "Kyan-Karyaw Naga", "nqt": "Nteng", "nqy": "Akyaung Ari Naga", "nr": "South Ndebele", "nra": "Ngom", "nrb": "Nara", "nrc": "Noric", "nre": "Southern Rengma Naga", "nrf": "Jèrriais; Guernésiais", "nrg": "Narango", "nri": "Chokri Naga", "nrk": "Ngarla", "nrl": "Ngarluma", "nrm": "Narom", "nrn": "Norn", "nrp": "North Picene", "nrr": "Norra; Nora", "nrt": "Northern Kalapuya", "nru": "Narua", "nrx": "Ngurmbur", "nrz": "Lala", "nsa": "Sangtam Naga", "nsb": "Lower Nossob", "nsc": "Nshi", "nsd": "Southern Nisu", "nse": "Nsenga", "nsf": "Northwestern Nisu", "nsg": "Ngasa", "nsh": "Ngoshie", "nsi": "Nigerian Sign Language", "nsk": "Naskapi", "nsl": "Norwegian Sign Language", "nsm": "Sumi Naga", "nsn": "Nehan", "nso": "Pedi; Northern Sotho; Sepedi", "nsp": "Nepalese Sign Language", "nsq": "Northern Sierra Miwok", "nsr": "Maritime Sign Language", "nss": "Nali", "nst": "Tase Naga", "nsu": "Sierra Negra Nahuatl", "nsv": "Southwestern Nisu", "nsw": "Navut", "nsx": "Nsongo", "nsy": "Nasal", "nsz": "Nisenan", "ntd": "Northern Tidung", "nte": "Nathembo", "ntg": "Ngantangarra", "nti": "Natioro", "ntj": "Ngaanyatjarra", "ntk": "Ikoma-Nata-Isenye", "ntm": "Nateni", "nto": "Ntomba", "ntp": "Northern Tepehuan", "ntr": "Delo", "ntu": "Natügu", "ntw": "Nottoway", "ntx": "Tangkhul Naga (Myanmar)", "nty": "Mantsi", "ntz": "Natanzi", "nua": "Yuanga", "nub": "Nubian languages", "nuc": "Nukuini", "nud": "Ngala", "nue": "Ngundu", "nuf": "Nusu", "nug": "Nungali", "nuh": "Ndunda", "nui": "Ngumbi", "nuj": "Nyole", "nuk": "Nuu-chah-nulth; Nuuchahnulth", "nul": "Nusa Laut", "num": "Niuafo'ou", "nun": "Anong", "nuo": "Nguôn", "nup": "Nupe-Nupe-Tako", "nuq": "Nukumanu", "nur": "Nukuria", "nus": "Nuer", "nut": "Nung (Viet Nam)", "nuu": "Ngbundu", "nuv": "Northern Nuni", "nuw": "Nguluwan", "nux": "Mehek", "nuy": "Nunggubuyu", "nuz": "Tlamacazapa Nahuatl", "nv": "Navajo; Navaho", "nvh": "Nasarian", "nvm": "Namiae", "nvo": "Nyokon", "nwa": "Nawathinehena", "nwb": "Nyabwa", "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari", "nwe": "Ngwe", "nwg": "Ngayawung", "nwi": "Southwest Tanna", "nwm": "Nyamusa-Molo", "nwo": "Nauo", "nwr": "Nawaru", "nww": "Ndwewe", "nwx": "Middle Newar", "nwy": "Nottoway-Meherrin", "nxa": "Nauete", "nxd": "Ngando (Democratic Republic of Congo)", "nxe": "Nage", "nxg": "Ngad'a", "nxi": "Nindi", "nxk": "Koki Naga", "nxl": "South Nuaulu", "nxm": "Numidian", "nxn": "Ngawun", "nxo": "Ndambomo", "nxq": "Naxi", "nxr": "Ninggerum", "nxx": "Nafri", "ny": "Nyanja; Chewa; Chichewa", "nyb": "Nyangbo", "nyc": "Nyanga-li", "nyd": "Nyore; Olunyole", "nye": "Nyengo", "nyf": "Giryama; Kigiryama", "nyg": "Nyindu", "nyh": "Nyikina", "nyi": "Ama (Sudan)", "nyj": "Nyanga", "nyk": "Nyaneka", "nyl": "Nyeu", "nym": "Nyamwezi", "nyn": "Nyankole", "nyo": "Nyoro", "nyp": "Nyang'i", "nyq": "Nayini", "nyr": "Nyiha (Malawi)", "nys": "Nyungar", "nyt": "Nyawaygi", "nyu": "Nyungwe", "nyv": "Nyulnyul", "nyw": "Nyaw", "nyx": "Nganyaywana", "nyy": "Nyakyusa-Ngonde", "nza": "Tigon Mbembe", "nzb": "Njebi", "nzd": "Nzadi", "nzi": "Nzima", "nzk": "Nzakara", "nzm": "Zeme Naga", "nzs": "New Zealand Sign Language", "nzu": 
"Teke-Nzikou", "nzy": "Nzakambay", "nzz": "Nanga Dama Dogon", "oaa": "Orok", "oac": "Oroch", "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)", "oav": "Old Avar", "obi": "Obispeño", "obk": "Southern Bontok", "obl": "Oblo", "obm": "Moabite", "obo": "Obo Manobo", "obr": "Old Burmese", "obt": "Old Breton", "obu": "Obulom", "oc": "Occitan (post 1500)", "oca": "Ocaina", "och": "Old Chinese", "ocm": "Old Cham", "oco": "Old Cornish", "ocu": "Atzingo Matlatzinca", "oda": "Odut", "odk": "Od", "odt": "Old Dutch", "odu": "Odual", "ofo": "Ofo", "ofs": "Old Frisian", "ofu": "Efutop", "ogb": "Ogbia", "ogc": "Ogbah", "oge": "Old Georgian", "ogg": "Ogbogolo", "ogo": "Khana", "ogu": "Ogbronuagum", "oht": "Old Hittite", "ohu": "Old Hungarian", "oia": "Oirata", "oie": "Okolie", "oin": "Inebu One", "oj": "Ojibwa", "ojb": "Northwestern Ojibwa", "ojc": "Central Ojibwa", "ojg": "Eastern Ojibwa", "ojp": "Old Japanese", "ojs": "Severn Ojibwa", "ojv": "Ontong Java", "ojw": "Western Ojibwa", "oka": "Okanagan", "okb": "Okobo", "okc": "Kobo", "okd": "Okodia", "oke": "Okpe (Southwestern Edo)", "okg": "Koko Babangk", "okh": "Koresh-e Rostam", "oki": "Okiek", "okj": "Oko-Juwoi", "okk": "Kwamtim One", "okl": "Old Kentish Sign Language", "okm": "Middle Korean (10th-16th cent.)", "okn": "Oki-No-Erabu", "oko": "Old Korean (3rd-9th cent.)", "okr": "Kirike", "oks": "Oko-Eni-Osayen", "oku": "Oku", "okv": "Orokaiva", "okx": "Okpe (Northwestern Edo)", "okz": "Old Khmer", "ola": "Walungge", "old": "Mochi", "ole": "Olekha", "olk": "Olkol", "olm": "Oloma", "olo": "Livvi", "olr": "Olrat", "olt": "Old Lithuanian", "olu": "Kuvale", "om": "Oromo", "oma": "Omaha-Ponca", "omb": "East Ambae", "omc": "Mochica", "omg": "Omagua", "omi": "Omi", "omk": "Omok", "oml": "Ombo", "omn": "Minoan", "omo": "Utarmbung", "omp": "Old Manipuri", "omq": "Oto-Manguean languages", "omr": "Old Marathi", "omt": "Omotik", "omu": "Omurano", "omv": "Omotic languages", "omw": "South Tairora", "omx": "Old Mon", "omy": "Old Malay", "ona": "Ona", "onb": "Lingao", "one": "Oneida", "ong": "Olo", "oni": "Onin", "onj": "Onjob", "onk": "Kabore One", "onn": "Onobasulu", "ono": "Onondaga", "onp": "Sartang", "onr": "Northern One", "ons": "Ono", "ont": "Ontenu", "onu": "Unua", "onw": "Old Nubian", "onx": "Onin Based Pidgin", "ood": "Tohono O'odham", "oog": "Ong", "oon": "Önge", "oor": "Oorlams", "oos": "Old Ossetic", "opa": "Okpamheri", "opk": "Kopkaka", "opm": "Oksapmin", "opo": "Opao", "opt": "Opata", "opy": "Ofayé", "or": "Oriya (macrolanguage); Odia (macrolanguage)", "ora": "Oroha", "orc": "Orma", "ore": "Orejón", "org": "Oring", "orh": "Oroqen", "orn": "Orang Kanaq", "oro": "Orokolo", "orr": "Oruma", "ors": "Orang Seletar", "ort": "Adivasi Oriya", "oru": "Ormuri", "orv": "Old Russian", "orw": "Oro Win", "orx": "Oro", "ory": "Odia (individual language); Oriya (individual language)", "orz": "Ormu", "os": "Ossetian; Ossetic", "osa": "Osage", "osc": "Oscan", "osi": "Osing", "osn": "Old Sundanese", "oso": "Ososo", "osp": "Old Spanish", "ost": "Osatu", "osu": "Southern One", "osx": "Old Saxon", "ota": "Ottoman Turkish (1500-1928)", "otb": "Old Tibetan", "otd": "Ot Danum", "ote": "Mezquital Otomi", "oti": "Oti", "otk": "Old Turkish", "otl": "Tilapa Otomi", "otm": "Eastern Highland Otomi", "otn": "Tenango Otomi", "oto": "Otomian languages", "otq": "Querétaro Otomi", "otr": "Otoro", "ots": "Estado de México Otomi", "ott": "Temoaya Otomi", "otu": "Otuke", "otw": "Ottawa", "otx": "Texcatepec Otomi", "oty": "Old Tamil", "otz": "Ixtenco Otomi", "oua": "Tagargrent", 
"oub": "Glio-Oubi", "oue": "Oune", "oui": "Old Uighur", "oum": "Ouma", "ovd": "Elfdalian; Övdalian", "owi": "Owiniga", "owl": "Old Welsh", "oyb": "Oy", "oyd": "Oyda", "oym": "Wayampi", "oyy": "Oya'oya", "ozm": "Koonzime", "pa": "Panjabi; Punjabi", "paa": "Papuan languages", "pab": "Parecís", "pac": "Pacoh", "pad": "Paumarí", "pae": "Pagibete", "paf": "Paranawát", "pag": "Pangasinan", "pah": "Tenharim", "pai": "Pe", "pak": "Parakanã", "pal": "Pahlavi", "pam": "Pampanga; Kapampangan", "pao": "Northern Paiute", "pap": "Papiamento", "paq": "Parya", "par": "Panamint; Timbisha", "pas": "Papasena", "pau": "Palauan", "pav": "Pakaásnovos", "paw": "Pawnee", "pax": "Pankararé", "pay": "Pech", "paz": "Pankararú", "pbb": "Páez", "pbc": "Patamona", "pbe": "Mezontla Popoloca", "pbf": "Coyotepec Popoloca", "pbg": "Paraujano", "pbh": "E'ñapa Woromaipu", "pbi": "Parkwa", "pbl": "Mak (Nigeria)", "pbm": "Puebla Mazatec", "pbn": "Kpasam", "pbo": "Papel", "pbp": "Badyara", "pbr": "Pangwa", "pbs": "Central Pame", "pbt": "Southern Pashto", "pbu": "Northern Pashto", "pbv": "Pnar", "pby": "Pyu (Papua New Guinea)", "pca": "Santa Inés Ahuatempan Popoloca", "pcb": "Pear", "pcc": "Bouyei", "pcd": "Picard", "pce": "Ruching Palaung", "pcf": "Paliyan", "pcg": "Paniya", "pch": "Pardhan", "pci": "Duruwa", "pcj": "Parenga", "pck": "Paite Chin", "pcl": "Pardhi", "pcm": "Nigerian Pidgin", "pcn": "Piti", "pcp": "Pacahuara", "pcw": "Pyapun", "pda": "Anam", "pdc": "Pennsylvania German", "pdi": "Pa Di", "pdn": "Podena; Fedan", "pdo": "Padoe", "pdt": "Plautdietsch", "pdu": "Kayan", "pea": "Peranakan Indonesian", "peb": "Eastern Pomo", "ped": "Mala (Papua New Guinea)", "pee": "Taje", "pef": "Northeastern Pomo", "peg": "Pengo", "peh": "Bonan", "pei": "Chichimeca-Jonaz", "pej": "Northern Pomo", "pek": "Penchal", "pel": "Pekal", "pem": "Phende", "peo": "Old Persian (ca. 
600-400 B.C.)", "pep": "Kunja", "peq": "Southern Pomo", "pes": "Iranian Persian", "pev": "Pémono", "pex": "Petats", "pey": "Petjo", "pez": "Eastern Penan", "pfa": "Pááfang", "pfe": "Pere", "pfl": "Pfaelzisch", "pga": "Sudanese Creole Arabic", "pgd": "Gāndhārī", "pgg": "Pangwali", "pgi": "Pagi", "pgk": "Rerep", "pgl": "Primitive Irish", "pgn": "Paelignian", "pgs": "Pangseng", "pgu": "Pagu", "pgz": "Papua New Guinean Sign Language", "pha": "Pa-Hng", "phd": "Phudagi", "phg": "Phuong", "phh": "Phukha", "phi": "Philippine languages", "phj": "Pahari", "phk": "Phake", "phl": "Phalura; Palula", "phm": "Phimbi", "phn": "Phoenician", "pho": "Phunoi", "phq": "Phana'", "phr": "Pahari-Potwari", "pht": "Phu Thai", "phu": "Phuan", "phv": "Pahlavani", "phw": "Phangduwali", "pi": "Pali", "pia": "Pima Bajo", "pib": "Yine", "pic": "Pinji", "pid": "Piaroa", "pie": "Piro", "pif": "Pingelapese", "pig": "Pisabo", "pih": "Pitcairn-Norfolk", "pij": "Pijao", "pil": "Yom", "pim": "Powhatan", "pin": "Piame", "pio": "Piapoco", "pip": "Pero", "pir": "Piratapuyo", "pis": "Pijin", "pit": "Pitta Pitta", "piu": "Pintupi-Luritja", "piv": "Pileni; Vaeakau-Taumako", "piw": "Pimbwe", "pix": "Piu", "piy": "Piya-Kwonci", "piz": "Pije", "pjt": "Pitjantjatjara", "pka": "Ardhamāgadhī Prākrit", "pkb": "Pokomo; Kipfokomo", "pkc": "Paekche", "pkg": "Pak-Tong", "pkh": "Pankhu", "pkn": "Pakanha", "pko": "Pökoot", "pkp": "Pukapuka", "pkr": "Attapady Kurumba", "pks": "Pakistan Sign Language", "pkt": "Maleng", "pku": "Paku", "pl": "Polish", "pla": "Miani", "plb": "Polonombauk", "plc": "Central Palawano", "pld": "Polari", "ple": "Palu'e", "plf": "Central Malayo-Polynesian languages", "plg": "Pilagá", "plh": "Paulohi", "plj": "Polci", "plk": "Kohistani Shina", "pll": "Shwe Palaung", "pln": "Palenquero", "plo": "Oluta Popoluca", "plq": "Palaic", "plr": "Palaka Senoufo", "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca", "plt": "Plateau Malagasy", "plu": "Palikúr", "plv": "Southwest Palawano", "plw": "Brooke's Point Palawano", "ply": "Bolyu", "plz": "Paluan", "pma": "Paama", "pmb": "Pambia", "pmd": "Pallanganmiddang", "pme": "Pwaamei", "pmf": "Pamona", "pmh": "Māhārāṣṭri Prākrit", "pmi": "Northern Pumi", "pmj": "Southern Pumi", "pmk": "Pamlico", "pml": "Lingua Franca", "pmm": "Pomo", "pmn": "Pam", "pmo": "Pom", "pmq": "Northern Pame", "pmr": "Paynamar", "pms": "Piemontese", "pmt": "Tuamotuan", "pmw": "Plains Miwok", "pmx": "Poumei Naga", "pmy": "Papuan Malay", "pmz": "Southern Pame", "pna": "Punan Bah-Biau", "pnb": "Western Panjabi", "pnc": "Pannei", "pnd": "Mpinda", "pne": "Western Penan", "png": "Pangu; Pongu", "pnh": "Penrhyn", "pni": "Aoheng", "pnj": "Pinjarup", "pnk": "Paunaka", "pnl": "Paleni", "pnm": "Punan Batu 1", "pnn": "Pinai-Hagahai", "pno": "Panobo", "pnp": "Pancana", "pnq": "Pana (Burkina Faso)", "pnr": "Panim", "pns": "Ponosakan", "pnt": "Pontic", "pnu": "Jiongnai Bunu", "pnv": "Pinigura", "pnw": "Banyjima; Panytyima", "pnx": "Phong-Kniang", "pny": "Pinyin", "pnz": "Pana (Central African Republic)", "poc": "Poqomam", "poe": "San Juan Atzingo Popoloca", "pof": "Poke", "pog": "Potiguára", "poh": "Poqomchi'", "poi": "Highland Popoluca", "pok": "Pokangá", "pom": "Southeastern Pomo", "pon": "Pohnpeian", "poo": "Central Pomo", "pop": "Pwapwâ", "poq": "Texistepec Popoluca", "pos": "Sayula Popoluca", "pot": "Potawatomi", "pov": "Upper Guinea Crioulo", "pow": "San Felipe Otlaltepec Popoloca", "pox": "Polabian", "poy": "Pogolo", "poz": "Malayo-Polynesian languages", "ppe": "Papi", "ppi": "Paipai", "ppk": "Uma", 
"ppl": "Pipil; Nicarao", "ppm": "Papuma", "ppn": "Papapana", "ppo": "Folopa", "ppp": "Pelende", "ppq": "Pei", "pps": "San Luís Temalacayuca Popoloca", "ppt": "Pare", "ppu": "Papora", "pqa": "Pa'a", "pqe": "Eastern Malayo-Polynesian languages", "pqm": "Malecite-Passamaquoddy", "pqw": "Western Malayo-Polynesian languages", "pra": "Prakrit languages", "prc": "Parachi", "prd": "Parsi-Dari", "pre": "Principense", "prf": "Paranan", "prg": "Prussian", "prh": "Porohanon", "pri": "Paicî", "prk": "Parauk", "prl": "Peruvian Sign Language", "prm": "Kibiri", "prn": "Prasuni", "pro": "Old Provençal (to 1500); Old Occitan (to 1500)", "prp": "Parsi", "prq": "Ashéninka Perené", "prr": "Puri", "prs": "Dari; Afghan Persian", "prt": "Phai", "pru": "Puragi", "prw": "Parawen", "prx": "Purik", "prz": "Providencia Sign Language", "ps": "Pushto; Pashto", "psa": "Asue Awyu", "psc": "Iranian Sign Language; Persian Sign Language", "psd": "Plains Indian Sign Language", "pse": "Central Malay", "psg": "Penang Sign Language", "psh": "Southwest Pashai; Southwest Pashayi", "psi": "Southeast Pashai; Southeast Pashayi", "psl": "Puerto Rican Sign Language", "psm": "Pauserna", "psn": "Panasuan", "pso": "Polish Sign Language", "psp": "Philippine Sign Language", "psq": "Pasi", "psr": "Portuguese Sign Language", "pss": "Kaulong", "pst": "Central Pashto", "psu": "Sauraseni Prākrit", "psw": "Port Sandwich", "psy": "Piscataway", "pt": "Portuguese", "pta": "Pai Tavytera", "pth": "Pataxó Hã-Ha-Hãe", "pti": "Pindiini; Wangkatha", "ptn": "Patani", "pto": "Zo'é", "ptp": "Patep", "ptq": "Pattapu", "ptr": "Piamatsina", "ptt": "Enrekang", "ptu": "Bambam", "ptv": "Port Vato", "ptw": "Pentlatch", "pty": "Pathiya", "pua": "Western Highland Purepecha", "pub": "Purum", "puc": "Punan Merap", "pud": "Punan Aput", "pue": "Puelche", "puf": "Punan Merah", "pug": "Phuie", "pui": "Puinave", "puj": "Punan Tubu", "pum": "Puma", "puo": "Puoc", "pup": "Pulabu", "puq": "Puquina", "pur": "Puruborá", "put": "Putoh", "puu": "Punu", "puw": "Puluwatese", "pux": "Puare", "puy": "Purisimeño", "pwa": "Pawaia", "pwb": "Panawa", "pwg": "Gapapaiwa", "pwi": "Patwin", "pwm": "Molbog", "pwn": "Paiwan", "pwo": "Pwo Western Karen", "pwr": "Powari", "pww": "Pwo Northern Karen", "pxm": "Quetzaltepec Mixe", "pye": "Pye Krumen", "pym": "Fyam", "pyn": "Poyanáwa", "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay", "pyu": "Puyuma", "pyx": "Pyu (Myanmar)", "pyy": "Pyen", "pzh": "Pazeh", "pzn": "Jejara Naga; Para Naga", "qu": "Quechua", "qua": "Quapaw", "qub": "Huallaga Huánuco Quechua", "quc": "K'iche'; Quiché", "qud": "Calderón Highland Quichua", "quf": "Lambayeque Quechua", "qug": "Chimborazo Highland Quichua", "quh": "South Bolivian Quechua", "qui": "Quileute", "quk": "Chachapoyas Quechua", "qul": "North Bolivian Quechua", "qum": "Sipacapense", "qun": "Quinault", "qup": "Southern Pastaza Quechua", "quq": "Quinqui", "qur": "Yanahuanca Pasco Quechua", "qus": "Santiago del Estero Quichua", "quv": "Sacapulteco", "quw": "Tena Lowland Quichua", "qux": "Yauyos Quechua", "quy": "Ayacucho Quechua", "quz": "Cusco Quechua", "qva": "Ambo-Pasco Quechua", "qvc": "Cajamarca Quechua", "qve": "Eastern Apurímac Quechua", "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua", "qvi": "Imbabura Highland Quichua", "qvj": "Loja Highland Quichua", "qvl": "Cajatambo North Lima Quechua", "qvm": "Margos-Yarowilca-Lauricocha Quechua", "qvn": "North Junín Quechua", "qvo": "Napo Lowland Quechua", "qvp": "Pacaraos Quechua", "qvs": "San Martín Quechua", "qvw": "Huaylla Wanca Quechua", "qvy": "Queyu", 
"qvz": "Northern Pastaza Quichua", "qwa": "Corongo Ancash Quechua", "qwc": "Classical Quechua", "qwe": "Quechuan (family)", "qwh": "Huaylas Ancash Quechua", "qwm": "Kuman (Russia)", "qws": "Sihuas Ancash Quechua", "qwt": "Kwalhioqua-Tlatskanai", "qxa": "Chiquián Ancash Quechua", "qxc": "Chincha Quechua", "qxh": "Panao Huánuco Quechua", "qxl": "Salasaca Highland Quichua", "qxn": "Northern Conchucos Ancash Quechua", "qxo": "Southern Conchucos Ancash Quechua", "qxp": "Puno Quechua", "qxq": "Qashqa'i", "qxr": "Cañar Highland Quichua", "qxs": "Southern Qiang", "qxt": "Santa Ana de Tusi Pasco Quechua", "qxu": "Arequipa-La Unión Quechua", "qxw": "Jauja Wanca Quechua", "qya": "Quenya", "qyp": "Quiripi", "raa": "Dungmali", "rab": "Camling", "rac": "Rasawa", "rad": "Rade", "raf": "Western Meohang", "rag": "Logooli; Lulogooli", "rah": "Rabha", "rai": "Ramoaaina", "raj": "Rajasthani", "rak": "Tulu-Bohuai", "ral": "Ralte", "ram": "Canela", "ran": "Riantana", "rao": "Rao", "rap": "Rapanui", "raq": "Saam", "rar": "Rarotongan; Cook Islands Maori", "ras": "Tegali", "rat": "Razajerdi", "rau": "Raute", "rav": "Sampang", "raw": "Rawang", "rax": "Rang", "ray": "Rapa", "raz": "Rahambuu", "rbb": "Rumai Palaung", "rbk": "Northern Bontok", "rbl": "Miraya Bikol", "rbp": "Barababaraba", "rcf": "Réunion Creole French", "rdb": "Rudbari", "rea": "Rerau", "reb": "Rembong", "ree": "Rejang Kayan", "reg": "Kara (Tanzania)", "rei": "Reli", "rej": "Rejang", "rel": "Rendille", "rem": "Remo", "ren": "Rengao", "rer": "Rer Bare", "res": "Reshe", "ret": "Retta", "rey": "Reyesano", "rga": "Roria", "rge": "Romano-Greek", "rgk": "Rangkas", "rgn": "Romagnol", "rgr": "Resígaro", "rgs": "Southern Roglai", "rgu": "Ringgou", "rhg": "Rohingya", "rhp": "Yahang", "ria": "Riang (India)", "rib": "Bribri Sign Language", "rif": "Tarifit", "ril": "Riang Lang; Riang (Myanmar)", "rim": "Nyaturu", "rin": "Nungu", "rir": "Ribun", "rit": "Ritharrngu", "riu": "Riung", "rjg": "Rajong", "rji": "Raji", "rjs": "Rajbanshi", "rka": "Kraol", "rkb": "Rikbaktsa", "rkh": "Rakahanga-Manihiki", "rki": "Rakhine", "rkm": "Marka", "rkt": "Rangpuri; Kamta", "rkw": "Arakwal", "rm": "Romansh", "rma": "Rama", "rmb": "Rembarrnga", "rmc": "Carpathian Romani", "rmd": "Traveller Danish", "rme": "Angloromani", "rmf": "Kalo Finnish Romani", "rmg": "Traveller Norwegian", "rmh": "Murkim", "rmi": "Lomavren", "rmk": "Romkun", "rml": "Baltic Romani", "rmm": "Roma", "rmn": "Balkan Romani", "rmo": "Sinte Romani", "rmp": "Rempi", "rmq": "Caló", "rms": "Romanian Sign Language", "rmt": "Domari", "rmu": "Tavringer Romani", "rmv": "Romanova", "rmw": "Welsh Romani", "rmx": "Romam", "rmy": "Vlax Romani", "rmz": "Marma", "rn": "Rundi", "rnb": "Brunca Sign Language", "rnd": "Ruund", "rng": "Ronga", "rnl": "Ranglong", "rnn": "Roon", "rnp": "Rongpo", "rnr": "Nari Nari", "rnw": "Rungwa", "ro": "Romanian; Moldavian; Moldovan", "roa": "Romance languages", "rob": "Tae'", "roc": "Cacgia Roglai", "rod": "Rogo", "roe": "Ronji", "rof": "Rombo", "rog": "Northern Roglai", "rol": "Romblomanon", "rom": "Romany", "roo": "Rotokas", "rop": "Kriol", "ror": "Rongga", "rou": "Runga", "row": "Dela-Oenale", "rpn": "Repanbitip", "rpt": "Rapting", "rri": "Ririo", "rro": "Waima", "rrt": "Arritinngithigh", "rsb": "Romano-Serbian", "rsk": "Ruthenian; Rusyn", "rsl": "Russian Sign Language", "rsm": "Miriwoong Sign Language", "rsn": "Rwandan Sign Language", "rtc": "Rungtu Chin", "rth": "Ratahan", "rtm": "Rotuman", "rts": "Yurats", "rtw": "Rathawi", "ru": "Russian", "rub": "Gungu", "ruc": "Ruuli", "rue": "Rusyn", "ruf": 
"Luguru", "rug": "Roviana", "ruh": "Ruga", "rui": "Rufiji", "ruk": "Che", "ruo": "Istro Romanian", "rup": "Macedo-Romanian; Aromanian; Arumanian", "ruq": "Megleno Romanian", "rut": "Rutul", "ruu": "Lanas Lobu", "ruy": "Mala (Nigeria)", "ruz": "Ruma", "rw": "Kinyarwanda", "rwa": "Rawo", "rwk": "Rwa", "rwl": "Ruwila", "rwm": "Amba (Uganda)", "rwo": "Rawa", "rwr": "Marwari (India)", "rxd": "Ngardi", "rxw": "Karuwali; Garuwali", "ryn": "Northern Amami-Oshima", "rys": "Yaeyama", "ryu": "Central Okinawan", "rzh": "Rāziḥī", "sa": "Sanskrit", "saa": "Saba", "sab": "Buglere", "sac": "Meskwaki", "sad": "Sandawe", "sae": "Sabanê", "saf": "Safaliba", "sah": "Yakut", "sai": "South American Indian languages", "saj": "Sahu", "sak": "Sake", "sal": "Salishan languages", "sam": "Samaritan Aramaic", "sao": "Sause", "saq": "Samburu", "sar": "Saraveca", "sas": "Sasak", "sat": "Santali", "sau": "Saleman", "sav": "Saafi-Saafi", "saw": "Sawi", "sax": "Sa", "say": "Saya", "saz": "Saurashtra", "sba": "Ngambay", "sbb": "Simbo", "sbc": "Kele (Papua New Guinea)", "sbd": "Southern Samo", "sbe": "Saliba", "sbf": "Chabu; Shabo", "sbg": "Seget", "sbh": "Sori-Harengan", "sbi": "Seti", "sbj": "Surbakhal", "sbk": "Safwa", "sbl": "Botolan Sambal", "sbm": "Sagala", "sbn": "Sindhi Bhil", "sbo": "Sabüm", "sbp": "Sangu (Tanzania)", "sbq": "Sileibi", "sbr": "Sembakung Murut", "sbs": "Subiya", "sbt": "Kimki", "sbu": "Stod Bhoti", "sbv": "Sabine", "sbw": "Simba", "sbx": "Seberuang", "sby": "Soli", "sbz": "Sara Kaba", "sc": "Sardinian", "scb": "Chut", "sce": "Dongxiang", "scf": "San Miguel Creole French", "scg": "Sanggau", "sch": "Sakachep", "sci": "Sri Lankan Creole Malay", "sck": "Sadri", "scl": "Shina", "scn": "Sicilian", "sco": "Scots", "scp": "Hyolmo; Helambu Sherpa", "scq": "Sa'och", "scs": "North Slavey", "sct": "Southern Katang", "scu": "Shumcho", "scv": "Sheni", "scw": "Sha", "scx": "Sicel", "sd": "Sindhi", "sda": "Toraja-Sa'dan", "sdb": "Shabak", "sdc": "Sassarese Sardinian", "sde": "Surubu", "sdf": "Sarli", "sdg": "Savi", "sdh": "Southern Kurdish", "sdj": "Suundi", "sdk": "Sos Kundi", "sdl": "Saudi Arabian Sign Language", "sdn": "Gallurese Sardinian", "sdo": "Bukar-Sadung Bidayuh", "sdp": "Sherdukpen", "sdq": "Semandang", "sdr": "Oraon Sadri", "sds": "Sened", "sdt": "Shuadit", "sdu": "Sarudu", "sdv": "Eastern Sudanic languages", "sdx": "Sibu Melanau", "sdz": "Sallands", "se": "Northern Sami", "sea": "Semai", "seb": "Shempire Senoufo", "sec": "Sechelt", "sed": "Sedang", "see": "Seneca", "sef": "Cebaara Senoufo", "seg": "Segeju", "seh": "Sena", "sei": "Seri", "sej": "Sene", "sek": "Sekani", "sel": "Selkup", "sem": "Semitic languages", "sen": "Nanerigé Sénoufo", "seo": "Suarmin", "sep": "Sìcìté Sénoufo", "seq": "Senara Sénoufo", "ser": "Serrano", "ses": "Koyraboro Senni Songhai", "set": "Sentani", "seu": "Serui-Laut", "sev": "Nyarafolo Senoufo", "sew": "Sewa Bay", "sey": "Secoya", "sez": "Senthang Chin", "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language", "sfe": "Eastern Subanen", "sfm": "Small Flowery Miao", "sfs": "South African Sign Language", "sfw": "Sehwi", "sg": "Sango", "sga": "Old Irish (to 900)", "sgb": "Mag-antsi Ayta", "sgc": "Kipsigis", "sgd": "Surigaonon", "sge": "Segai", "sgg": "Swiss-German Sign Language", "sgh": "Shughni", "sgi": "Suga", "sgj": "Surgujia", "sgk": "Sangkong", "sgm": "Singa", "sgn": "Sign languages", "sgp": "Singpho", "sgr": "Sangisari", "sgs": "Samogitian", "sgt": "Brokpake", "sgu": "Salas", "sgw": "Sebat Bet Gurage", "sgx": "Sierra Leone Sign Language", "sgy": 
"Sanglechi", "sgz": "Sursurunga", "sh": "Serbo-Croatian", "sha": "Shall-Zwall", "shb": "Ninam", "shc": "Sonde", "shd": "Kundal Shahi", "she": "Sheko", "shg": "Shua", "shh": "Shoshoni", "shi": "Tachelhit", "shj": "Shatt", "shk": "Shilluk", "shl": "Shendu", "shm": "Shahrudi", "shn": "Shan", "sho": "Shanga", "shp": "Shipibo-Conibo", "shq": "Sala", "shr": "Shi", "shs": "Shuswap", "sht": "Shasta", "shu": "Chadian Arabic", "shv": "Shehri", "shw": "Shwai", "shx": "She", "shy": "Tachawit", "shz": "Syenara Senoufo", "si": "Sinhala; Sinhalese", "sia": "Akkala Sami", "sib": "Sebop", "sid": "Sidamo", "sie": "Simaa", "sif": "Siamou", "sig": "Paasaal", "sih": "Zire; Sîshëë", "sii": "Shom Peng", "sij": "Numbami", "sik": "Sikiana", "sil": "Tumulung Sisaala", "sim": "Mende (Papua New Guinea)", "sio": "Siouan languages", "sip": "Sikkimese", "siq": "Sonia", "sir": "Siri", "sis": "Siuslaw", "sit": "Sino-Tibetan languages", "siu": "Sinagen", "siv": "Sumariup", "siw": "Siwai", "six": "Sumau", "siy": "Sivandi", "siz": "Siwi", "sja": "Epena", "sjb": "Sajau Basap", "sjd": "Kildin Sami", "sje": "Pite Sami", "sjg": "Assangori", "sjk": "Kemi Sami", "sjl": "Sajalong; Miji", "sjm": "Mapun", "sjn": "Sindarin", "sjo": "Xibe", "sjp": "Surjapuri", "sjr": "Siar-Lak", "sjs": "Senhaja De Srair", "sjt": "Ter Sami", "sju": "Ume Sami", "sjw": "Shawnee", "sk": "Slovak", "ska": "Skagit", "skb": "Saek", "skc": "Ma Manda", "skd": "Southern Sierra Miwok", "ske": "Seke (Vanuatu)", "skf": "Sakirabiá", "skg": "Sakalava Malagasy", "skh": "Sikule", "ski": "Sika", "skj": "Seke (Nepal)", "skm": "Kutong", "skn": "Kolibugan Subanon", "sko": "Seko Tengah", "skp": "Sekapan", "skq": "Sininkere", "skr": "Saraiki; Seraiki", "sks": "Maia", "skt": "Sakata", "sku": "Sakao", "skv": "Skou", "skw": "Skepi Creole Dutch", "skx": "Seko Padang", "sky": "Sikaiana", "skz": "Sekar", "sl": "Slovenian", "sla": "Slavic languages", "slc": "Sáliba", "sld": "Sissala", "sle": "Sholaga", "slf": "Swiss-Italian Sign Language", "slg": "Selungai Murut", "slh": "Southern Puget Sound Salish", "sli": "Lower Silesian", "slj": "Salumá", "sll": "Salt-Yui", "slm": "Pangutaran Sama", "sln": "Salinan", "slp": "Lamaholot", "slq": "Salchuq", "slr": "Salar", "sls": "Singapore Sign Language", "slt": "Sila", "slu": "Selaru", "slw": "Sialum", "slx": "Salampasu", "sly": "Selayar", "slz": "Ma'ya", "sm": "Samoan", "sma": "Southern Sami", "smb": "Simbari", "smc": "Som", "smf": "Auwe", "smg": "Simbali", "smh": "Samei", "smi": "Sami languages", "smj": "Lule Sami", "smk": "Bolinao", "sml": "Central Sama", "smm": "Musasa", "smn": "Inari Sami", "smp": "Samaritan", "smq": "Samo", "smr": "Simeulue", "sms": "Skolt Sami", "smt": "Simte", "smu": "Somray", "smv": "Samvedi", "smw": "Sumbawa", "smx": "Samba", "smy": "Semnani", "smz": "Simeku", "sn": "Shona", "snc": "Sinaugoro", "sne": "Bau Bidayuh", "snf": "Noon", "sng": "Sanga (Democratic Republic of Congo)", "sni": "Sensi", "snj": "Riverain Sango", "snk": "Soninke", "snl": "Sangil", "snm": "Southern Ma'di", "snn": "Siona", "sno": "Snohomish", "snp": "Siane", "snq": "Sangu (Gabon)", "snr": "Sihan", "sns": "South West Bay; Nahavaq", "snu": "Senggi; Viid", "snv": "Sa'ban", "snw": "Selee", "snx": "Sam", "sny": "Saniyo-Hiyewe", "snz": "Kou", "so": "Somali", "soa": "Thai Song", "sob": "Sobei", "soc": "So (Democratic Republic of Congo)", "sod": "Songoora", "soe": "Songomeno", "sog": "Sogdian", "soh": "Aka", "soi": "Sonha", "soj": "Soi", "sok": "Sokoro", "sol": "Solos", "son": "Songhai languages", "soo": "Songo", "sop": "Songe", "soq": "Kanasi", "sor": 
"Somrai", "sos": "Seeku", "sou": "Southern Thai", "sov": "Sonsorol", "sow": "Sowanda", "sox": "Swo", "soy": "Miyobe", "soz": "Temi", "spb": "Sepa (Indonesia)", "spc": "Sapé", "spd": "Saep", "spe": "Sepa (Papua New Guinea)", "spg": "Sian", "spi": "Saponi", "spk": "Sengo", "spl": "Selepet", "spm": "Akukem", "spn": "Sanapaná", "spo": "Spokane", "spp": "Supyire Senoufo", "spq": "Loreto-Ucayali Spanish", "spr": "Saparua", "sps": "Saposa", "spt": "Spiti Bhoti", "spu": "Sapuan", "spv": "Sambalpuri; Kosli", "spx": "South Picene", "spy": "Sabaot", "sq": "Albanian", "sqa": "Shama-Sambuga", "sqh": "Shau", "sqj": "Albanian languages", "sqk": "Albanian Sign Language", "sqm": "Suma", "sqn": "Susquehannock", "sqo": "Sorkhei", "sqq": "Sou", "sqr": "Siculo Arabic", "sqs": "Sri Lankan Sign Language", "sqt": "Soqotri", "squ": "Squamish", "sqx": "Kufr Qassem Sign Language (KQSL)", "sr": "Serbian", "sra": "Saruga", "srb": "Sora", "src": "Logudorese Sardinian", "sre": "Sara", "srf": "Nafi", "srg": "Sulod", "srh": "Sarikoli", "sri": "Siriano", "srk": "Serudung Murut", "srl": "Isirawa", "srm": "Saramaccan", "srn": "Sranan Tongo", "sro": "Campidanese Sardinian", "srq": "Sirionó", "srr": "Serer", "srs": "Sarsi", "srt": "Sauri", "sru": "Suruí", "srv": "Southern Sorsoganon", "srw": "Serua", "srx": "Sirmauri", "sry": "Sera", "srz": "Shahmirzadi", "ss": "Swati", "ssa": "Nilo-Saharan languages", "ssb": "Southern Sama", "ssc": "Suba-Simbiti", "ssd": "Siroi", "sse": "Balangingi; Bangingih Sama", "ssf": "Thao", "ssg": "Seimat", "ssh": "Shihhi Arabic", "ssi": "Sansi", "ssj": "Sausi", "ssk": "Sunam", "ssl": "Western Sisaala", "ssm": "Semnam", "ssn": "Waata", "sso": "Sissano", "ssp": "Spanish Sign Language", "ssq": "So'a", "ssr": "Swiss-French Sign Language", "sss": "Sô", "sst": "Sinasina", "ssu": "Susuami", "ssv": "Shark Bay", "ssx": "Samberigi", "ssy": "Saho", "ssz": "Sengseng", "st": "Southern Sotho", "sta": "Settla", "stb": "Northern Subanen", "std": "Sentinel", "ste": "Liana-Seti", "stf": "Seta", "stg": "Trieng", "sth": "Shelta", "sti": "Bulo Stieng", "stj": "Matya Samo", "stk": "Arammba", "stl": "Stellingwerfs", "stm": "Setaman", "stn": "Owa", "sto": "Stoney", "stp": "Southeastern Tepehuan", "stq": "Saterfriesisch", "str": "Straits Salish", "sts": "Shumashti", "stt": "Budeh Stieng", "stu": "Samtao", "stv": "Silt'e", "stw": "Satawalese", "sty": "Siberian Tatar", "su": "Sundanese", "sua": "Sulka", "sub": "Suku", "suc": "Western Subanon", "sue": "Suena", "sug": "Suganga", "sui": "Suki", "suj": "Shubi", "suk": "Sukuma", "suo": "Bouni", "suq": "Tirmaga-Chai Suri; Suri", "sur": "Mwaghavul", "sus": "Susu", "sut": "Subtiaba", "suv": "Puroik", "suw": "Sumbwa", "sux": "Sumerian", "suy": "Suyá", "suz": "Sunwar", "sv": "Swedish", "sva": "Svan", "svb": "Ulau-Suain", "svc": "Vincentian Creole English", "sve": "Serili", "svk": "Slovakian Sign Language", "svm": "Slavomolisano", "svs": "Savosavo", "svx": "Skalvian", "sw": "Swahili (macrolanguage)", "swb": "Maore Comorian", "swc": "Congo Swahili", "swf": "Sere", "swg": "Swabian", "swh": "Swahili (individual language); Kiswahili", "swi": "Sui", "swj": "Sira", "swk": "Malawi Sena", "swl": "Swedish Sign Language", "swm": "Samosa", "swn": "Sawknah", "swo": "Shanenawa", "swp": "Suau", "swq": "Sharwa", "swr": "Saweru", "sws": "Seluwasan", "swt": "Sawila", "swu": "Suwawa", "swv": "Shekhawati", "sww": "Sowa", "swx": "Suruahá", "swy": "Sarua", "sxb": "Suba", "sxc": "Sicanian", "sxe": "Sighu", "sxg": "Shuhi; Shixing", "sxk": "Southern Kalapuya", "sxl": "Selian", "sxm": "Samre", "sxn": "Sangir", 
"sxo": "Sorothaptic", "sxr": "Saaroa", "sxs": "Sasaru", "sxu": "Upper Saxon", "sxw": "Saxwe Gbe", "sya": "Siang", "syb": "Central Subanen", "syc": "Classical Syriac", "syd": "Samoyedic languages", "syi": "Seki", "syk": "Sukur", "syl": "Sylheti", "sym": "Maya Samo", "syn": "Senaya", "syo": "Suoy", "syr": "Syriac", "sys": "Sinyar", "syw": "Kagate", "syx": "Samay", "syy": "Al-Sayyid Bedouin Sign Language", "sza": "Semelai", "szb": "Ngalum", "szc": "Semaq Beri", "szd": "Seru", "sze": "Seze", "szg": "Sengele", "szl": "Silesian", "szn": "Sula", "szp": "Suabo", "szs": "Solomon Islands Sign Language", "szv": "Isu (Fako Division)", "szw": "Sawai", "szy": "Sakizaya", "ta": "Tamil", "taa": "Lower Tanana", "tab": "Tabassaran", "tac": "Lowland Tarahumara", "tad": "Tause", "tae": "Tariana", "taf": "Tapirapé", "tag": "Tagoi", "tai": "Tai languages", "taj": "Eastern Tamang", "tak": "Tala", "tal": "Tal", "tan": "Tangale", "tao": "Yami", "tap": "Taabwa", "taq": "Tamasheq", "tar": "Central Tarahumara", "tas": "Tay Boi", "tau": "Upper Tanana", "tav": "Tatuyo", "taw": "Tai", "tax": "Tamki", "tay": "Atayal", "taz": "Tocho", "tba": "Aikanã", "tbc": "Takia", "tbd": "Kaki Ae", "tbe": "Tanimbili", "tbf": "Mandara", "tbg": "North Tairora", "tbh": "Dharawal; Thurawal", "tbi": "Gaam", "tbj": "Tiang", "tbk": "Calamian Tagbanwa", "tbl": "Tboli", "tbm": "Tagbu", "tbn": "Barro Negro Tunebo", "tbo": "Tawala", "tbp": "Taworta; Diebroud", "tbq": "Tibeto-Burman languages", "tbr": "Tumtum", "tbs": "Tanguat", "tbt": "Tembo (Kitembo)", "tbu": "Tubar", "tbv": "Tobo", "tbw": "Tagbanwa", "tbx": "Kapin", "tby": "Tabaru", "tbz": "Ditammari", "tca": "Ticuna", "tcb": "Tanacross", "tcc": "Datooga", "tcd": "Tafi", "tce": "Southern Tutchone", "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec", "tcg": "Tamagario", "tch": "Turks And Caicos Creole English", "tci": "Wára", "tck": "Tchitchege", "tcl": "Taman (Myanmar)", "tcm": "Tanahmerah", "tcn": "Tichurong", "tco": "Taungyo", "tcp": "Tawr Chin", "tcq": "Kaiy", "tcs": "Torres Strait Creole; Yumplatok", "tct": "T'en", "tcu": "Southeastern Tarahumara", "tcw": "Tecpatlán Totonac", "tcx": "Toda", "tcy": "Tulu", "tcz": "Thado Chin", "tda": "Tagdal", "tdb": "Panchpargania", "tdc": "Emberá-Tadó", "tdd": "Tai Nüa", "tde": "Tiranige Diga Dogon", "tdf": "Talieng", "tdg": "Western Tamang", "tdh": "Thulung", "tdi": "Tomadino", "tdj": "Tajio", "tdk": "Tambas", "tdl": "Sur", "tdm": "Taruma", "tdn": "Tondano", "tdo": "Teme", "tdq": "Tita", "tdr": "Todrah", "tds": "Doutai", "tdt": "Tetun Dili", "tdv": "Toro", "tdx": "Tandroy-Mahafaly Malagasy", "tdy": "Tadyawan", "te": "Telugu", "tea": "Temiar", "teb": "Tetete", "tec": "Terik", "ted": "Tepo Krumen", "tee": "Huehuetla Tepehua", "tef": "Teressa", "teg": "Teke-Tege", "teh": "Tehuelche", "tei": "Torricelli", "tek": "Ibali Teke", "tem": "Timne", "ten": "Tama (Colombia)", "teo": "Teso", "tep": "Tepecano", "teq": "Temein", "ter": "Tereno", "tes": "Tengger", "tet": "Tetum", "teu": "Soo", "tev": "Teor", "tew": "Tewa (USA)", "tex": "Tennet", "tey": "Tulishi", "tez": "Tetserret", "tfi": "Tofin Gbe", "tfn": "Tanaina", "tfo": "Tefaro", "tfr": "Teribe", "tft": "Ternate", "tg": "Tajik", "tga": "Sagalla", "tgb": "Tobilung", "tgc": "Tigak", "tgd": "Ciwogai", "tge": "Eastern Gorkha Tamang", "tgf": "Chalikha", "tgh": "Tobagonian Creole English", "tgi": "Lawunuia", "tgj": "Tagin", "tgn": "Tandaganon", "tgo": "Sudest", "tgp": "Tangoa", "tgq": "Tring", "tgr": "Tareng", "tgs": "Nume", "tgt": "Central Tagbanwa", "tgu": "Tanggu", "tgv": "Tingui-Boto", "tgw": "Tagwana Senoufo", 
"tgx": "Tagish", "tgy": "Togoyo", "tgz": "Tagalaka", "th": "Thai", "thd": "Kuuk Thaayorre; Thayore", "the": "Chitwania Tharu", "thf": "Thangmi", "thh": "Northern Tarahumara", "thi": "Tai Long", "thk": "Tharaka; Kitharaka", "thl": "Dangaura Tharu", "thm": "Aheu", "thn": "Thachanadan", "thp": "Thompson", "thq": "Kochila Tharu", "thr": "Rana Tharu", "ths": "Thakali", "tht": "Tahltan", "thu": "Thuri", "thv": "Tahaggart Tamahaq", "thy": "Tha", "thz": "Tayart Tamajeq", "ti": "Tigrinya", "tia": "Tidikelt Tamazight", "tic": "Tira", "tif": "Tifal", "tig": "Tigre", "tih": "Timugon Murut", "tii": "Tiene", "tij": "Tilung", "tik": "Tikar", "til": "Tillamook", "tim": "Timbe", "tin": "Tindi", "tio": "Teop", "tip": "Trimuris", "tiq": "Tiéfo", "tis": "Masadiit Itneg", "tit": "Tinigua", "tiu": "Adasen", "tiv": "Tiv", "tiw": "Tiwi", "tix": "Southern Tiwa", "tiy": "Tiruray", "tiz": "Tai Hongjin", "tja": "Tajuasohn", "tjg": "Tunjung", "tji": "Northern Tujia", "tjj": "Tjungundji", "tjl": "Tai Laing", "tjm": "Timucua", "tjn": "Tonjon", "tjo": "Temacine Tamazight", "tjp": "Tjupany", "tjs": "Southern Tujia", "tju": "Tjurruru", "tjw": "Djabwurrung", "tk": "Turkmen", "tka": "Truká", "tkb": "Buksa", "tkd": "Tukudede", "tke": "Takwane", "tkf": "Tukumanféd", "tkg": "Tesaka Malagasy", "tkl": "Tokelau", "tkm": "Takelma", "tkn": "Toku-No-Shima", "tkp": "Tikopia", "tkq": "Tee", "tkr": "Tsakhur", "tks": "Takestani", "tkt": "Kathoriya Tharu", "tku": "Upper Necaxa Totonac", "tkv": "Mur Pano", "tkw": "Teanu", "tkx": "Tangko", "tkz": "Takua", "tl": "Tagalog", "tla": "Southwestern Tepehuan", "tlb": "Tobelo", "tlc": "Yecuatla Totonac", "tld": "Talaud", "tlf": "Telefol", "tlg": "Tofanma", "tlh": "Klingon; tlhIngan Hol", "tli": "Tlingit", "tlj": "Talinga-Bwisi", "tlk": "Taloki", "tll": "Tetela", "tlm": "Tolomako", "tln": "Talondo'", "tlo": "Talodi", "tlp": "Filomena Mata-Coahuitlán Totonac", "tlq": "Tai Loi", "tlr": "Talise", "tls": "Tambotalo", "tlt": "Sou Nama; Teluti", "tlu": "Tulehu", "tlv": "Taliabu", "tlx": "Khehek", "tly": "Talysh", "tma": "Tama (Chad)", "tmb": "Katbol; Avava", "tmc": "Tumak", "tmd": "Haruai", "tme": "Tremembé", "tmf": "Toba-Maskoy", "tmg": "Ternateño", "tmh": "Tamashek", "tmi": "Tutuba", "tmj": "Samarokena", "tmk": "Northwestern Tamang", "tml": "Tamnim Citak", "tmm": "Tai Thanh", "tmn": "Taman (Indonesia)", "tmo": "Temoq", "tmq": "Tumleo", "tmr": "Jewish Babylonian Aramaic (ca. 
200-1200 CE)", "tms": "Tima", "tmt": "Tasmate", "tmu": "Iau", "tmv": "Tembo (Motembo)", "tmw": "Temuan", "tmy": "Tami", "tmz": "Tamanaku", "tn": "Tswana", "tna": "Tacana", "tnb": "Western Tunebo", "tnc": "Tanimuca-Retuarã", "tnd": "Angosturas Tunebo", "tng": "Tobanga", "tnh": "Maiani", "tni": "Tandia", "tnk": "Kwamera", "tnl": "Lenakel", "tnm": "Tabla", "tnn": "North Tanna", "tno": "Toromono", "tnp": "Whitesands", "tnq": "Taino", "tnr": "Ménik", "tns": "Tenis", "tnt": "Tontemboan", "tnu": "Tay Khang", "tnv": "Tangchangya", "tnw": "Tonsawang", "tnx": "Tanema", "tny": "Tongwe", "tnz": "Ten'edn", "to": "Tonga (Tonga Islands)", "tob": "Toba", "toc": "Coyutla Totonac", "tod": "Toma", "tof": "Gizrra", "tog": "Tonga (Nyasa)", "toh": "Gitonga", "toi": "Tonga (Zambia)", "toj": "Tojolabal", "tok": "Toki Pona", "tol": "Tolowa", "tom": "Tombulu", "too": "Xicotepec De Juárez Totonac", "top": "Papantla Totonac", "toq": "Toposa", "tor": "Togbo-Vara Banda", "tos": "Highland Totonac", "tou": "Tho", "tov": "Upper Taromi", "tow": "Jemez", "tox": "Tobian", "toy": "Topoiyo", "toz": "To", "tpa": "Taupota", "tpc": "Azoyú Me'phaa; Azoyú Tlapanec", "tpe": "Tippera", "tpf": "Tarpia", "tpg": "Kula", "tpi": "Tok Pisin", "tpj": "Tapieté", "tpk": "Tupinikin", "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec", "tpm": "Tampulma", "tpn": "Tupinambá", "tpo": "Tai Pao", "tpp": "Pisaflores Tepehua", "tpq": "Tukpa", "tpr": "Tuparí", "tpt": "Tlachichilco Tepehua", "tpu": "Tampuan", "tpv": "Tanapag", "tpw": "Tupí", "tpx": "Acatepec Me'phaa; Acatepec Tlapanec", "tpy": "Trumai", "tpz": "Tinputz", "tqb": "Tembé", "tql": "Lehali", "tqm": "Turumsa", "tqn": "Tenino", "tqo": "Toaripi", "tqp": "Tomoip", "tqq": "Tunni", "tqr": "Torona", "tqt": "Western Totonac", "tqu": "Touo", "tqw": "Tonkawa", "tr": "Turkish", "tra": "Tirahi", "trb": "Terebu", "trc": "Copala Triqui", "trd": "Turi", "tre": "East Tarangan", "trf": "Trinidadian Creole English", "trg": "Lishán Didán", "trh": "Turaka", "tri": "Trió", "trj": "Toram", "trk": "Turkic languages", "trl": "Traveller Scottish", "trm": "Tregami", "trn": "Trinitario", "tro": "Tarao Naga", "trp": "Kok Borok", "trq": "San Martín Itunyoso Triqui", "trr": "Taushiro", "trs": "Chicahuaxtla Triqui", "trt": "Tunggare", "tru": "Turoyo; Surayt", "trv": "Sediq; Seediq; Taroko", "trw": "Torwali", "trx": "Tringgus-Sembaan Bidayuh", "try": "Turung", "trz": "Torá", "ts": "Tsonga", "tsa": "Tsaangi", "tsb": "Tsamai", "tsc": "Tswa", "tsd": "Tsakonian", "tse": "Tunisian Sign Language", "tsg": "Tausug", "tsh": "Tsuvan", "tsi": "Tsimshian", "tsj": "Tshangla", "tsk": "Tseku", "tsl": "Ts'ün-Lao", "tsm": "Turkish Sign Language; Türk İşaret Dili", "tsp": "Northern Toussian", "tsq": "Thai Sign Language", "tsr": "Akei", "tss": "Taiwan Sign Language", "tst": "Tondi Songway Kiini", "tsu": "Tsou", "tsv": "Tsogo", "tsw": "Tsishingini", "tsx": "Mubami", "tsy": "Tebul Sign Language", "tsz": "Purepecha", "tt": "Tatar", "tta": "Tutelo", "ttb": "Gaa", "ttc": "Tektiteko", "ttd": "Tauade", "tte": "Bwanabwana", "ttf": "Tuotomb", "ttg": "Tutong", "tth": "Upper Ta'oih", "tti": "Tobati", "ttj": "Tooro", "ttk": "Totoro", "ttl": "Totela", "ttm": "Northern Tutchone", "ttn": "Towei", "tto": "Lower Ta'oih", "ttp": "Tombelala", "ttq": "Tawallammat Tamajaq", "ttr": "Tera", "tts": "Northeastern Thai", "ttt": "Muslim Tat", "ttu": "Torau", "ttv": "Titan", "ttw": "Long Wat", "tty": "Sikaritai", "ttz": "Tsum", "tua": "Wiarumus", "tub": "Tübatulabal", "tuc": "Mutu", "tud": "Tuxá", "tue": "Tuyuca", "tuf": "Central Tunebo", "tug": "Tunia", "tuh": "Taulil", 
"tui": "Tupuri", "tuj": "Tugutil", "tul": "Tula", "tum": "Tumbuka", "tun": "Tunica", "tuo": "Tucano", "tup": "Tupi languages", "tuq": "Tedaga", "tus": "Tuscarora", "tut": "Altaic languages", "tuu": "Tututni", "tuv": "Turkana", "tuw": "Tungus languages", "tux": "Tuxináwa", "tuy": "Tugen", "tuz": "Turka", "tva": "Vaghua", "tvd": "Tsuvadi", "tve": "Te'un", "tvk": "Southeast Ambrym", "tvl": "Tuvalu", "tvm": "Tela-Masbuar", "tvn": "Tavoyan", "tvo": "Tidore", "tvs": "Taveta", "tvt": "Tutsa Naga", "tvu": "Tunen", "tvw": "Sedoa", "tvx": "Taivoan", "tvy": "Timor Pidgin", "tw": "Twi", "twa": "Twana", "twb": "Western Tawbuid", "twc": "Teshenawa", "twd": "Twents", "twe": "Tewa (Indonesia)", "twf": "Northern Tiwa", "twg": "Tereweng", "twh": "Tai Dón", "twl": "Tawara", "twm": "Tawang Monpa", "twn": "Twendi", "two": "Tswapong", "twp": "Ere", "twq": "Tasawaq", "twr": "Southwestern Tarahumara", "twt": "Turiwára", "twu": "Termanu", "tww": "Tuwari", "twx": "Tewe", "twy": "Tawoyan", "txa": "Tombonuo", "txb": "Tokharian B", "txc": "Tsetsaut", "txe": "Totoli", "txg": "Tangut", "txh": "Thracian", "txi": "Ikpeng", "txj": "Tarjumo", "txm": "Tomini", "txn": "West Tarangan", "txo": "Toto", "txq": "Tii", "txr": "Tartessian", "txs": "Tonsea", "txt": "Citak", "txu": "Kayapó", "txx": "Tatana", "txy": "Tanosy Malagasy", "ty": "Tahitian", "tya": "Tauya", "tye": "Kyanga", "tyh": "O'du", "tyi": "Teke-Tsaayi", "tyj": "Tai Do; Tai Yo", "tyl": "Thu Lao", "tyn": "Kombai", "typ": "Thaypan", "tyr": "Tai Daeng", "tys": "Tày Sa Pa", "tyt": "Tày Tac", "tyu": "Kua", "tyv": "Tuvinian", "tyx": "Teke-Tyee", "tyy": "Tiyaa", "tyz": "Tày", "tza": "Tanzanian Sign Language", "tzh": "Tzeltal", "tzj": "Tz'utujil", "tzl": "Talossan", "tzm": "Central Atlas Tamazight", "tzn": "Tugun", "tzo": "Tzotzil", "tzx": "Tabriak", "uam": "Uamué", "uan": "Kuan", "uar": "Tairuma", "uba": "Ubang", "ubi": "Ubi", "ubl": "Buhi'non Bikol", "ubr": "Ubir", "ubu": "Umbu-Ungu", "uby": "Ubykh", "uda": "Uda", "ude": "Udihe", "udg": "Muduga", "udi": "Udi", "udj": "Ujir", "udl": "Wuzlam", "udm": "Udmurt", "udu": "Uduk", "ues": "Kioko", "ufi": "Ufim", "ug": "Uighur; Uyghur", "uga": "Ugaritic", "ugb": "Kuku-Ugbanh", "uge": "Ughele", "ugh": "Kubachi", "ugn": "Ugandan Sign Language", "ugo": "Ugong", "ugy": "Uruguayan Sign Language", "uha": "Uhami", "uhn": "Damal", "uis": "Uisai", "uiv": "Iyive", "uji": "Tanjijili", "uk": "Ukrainian", "uka": "Kaburi", "ukg": "Ukuriguma", "ukh": "Ukhwejo", "uki": "Kui (India)", "ukk": "Muak Sa-aak", "ukl": "Ukrainian Sign Language", "ukp": "Ukpe-Bayobiri", "ukq": "Ukwa", "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language", "uku": "Ukue", "ukv": "Kuku", "ukw": "Ukwuani-Aboh-Ndoni", "uky": "Kuuk-Yak", "ula": "Fungwa", "ulb": "Ulukwumi", "ulc": "Ulch", "ule": "Lule", "ulf": "Usku; Afra", "uli": "Ulithian", "ulk": "Meriam Mir", "ull": "Ullatan", "ulm": "Ulumanda'", "uln": "Unserdeutsch", "ulu": "Uma' Lung", "ulw": "Ulwa", "uma": "Umatilla", "umb": "Umbundu", "umc": "Marrucinian", "umd": "Umbindhamu", "umg": "Morrobalama; Umbuygamu", "umi": "Ukit", "umm": "Umon", "umn": "Makyan Naga", "umo": "Umotína", "ump": "Umpila", "umr": "Umbugarla", "ums": "Pendau", "umu": "Munsee", "una": "North Watut", "und": "Undetermined", "une": "Uneme", "ung": "Ngarinyin", "uni": "Uni", "unk": "Enawené-Nawé", "unm": "Unami", "unn": "Kurnai", "unr": "Mundari", "unu": "Unubahe", "unx": "Munda", "unz": "Unde Kaili", "uon": "Kulon", "upi": "Umeda", "upv": "Uripiv-Wala-Rano-Atchin", "ur": "Urdu", "ura": "Urarina", "urb": "Urubú-Kaapor; Kaapor", "urc": "Urningangg", 
"ure": "Uru", "urf": "Uradhi", "urg": "Urigina", "urh": "Urhobo", "uri": "Urim", "urj": "Uralic languages", "urk": "Urak Lawoi'", "url": "Urali", "urm": "Urapmin", "urn": "Uruangnirin", "uro": "Ura (Papua New Guinea)", "urp": "Uru-Pa-In", "urr": "Lehalurup; Löyöp", "urt": "Urat", "uru": "Urumi", "urv": "Uruava", "urw": "Sop", "urx": "Urimo", "ury": "Orya", "urz": "Uru-Eu-Wau-Wau", "usa": "Usarufa", "ush": "Ushojo", "usi": "Usui", "usk": "Usaghade", "usp": "Uspanteco", "uss": "us-Saare", "usu": "Uya", "uta": "Otank", "ute": "Ute-Southern Paiute", "uth": "ut-Hun", "utp": "Amba (Solomon Islands)", "utr": "Etulo", "utu": "Utu", "uum": "Urum", "uur": "Ura (Vanuatu)", "uuu": "U", "uve": "West Uvean; Fagauvea", "uvh": "Uri", "uvl": "Lote", "uwa": "Kuku-Uwanh", "uya": "Doko-Uyanga", "uz": "Uzbek", "uzn": "Northern Uzbek", "uzs": "Southern Uzbek", "vaa": "Vaagri Booli", "vae": "Vale", "vaf": "Vafsi", "vag": "Vagla", "vah": "Varhadi-Nagpuri", "vai": "Vai", "vaj": "Sekele; Northwestern ǃKung; Vasekele", "val": "Vehes", "vam": "Vanimo", "van": "Valman", "vao": "Vao", "vap": "Vaiphei", "var": "Huarijio", "vas": "Vasavi", "vau": "Vanuma", "vav": "Varli", "vay": "Wayu", "vbb": "Southeast Babar", "vbk": "Southwestern Bontok", "ve": "Venda", "vec": "Venetian", "ved": "Veddah", "vel": "Veluws", "vem": "Vemgo-Mabas", "veo": "Ventureño", "vep": "Veps", "ver": "Mom Jango", "vgr": "Vaghri", "vgt": "Vlaamse Gebarentaal; Flemish Sign Language", "vi": "Vietnamese", "vic": "Virgin Islands Creole English", "vid": "Vidunda", "vif": "Vili", "vig": "Viemo", "vil": "Vilela", "vin": "Vinza", "vis": "Vishavan", "vit": "Viti", "viv": "Iduna", "vka": "Kariyarra", "vkj": "Kujarge", "vkk": "Kaur", "vkl": "Kulisusu", "vkm": "Kamakan", "vkn": "Koro Nulu", "vko": "Kodeoha", "vkp": "Korlai Creole Portuguese", "vkt": "Tenggarong Kutai Malay", "vku": "Kurrama", "vkz": "Koro Zuba", "vlp": "Valpei", "vls": "Vlaams", "vma": "Martuyhunira", "vmb": "Barbaram", "vmc": "Juxtlahuaca Mixtec", "vmd": "Mudu Koraga", "vme": "East Masela", "vmf": "Mainfränkisch", "vmg": "Lungalunga", "vmh": "Maraghei", "vmi": "Miwa", "vmj": "Ixtayutla Mixtec", "vmk": "Makhuwa-Shirima", "vml": "Malgana", "vmm": "Mitlatongo Mixtec", "vmp": "Soyaltepec Mazatec", "vmq": "Soyaltepec Mixtec", "vmr": "Marenje", "vms": "Moksela", "vmu": "Muluridyi", "vmv": "Valley Maidu", "vmw": "Makhuwa", "vmx": "Tamazola Mixtec", "vmy": "Ayautla Mazatec", "vmz": "Mazatlán Mazatec", "vnk": "Vano; Lovono", "vnm": "Vinmavis; Neve'ei", "vnp": "Vunapu", "vo": "Volapük", "vor": "Voro", "vot": "Votic", "vra": "Vera'a", "vro": "Võro", "vrs": "Varisi", "vrt": "Burmbar; Banam Bay", "vsi": "Moldova Sign Language", "vsl": "Venezuelan Sign Language", "vsv": "Valencian Sign Language; Llengua de signes valenciana", "vto": "Vitou", "vum": "Vumbu", "vun": "Vunjo", "vut": "Vute", "vwa": "Awa (China)", "wa": "Walloon", "waa": "Walla Walla", "wab": "Wab", "wac": "Wasco-Wishram", "wad": "Wamesa; Wondama", "wae": "Walser", "waf": "Wakoná", "wag": "Wa'ema", "wah": "Watubela", "wai": "Wares", "waj": "Waffa", "wak": "Wakashan languages", "wal": "Wolaytta; Wolaitta", "wam": "Wampanoag", "wan": "Wan", "wao": "Wappo", "wap": "Wapishana", "waq": "Wagiman", "war": "Waray (Philippines)", "was": "Washo", "wat": "Kaninuwa", "wau": "Waurá", "wav": "Waka", "waw": "Waiwai", "wax": "Watam; Marangis", "way": "Wayana", "waz": "Wampur", "wba": "Warao", "wbb": "Wabo", "wbe": "Waritai", "wbf": "Wara", "wbh": "Wanda", "wbi": "Vwanji", "wbj": "Alagwa", "wbk": "Waigali", "wbl": "Wakhi", "wbm": "Wa", "wbp": "Warlpiri", "wbq": 
"Waddar", "wbr": "Wagdi", "wbs": "West Bengal Sign Language", "wbt": "Warnman", "wbv": "Wajarri", "wbw": "Woi", "wca": "Yanomámi", "wci": "Waci Gbe", "wdd": "Wandji", "wdg": "Wadaginam", "wdj": "Wadjiginy", "wdk": "Wadikali", "wdt": "Wendat", "wdu": "Wadjigu", "wdy": "Wadjabangayi", "wea": "Wewaw", "wec": "Wè Western", "wed": "Wedau", "weg": "Wergaia", "weh": "Weh", "wei": "Kiunum", "wem": "Weme Gbe", "wen": "Sorbian languages", "weo": "Wemale", "wep": "Westphalien", "wer": "Weri", "wes": "Cameroon Pidgin", "wet": "Perai", "weu": "Rawngtu Chin", "wew": "Wejewa", "wfg": "Yafi; Zorop", "wga": "Wagaya", "wgb": "Wagawaga", "wgg": "Wangkangurru; Wangganguru", "wgi": "Wahgi", "wgo": "Waigeo", "wgu": "Wirangu", "wgy": "Warrgamay", "wha": "Sou Upaa; Manusela", "whg": "North Wahgi", "whk": "Wahau Kenyah", "whu": "Wahau Kayan", "wib": "Southern Toussian", "wic": "Wichita", "wie": "Wik-Epa", "wif": "Wik-Keyangan", "wig": "Wik Ngathan", "wih": "Wik-Me'anha", "wii": "Minidien", "wij": "Wik-Iiyanh", "wik": "Wikalkan", "wil": "Wilawila", "wim": "Wik-Mungkan", "win": "Ho-Chunk", "wir": "Wiraféd", "wiu": "Wiru", "wiv": "Vitu", "wiy": "Wiyot", "wja": "Waja", "wji": "Warji", "wka": "Kw'adza", "wkb": "Kumbaran", "wkd": "Wakde; Mo", "wkl": "Kalanadi", "wkr": "Keerray-Woorroong", "wku": "Kunduvadi", "wkw": "Wakawaka", "wky": "Wangkayutyuru", "wla": "Walio", "wlc": "Mwali Comorian", "wle": "Wolane", "wlg": "Kunbarlang", "wlh": "Welaun", "wli": "Waioli", "wlk": "Wailaki", "wll": "Wali (Sudan)", "wlm": "Middle Welsh", "wlo": "Wolio", "wlr": "Wailapa", "wls": "Wallisian", "wlu": "Wuliwuli", "wlv": "Wichí Lhamtés Vejoz", "wlw": "Walak", "wlx": "Wali (Ghana)", "wly": "Waling", "wma": "Mawa (Nigeria)", "wmb": "Wambaya", "wmc": "Wamas", "wmd": "Mamaindé", "wme": "Wambule", "wmg": "Western Minyag", "wmh": "Waima'a", "wmi": "Wamin", "wmm": "Maiwa (Indonesia)", "wmn": "Waamwang", "wmo": "Wom (Papua New Guinea)", "wms": "Wambon", "wmt": "Walmajarri", "wmw": "Mwani", "wmx": "Womo", "wnb": "Wanambre", "wnc": "Wantoat", "wnd": "Wandarang", "wne": "Waneci", "wng": "Wanggom", "wni": "Ndzwani Comorian", "wnk": "Wanukaka", "wnm": "Wanggamala", "wnn": "Wunumara", "wno": "Wano", "wnp": "Wanap", "wnu": "Usan", "wnw": "Wintu", "wny": "Wanyi; Waanyi", "wo": "Wolof", "woa": "Kuwema; Tyaraity", "wob": "Wè Northern", "woc": "Wogeo", "wod": "Wolani", "woe": "Woleaian", "wof": "Gambian Wolof", "wog": "Wogamusin", "woi": "Kamang", "wok": "Longto", "wom": "Wom (Nigeria)", "won": "Wongo", "woo": "Manombai", "wor": "Woria", "wos": "Hanga Hundi", "wow": "Wawonii", "woy": "Weyto", "wpc": "Maco", "wrb": "Waluwarra; Warluwara", "wrg": "Warungu; Gudjal", "wrh": "Wiradjuri", "wri": "Wariyangga", "wrk": "Garrwa", "wrl": "Warlmanpa", "wrm": "Warumungu", "wrn": "Warnang", "wro": "Worrorra", "wrp": "Waropen", "wrr": "Wardaman", "wrs": "Waris", "wru": "Waru", "wrv": "Waruna", "wrw": "Gugu Warra", "wrx": "Wae Rana", "wry": "Merwari", "wrz": "Waray (Australia)", "wsa": "Warembori", "wsg": "Adilabad Gondi", "wsi": "Wusi", "wsk": "Waskia", "wsr": "Owenia", "wss": "Wasa", "wsu": "Wasu", "wsv": "Wotapuri-Katarqalai", "wtf": "Watiwa", "wth": "Wathawurrung", "wti": "Berta", "wtk": "Watakataui", "wtm": "Mewati", "wtw": "Wotu", "wua": "Wikngenchera", "wub": "Wunambal", "wud": "Wudu", "wuh": "Wutunhua", "wul": "Silimo", "wum": "Wumbvu", "wun": "Bungu", "wur": "Wurrugu", "wut": "Wutung", "wuu": "Wu Chinese", "wuv": "Wuvulu-Aua", "wux": "Wulna", "wuy": "Wauyai", "wwa": "Waama", "wwb": "Wakabunga", "wwo": "Wetamut; Dorig", "wwr": "Warrwa", "www": "Wawa", "wxa": 
"Waxianghua", "wxw": "Wardandi", "wyb": "Wangaaybuwan-Ngiyambaa", "wyi": "Woiwurrung", "wym": "Wymysorys", "wyn": "Wyandot", "wyr": "Wayoró", "wyy": "Western Fijian", "xaa": "Andalusian Arabic", "xab": "Sambe", "xac": "Kachari", "xad": "Adai", "xae": "Aequian", "xag": "Aghwan", "xai": "Kaimbé", "xaj": "Ararandewára", "xak": "Máku", "xal": "Kalmyk; Oirat", "xam": "ǀXam", "xan": "Xamtanga", "xao": "Khao", "xap": "Apalachee", "xaq": "Aquitanian", "xar": "Karami", "xas": "Kamas", "xat": "Katawixi", "xau": "Kauwera", "xav": "Xavánte", "xaw": "Kawaiisu", "xay": "Kayan Mahakam", "xbb": "Lower Burdekin", "xbc": "Bactrian", "xbd": "Bindal", "xbe": "Bigambal", "xbg": "Bunganditj", "xbi": "Kombio", "xbj": "Birrpayi", "xbm": "Middle Breton", "xbn": "Kenaboi", "xbo": "Bolgarian", "xbp": "Bibbulman", "xbr": "Kambera", "xbw": "Kambiwá", "xby": "Batjala; Batyala", "xcb": "Cumbric", "xcc": "Camunic", "xce": "Celtiberian", "xcg": "Cisalpine Gaulish", "xch": "Chemakum; Chimakum", "xcl": "Classical Armenian", "xcm": "Comecrudo", "xcn": "Cotoname", "xco": "Chorasmian", "xcr": "Carian", "xct": "Classical Tibetan", "xcu": "Curonian", "xcv": "Chuvantsy", "xcw": "Coahuilteco", "xcy": "Cayuse", "xda": "Darkinyung", "xdc": "Dacian", "xdk": "Dharuk", "xdm": "Edomite", "xdo": "Kwandu", "xdq": "Kaitag", "xdy": "Malayic Dayak", "xeb": "Eblan", "xed": "Hdi", "xeg": "ǁXegwi", "xel": "Kelo", "xem": "Kembayan", "xep": "Epi-Olmec", "xer": "Xerénte", "xes": "Kesawai", "xet": "Xetá", "xeu": "Keoru-Ahia", "xfa": "Faliscan", "xga": "Galatian", "xgb": "Gbin", "xgd": "Gudang", "xgf": "Gabrielino-Fernandeño", "xgg": "Goreng", "xgi": "Garingbal", "xgl": "Galindan", "xgm": "Dharumbal; Guwinmal", "xgn": "Mongolian languages", "xgr": "Garza", "xgu": "Unggumi", "xgw": "Guwa", "xh": "Xhosa", "xha": "Harami", "xhc": "Hunnic", "xhd": "Hadrami", "xhe": "Khetrani", "xhm": "Middle Khmer (1400 to 1850 CE)", "xhr": "Hernican", "xht": "Hattic", "xhu": "Hurrian", "xhv": "Khua", "xib": "Iberian", "xii": "Xiri", "xil": "Illyrian", "xin": "Xinca", "xir": "Xiriâna", "xis": "Kisan", "xiv": "Indus Valley Language", "xiy": "Xipaya", "xjb": "Minjungbal", "xjt": "Jaitmatang", "xka": "Kalkoti", "xkb": "Northern Nago", "xkc": "Kho'ini", "xkd": "Mendalam Kayan", "xke": "Kereho", "xkf": "Khengkha", "xkg": "Kagoro", "xki": "Kenyan Sign Language", "xkj": "Kajali", "xkk": "Kachok; Kaco'", "xkl": "Mainstream Kenyah", "xkn": "Kayan River Kayan", "xko": "Kiorr", "xkp": "Kabatei", "xkq": "Koroni", "xkr": "Xakriabá", "xks": "Kumbewaha", "xkt": "Kantosi", "xku": "Kaamba", "xkv": "Kgalagadi", "xkw": "Kembra", "xkx": "Karore", "xky": "Uma' Lasan", "xkz": "Kurtokha", "xla": "Kamula", "xlb": "Loup B", "xlc": "Lycian", "xld": "Lydian", "xle": "Lemnian", "xlg": "Ligurian (Ancient)", "xli": "Liburnian", "xln": "Alanic", "xlo": "Loup A", "xlp": "Lepontic", "xls": "Lusitanian", "xlu": "Cuneiform Luwian", "xly": "Elymian", "xma": "Mushungulu", "xmb": "Mbonga", "xmc": "Makhuwa-Marrevone", "xmd": "Mbudum", "xme": "Median", "xmf": "Mingrelian", "xmg": "Mengaka", "xmh": "Kugu-Muminh", "xmj": "Majera", "xmk": "Ancient Macedonian", "xml": "Malaysian Sign Language", "xmm": "Manado Malay", "xmn": "Manichaean Middle Persian", "xmo": "Morerebi", "xmp": "Kuku-Mu'inh", "xmq": "Kuku-Mangk", "xmr": "Meroitic", "xms": "Moroccan Sign Language", "xmt": "Matbat", "xmu": "Kamu", "xmv": "Antankarana Malagasy; Tankarana Malagasy", "xmw": "Tsimihety Malagasy", "xmx": "Salawati; Maden", "xmy": "Mayaguduna", "xmz": "Mori Bawah", "xna": "Ancient North Arabian", "xnb": "Kanakanabu", "xnd": "Na-Dene 
languages", "xng": "Middle Mongolian", "xnh": "Kuanhua", "xni": "Ngarigu", "xnj": "Ngoni (Tanzania)", "xnk": "Nganakarti", "xnm": "Ngumbarl", "xnn": "Northern Kankanay", "xno": "Anglo-Norman", "xnq": "Ngoni (Mozambique)", "xnr": "Kangri", "xns": "Kanashi", "xnt": "Narragansett", "xnu": "Nukunul", "xny": "Nyiyaparli", "xnz": "Kenzi; Mattoki", "xoc": "O'chi'chi'", "xod": "Kokoda", "xog": "Soga", "xoi": "Kominimung", "xok": "Xokleng", "xom": "Komo (Sudan)", "xon": "Konkomba", "xoo": "Xukurú", "xop": "Kopar", "xor": "Korubo", "xow": "Kowaki", "xpa": "Pirriya", "xpb": "Northeastern Tasmanian; Pyemmairrener", "xpc": "Pecheneg", "xpd": "Oyster Bay Tasmanian", "xpe": "Liberia Kpelle", "xpf": "Southeast Tasmanian; Nuenonne", "xpg": "Phrygian", "xph": "North Midlands Tasmanian; Tyerrenoterpanner", "xpi": "Pictish", "xpj": "Mpalitjanh", "xpk": "Kulina Pano", "xpl": "Port Sorell Tasmanian", "xpm": "Pumpokol", "xpn": "Kapinawá", "xpo": "Pochutec", "xpp": "Puyo-Paekche", "xpq": "Mohegan-Pequot", "xpr": "Parthian", "xps": "Pisidian", "xpt": "Punthamara", "xpu": "Punic", "xpv": "Northern Tasmanian; Tommeginne", "xpw": "Northwestern Tasmanian; Peerapper", "xpx": "Southwestern Tasmanian; Toogee", "xpy": "Puyo", "xpz": "Bruny Island Tasmanian", "xqa": "Karakhanid", "xqt": "Qatabanian", "xra": "Krahô", "xrb": "Eastern Karaboro", "xrd": "Gundungurra", "xre": "Kreye", "xrg": "Minang", "xri": "Krikati-Timbira", "xrm": "Armazic", "xrn": "Arin", "xrr": "Raetic", "xrt": "Aranama-Tamique", "xru": "Marriammu", "xrw": "Karawa", "xsa": "Sabaean", "xsb": "Sambal", "xsc": "Scythian", "xsd": "Sidetic", "xse": "Sempan", "xsh": "Shamang", "xsi": "Sio", "xsj": "Subi", "xsl": "South Slavey", "xsm": "Kasem", "xsn": "Sanga (Nigeria)", "xso": "Solano", "xsp": "Silopi", "xsq": "Makhuwa-Saka", "xsr": "Sherpa", "xss": "Assan", "xsu": "Sanumá", "xsv": "Sudovian", "xsy": "Saisiyat", "xta": "Alcozauca Mixtec", "xtb": "Chazumba Mixtec", "xtc": "Katcha-Kadugli-Miri", "xtd": "Diuxi-Tilantongo Mixtec", "xte": "Ketengban", "xtg": "Transalpine Gaulish", "xth": "Yitha Yitha", "xti": "Sinicahua Mixtec", "xtj": "San Juan Teita Mixtec", "xtl": "Tijaltepec Mixtec", "xtm": "Magdalena Peñasco Mixtec", "xtn": "Northern Tlaxiaco Mixtec", "xto": "Tokharian A", "xtp": "San Miguel Piedras Mixtec", "xtq": "Tumshuqese", "xtr": "Early Tripuri", "xts": "Sindihui Mixtec", "xtt": "Tacahua Mixtec", "xtu": "Cuyamecalco Mixtec", "xtv": "Thawa", "xtw": "Tawandê", "xty": "Yoloxochitl Mixtec", "xua": "Alu Kurumba", "xub": "Betta Kurumba", "xud": "Umiida", "xug": "Kunigami", "xuj": "Jennu Kurumba", "xul": "Ngunawal; Nunukul", "xum": "Umbrian", "xun": "Unggaranggu", "xuo": "Kuo", "xup": "Upper Umpqua", "xur": "Urartian", "xut": "Kuthant", "xuu": "Kxoe; Khwedam", "xve": "Venetic", "xvi": "Kamviri", "xvn": "Vandalic", "xvo": "Volscian", "xvs": "Vestinian", "xwa": "Kwaza", "xwc": "Woccon", "xwd": "Wadi Wadi", "xwe": "Xwela Gbe", "xwg": "Kwegu", "xwj": "Wajuk", "xwk": "Wangkumara", "xwl": "Western Xwla Gbe", "xwo": "Written Oirat", "xwr": "Kwerba Mamberamo", "xwt": "Wotjobaluk", "xww": "Wemba Wemba", "xxb": "Boro (Ghana)", "xxk": "Ke'o", "xxm": "Minkin", "xxr": "Koropó", "xxt": "Tambora", "xya": "Yaygir", "xyb": "Yandjibara", "xyj": "Mayi-Yapi", "xyk": "Mayi-Kulan", "xyl": "Yalakalore", "xyt": "Mayi-Thakurti", "xyy": "Yorta Yorta", "xzh": "Zhang-Zhung", "xzm": "Zemgalian", "xzp": "Ancient Zapotec", "yaa": "Yaminahua", "yab": "Yuhup", "yac": "Pass Valley Yali", "yad": "Yagua", "yae": "Pumé", "yaf": "Yaka (Democratic Republic of Congo)", "yag": "Yámana", "yah": 
"Yazgulyam", "yai": "Yagnobi", "yaj": "Banda-Yangere", "yak": "Yakama", "yal": "Yalunka", "yam": "Yamba", "yan": "Mayangna", "yao": "Yao", "yap": "Yapese", "yaq": "Yaqui", "yar": "Yabarana", "yas": "Nugunu (Cameroon)", "yat": "Yambeta", "yau": "Yuwana", "yav": "Yangben", "yaw": "Yawalapití", "yax": "Yauma", "yay": "Agwagwune", "yaz": "Lokaa", "yba": "Yala", "ybb": "Yemba", "ybe": "West Yugur", "ybh": "Yakha", "ybi": "Yamphu", "ybj": "Hasha", "ybk": "Bokha", "ybl": "Yukuben", "ybm": "Yaben", "ybn": "Yabaâna", "ybo": "Yabong", "ybx": "Yawiyo", "yby": "Yaweyuha", "ych": "Chesu", "ycl": "Lolopo", "ycn": "Yucuna", "ycp": "Chepya", "yda": "Yanda", "ydd": "Eastern Yiddish", "yde": "Yangum Dey", "ydg": "Yidgha", "ydk": "Yoidik", "yea": "Ravula", "yec": "Yeniche", "yee": "Yimas", "yei": "Yeni", "yej": "Yevanic", "yel": "Yela", "yer": "Tarok", "yes": "Nyankpa", "yet": "Yetfa", "yeu": "Yerukula", "yev": "Yapunda", "yey": "Yeyi", "yga": "Malyangapa", "ygi": "Yiningayi", "ygl": "Yangum Gel", "ygm": "Yagomi", "ygp": "Gepo", "ygr": "Yagaria", "ygs": "Yolŋu Sign Language", "ygu": "Yugul", "ygw": "Yagwoia", "yha": "Baha Buyang", "yhd": "Judeo-Iraqi Arabic", "yhl": "Hlepho Phowa", "yhs": "Yan-nhaŋu Sign Language", "yi": "Yiddish", "yia": "Yinggarda", "yif": "Ache", "yig": "Wusa Nasu", "yih": "Western Yiddish", "yii": "Yidiny", "yij": "Yindjibarndi", "yik": "Dongshanba Lalo", "yil": "Yindjilandji", "yim": "Yimchungru Naga", "yin": "Riang Lai; Yinchia", "yip": "Pholo", "yiq": "Miqie", "yir": "North Awyu", "yis": "Yis", "yit": "Eastern Lalu", "yiu": "Awu", "yiv": "Northern Nisu", "yix": "Axi Yi", "yiz": "Azhe", "yka": "Yakan", "ykg": "Northern Yukaghir", "yki": "Yoke", "ykk": "Yakaikeke", "ykl": "Khlula", "ykm": "Kap", "ykn": "Kua-nsi", "yko": "Yasa", "ykr": "Yekora", "ykt": "Kathu", "yku": "Kuamasi", "yky": "Yakoma", "yla": "Yaul", "ylb": "Yaleba", "yle": "Yele", "ylg": "Yelogu", "yli": "Angguruk Yali", "yll": "Yil", "ylm": "Limi", "yln": "Langnian Buyang", "ylo": "Naluo Yi", "ylr": "Yalarnnga", "ylu": "Aribwaung", "yly": "Nyâlayu; Nyelâyu", "ymb": "Yambes", "ymc": "Southern Muji", "ymd": "Muda", "yme": "Yameo", "ymg": "Yamongeri", "ymh": "Mili", "ymi": "Moji", "ymk": "Makwe", "yml": "Iamalele", "ymm": "Maay", "ymn": "Yamna; Sunum", "ymo": "Yangum Mon", "ymp": "Yamap", "ymq": "Qila Muji", "ymr": "Malasar", "yms": "Mysian", "ymx": "Northern Muji", "ymz": "Muzi", "yna": "Aluo", "ynd": "Yandruwandha", "yne": "Lang'e", "yng": "Yango", "ynk": "Naukan Yupik", "ynl": "Yangulam", "ynn": "Yana", "yno": "Yong", "ynq": "Yendang", "yns": "Yansi", "ynu": "Yahuna", "yo": "Yoruba", "yob": "Yoba", "yog": "Yogad", "yoi": "Yonaguni", "yok": "Yokuts", "yol": "Yola", "yom": "Yombe", "yon": "Yongkom", "yot": "Yotti", "yox": "Yoron", "yoy": "Yoy", "ypa": "Phala", "ypb": "Labo Phowa", "ypg": "Phola", "yph": "Phupha", "ypk": "Yupik languages", "ypm": "Phuma", "ypn": "Ani Phowa", "ypo": "Alo Phola", "ypp": "Phupa", "ypz": "Phuza", "yra": "Yerakai", "yrb": "Yareba", "yre": "Yaouré", "yrk": "Nenets", "yrl": "Nhengatu", "yrm": "Yirrk-Mel", "yrn": "Yerong", "yro": "Yaroamë", "yrs": "Yarsun", "yrw": "Yarawata", "yry": "Yarluyandi", "ysc": "Yassic", "ysd": "Samatao", "ysg": "Sonaga", "ysl": "Yugoslavian Sign Language", "ysm": "Myanmar Sign Language", "ysn": "Sani", "yso": "Nisi (China)", "ysp": "Southern Lolopo", "ysr": "Sirenik Yupik", "yss": "Yessan-Mayo", "ysy": "Sanie", "yta": "Talu", "ytl": "Tanglang", "ytp": "Thopho", "ytw": "Yout Wam", "yty": "Yatay", "yua": "Yucateco; Yucatec Maya", "yub": "Yugambal", "yuc": "Yuchi", "yud": 
"Judeo-Tripolitanian Arabic", "yue": "Yue Chinese; Cantonese", "yuf": "Havasupai-Walapai-Yavapai", "yug": "Yug", "yui": "Yurutí", "yuj": "Karkar-Yuri", "yuk": "Yuki", "yul": "Yulu", "yum": "Quechan", "yun": "Bena (Nigeria)", "yup": "Yukpa", "yuq": "Yuqui", "yur": "Yurok", "yut": "Yopno", "yuw": "Yau (Morobe Province)", "yux": "Southern Yukaghir", "yuy": "East Yugur", "yuz": "Yuracare", "yva": "Yawa", "yvt": "Yavitero", "ywa": "Kalou", "ywg": "Yinhawangka", "ywl": "Western Lalu", "ywn": "Yawanawa", "ywq": "Wuding-Luquan Yi", "ywr": "Yawuru", "ywt": "Xishanba Lalo; Central Lalo", "ywu": "Wumeng Nasu", "yww": "Yawarawarga", "yxa": "Mayawali", "yxg": "Yagara", "yxl": "Yardliyawarra", "yxm": "Yinwum", "yxu": "Yuyu", "yxy": "Yabula Yabula", "yyr": "Yir Yoront", "yyu": "Yau (Sandaun Province)", "yyz": "Ayizi", "yzg": "E'ma Buyang", "yzk": "Zokhuo", "za": "Zhuang; Chuang", "zaa": "Sierra de Juárez Zapotec", "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec", "zac": "Ocotlán Zapotec", "zad": "Cajonos Zapotec", "zae": "Yareni Zapotec", "zaf": "Ayoquesco Zapotec", "zag": "Zaghawa", "zah": "Zangwal", "zai": "Isthmus Zapotec", "zaj": "Zaramo", "zak": "Zanaki", "zal": "Zauzou", "zam": "Miahuatlán Zapotec", "zao": "Ozolotepec Zapotec", "zap": "Zapotec", "zaq": "Aloápam Zapotec", "zar": "Rincón Zapotec", "zas": "Santo Domingo Albarradas Zapotec", "zat": "Tabaa Zapotec", "zau": "Zangskari", "zav": "Yatzachi Zapotec", "zaw": "Mitla Zapotec", "zax": "Xadani Zapotec", "zay": "Zayse-Zergulla; Zaysete", "zaz": "Zari", "zba": "Balaibalan", "zbc": "Central Berawan", "zbe": "East Berawan", "zbl": "Blissymbols; Bliss; Blissymbolics", "zbt": "Batui", "zbu": "Bu (Bauchi State)", "zbw": "West Berawan", "zca": "Coatecas Altas Zapotec", "zcd": "Las Delicias Zapotec", "zch": "Central Hongshuihe Zhuang", "zdj": "Ngazidja Comorian", "zea": "Zeeuws", "zeg": "Zenag", "zeh": "Eastern Hongshuihe Zhuang", "zen": "Zenaga", "zga": "Kinga", "zgb": "Guibei Zhuang", "zgh": "Standard Moroccan Tamazight", "zgm": "Minz Zhuang", "zgn": "Guibian Zhuang", "zgr": "Magori", "zh": "Chinese", "zhb": "Zhaba", "zhd": "Dai Zhuang", "zhi": "Zhire", "zhn": "Nong Zhuang", "zhw": "Zhoa", "zhx": "Chinese (family)", "zia": "Zia", "zib": "Zimbabwe Sign Language", "zik": "Zimakani", "zil": "Zialo", "zim": "Mesme", "zin": "Zinza", "ziw": "Zigula", "ziz": "Zizilivakan", "zka": "Kaimbulawa", "zkb": "Koibal", "zkd": "Kadu", "zkg": "Koguryo", "zkh": "Khorezmian", "zkk": "Karankawa", "zkn": "Kanan", "zko": "Kott", "zkp": "São Paulo Kaingáng", "zkr": "Zakhring", "zkt": "Kitan", "zku": "Kaurna", "zkv": "Krevinian", "zkz": "Khazar", "zla": "Zula", "zle": "East Slavic languages", "zlj": "Liujiang Zhuang", "zlm": "Malay (individual language)", "zln": "Lianshan Zhuang", "zlq": "Liuqian Zhuang", "zls": "South Slavic languages", "zlw": "West Slavic languages", "zma": "Manda (Australia)", "zmb": "Zimba", "zmc": "Margany", "zmd": "Maridan", "zme": "Mangerr", "zmf": "Mfinu", "zmg": "Marti Ke", "zmh": "Makolkol", "zmi": "Negeri Sembilan Malay", "zmj": "Maridjabin", "zmk": "Mandandanyi", "zml": "Matngala", "zmm": "Marimanindji; Marramaninyshi", "zmn": "Mbangwe", "zmo": "Molo", "zmp": "Mpuono", "zmq": "Mituku", "zmr": "Maranunggu", "zms": "Mbesa", "zmt": "Maringarr", "zmu": "Muruwari", "zmv": "Mbariman-Gudhinma", "zmw": "Mbo (Democratic Republic of Congo)", "zmx": "Bomitaba", "zmy": "Mariyedi", "zmz": "Mbandja", "zna": "Zan Gula", "znd": "Zande languages", "zne": "Zande (individual language)", "zng": "Mang", "znk": "Manangkari", "zns": "Mangas", "zoc": 
"Copainalá Zoque", "zoh": "Chimalapa Zoque", "zom": "Zou", "zoo": "Asunción Mixtepec Zapotec", "zoq": "Tabasco Zoque", "zor": "Rayón Zoque", "zos": "Francisco León Zoque", "zpa": "Lachiguiri Zapotec", "zpb": "Yautepec Zapotec", "zpc": "Choapan Zapotec", "zpd": "Southeastern Ixtlán Zapotec", "zpe": "Petapa Zapotec", "zpf": "San Pedro Quiatoni Zapotec", "zpg": "Guevea De Humboldt Zapotec", "zph": "Totomachapan Zapotec", "zpi": "Santa María Quiegolani Zapotec", "zpj": "Quiavicuzas Zapotec", "zpk": "Tlacolulita Zapotec", "zpl": "Lachixío Zapotec", "zpm": "Mixtepec Zapotec", "zpn": "Santa Inés Yatzechi Zapotec", "zpo": "Amatlán Zapotec", "zpp": "El Alto Zapotec", "zpq": "Zoogocho Zapotec", "zpr": "Santiago Xanica Zapotec", "zps": "Coatlán Zapotec", "zpt": "San Vicente Coatlán Zapotec", "zpu": "Yalálag Zapotec", "zpv": "Chichicapan Zapotec", "zpw": "Zaniza Zapotec", "zpx": "San Baltazar Loxicha Zapotec", "zpy": "Mazaltepec Zapotec", "zpz": "Texmelucan Zapotec", "zqe": "Qiubei Zhuang", "zra": "Kara (Korea)", "zrg": "Mirgan", "zrn": "Zerenkel", "zro": "Záparo", "zrp": "Zarphatic", "zrs": "Mairasi", "zsa": "Sarasira", "zsk": "Kaskean", "zsl": "Zambian Sign Language", "zsm": "Standard Malay", "zsr": "Southern Rincon Zapotec", "zsu": "Sukurum", "zte": "Elotepec Zapotec", "ztg": "Xanaguía Zapotec", "ztl": "Lapaguía-Guivini Zapotec", "ztm": "San Agustín Mixtepec Zapotec", "ztn": "Santa Catarina Albarradas Zapotec", "ztp": "Loxicha Zapotec", "ztq": "Quioquitani-Quierí Zapotec", "zts": "Tilquiapan Zapotec", "ztt": "Tejalapan Zapotec", "ztu": "Güilá Zapotec", "ztx": "Zaachila Zapotec", "zty": "Yatee Zapotec", "zu": "Zulu", "zua": "Zeem", "zuh": "Tokano", "zum": "Kumzari", "zun": "Zuni", "zuy": "Zumaya", "zwa": "Zay", "zyb": "Yongbei Zhuang", "zyg": "Yang Zhuang", "zyj": "Youjiang Zhuang", "zyn": "Yongnan Zhuang", "zyp": "Zyphe Chin", "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki", "zzj": "Zuojiang Zhuang" }
0
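# A minimal sketch (not part of the dumped resource above) showing how a
# code -> language-name mapping like this one might be consumed to check
# dataset-card language tags. The filename "languages.json" is an assumption
# used for illustration, not a path taken from this dump.
import json
from pathlib import Path

def unknown_language_tags(tags, resource_path="languages.json"):
    """Return the tags that do not appear in the code -> name mapping."""
    codes = json.loads(Path(resource_path).read_text(encoding="utf-8"))
    return [tag for tag in tags if tag not in codes]

# unknown_language_tags(["en", "zza", "not-a-code"]) -> ["not-a-code"]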
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/parallel/parallel.py
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging logger = logging.get_logger(__name__) class ParallelBackendConfig: backend_name = None @experimental def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): """ **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either multiprocessing.Pool or joblib for parallelization. Args: function (`Callable[[Any], Any]`): Function to be applied to `iterable`. iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`): Whether to disable the tqdm progressbar. desc (`str`): Prefix for the tqdm progressbar. single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an element of `iterable`, and `rank` is used for progress bar. """ if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func ) return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func) def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): num_proc = num_proc if num_proc <= len(iterable) else len(iterable) split_kwds = [] # We organize the splits ourselve (contiguous splits) for index in range(num_proc): div = len(iterable) // num_proc mod = len(iterable) % num_proc start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc)) if len(iterable) != sum(len(i[1]) for i in split_kwds): raise ValueError( f"Error dividing inputs iterable among processes. " f"Total number of objects {len(iterable)}, " f"length: {sum(len(i[1]) for i in split_kwds)}" ) logger.info( f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}" ) initargs, initializer = None, None if not disable_tqdm: initargs, initializer = (RLock(),), tqdm.set_lock with Pool(num_proc, initargs=initargs, initializer=initializer) as pool: mapped = pool.map(single_map_nested_func, split_kwds) logger.info(f"Finished {num_proc} processes") mapped = [obj for proc_res in mapped for obj in proc_res] logger.info(f"Unpacked {len(mapped)} objects") return mapped def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, # and it requires monkey-patching joblib internal classes which is subject to change import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc): return joblib.Parallel()( joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable ) @experimental @contextlib.contextmanager def parallel_backend(backend_name: str): """ **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization implemented by joblib. 
Args: backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib. Example usage: ```py with parallel_backend('spark'): dataset = load_dataset(..., num_proc=2) ``` """ ParallelBackendConfig.backend_name = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: ParallelBackendConfig.backend_name = None
0
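# A minimal usage sketch for the datasets.parallel module above, assuming a
# script-level context. `square` and `_single_map` are illustrative stand-ins
# defined here (in the library, `single_map_nested_func` is typically
# `_single_map_nested` from `datasets.utils.py_utils`).
from datasets.parallel import parallel_map, parallel_backend  # noqa: F401

def square(x):
    return x * x

def _single_map(args):
    # args mirrors the (function, data_struct, types, rank, disable_tqdm, desc) tuple
    function, data_struct, _types, _rank, _disable_tqdm, _desc = args
    return [function(x) for x in data_struct]

if __name__ == "__main__":
    # Uses multiprocessing.Pool because no joblib backend is configured.
    results = parallel_map(
        square, list(range(8)), num_proc=2, types=(list,),
        disable_tqdm=True, desc=None, single_map_nested_func=_single_map,
    )
    # results == [0, 1, 4, 9, 16, 25, 36, 49]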
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/parallel/__init__.py
from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/mock_download_manager.py
# Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Mock download manager interface.""" import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version logger = get_logger(__name__) class MockDownloadManager: dummy_file_name = "dummy_data" datasets_scripts_dir = "datasets" is_streaming = False def __init__( self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None, ): self.downloaded_size = 0 self.dataset_name = dataset_name self.cache_dir = cache_dir self.use_local_dummy_data = use_local_dummy_data self.config = config # download_callbacks take a single url as input self.download_callbacks: List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root self.load_existing_dummy_data = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general self.version_name = str(version) # to be downloaded self._dummy_file = None self._bucket_url = None @property def dummy_file(self): if self._dummy_file is None: self._dummy_file = self.download_dummy_data() return self._dummy_file @property def dummy_data_folder(self): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy", self.config.name, self.version_name) # structure is dummy / version_name return os.path.join("dummy", self.version_name) @property def dummy_zip_file(self): return os.path.join(self.dummy_data_folder, "dummy_data.zip") def download_dummy_data(self): path_to_dummy_data_dir = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) local_path = cached_path( path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True ) return os.path.join(local_path, self.dummy_file_name) @property def local_path_to_dummy_data(self): return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file) @property def github_path_to_dummy_data(self): if self._bucket_url is None: self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/")) return self._bucket_url @property def manual_dir(self): # return full path if its a dir if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1]) # this function has to be in the manager under this name so that testing works def download_and_extract(self, data_url, *args): if self.load_existing_dummy_data: # dummy data is downloaded and tested dummy_file = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned dummy_file = self.dummy_file_name # special case when data_url is a dict if isinstance(data_url, dict): return self.create_dummy_data_dict(dummy_file, data_url) elif isinstance(data_url, (list, tuple)): return self.create_dummy_data_list(dummy_file, data_url) else: return self.create_dummy_data_single(dummy_file, data_url) # this function has to be in the manager under this name so that testing works def download(self, data_url, *args): return self.download_and_extract(data_url) # this function has to be in the manager under this name so that testing works def download_custom(self, data_url, custom_download): return self.download_and_extract(data_url) # this function has to be in the manager under this name so that testing works def extract(self, path, *args, **kwargs): return path # this function has to be in the manager under this name so that testing works def get_recorded_sizes_checksums(self): return {} def create_dummy_data_dict(self, path_to_dummy_data, data_url): dummy_data_dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(single_urls, list): for single_url in single_urls: download_callback(single_url) else: single_url = single_urls download_callback(single_url) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(single_urls, list): value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls] else: single_url = single_urls value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name)) dummy_data_dict[key] = value # make sure that values are unique if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values() ): # append key to value to make its name unique dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def create_dummy_data_list(self, path_to_dummy_data, data_url): dummy_data_list = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url) is_pubmed_records = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): data_url = [data_url[0]] * len(data_url) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(single_url) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1])) dummy_data_list.append(value) return dummy_data_list def create_dummy_data_single(self, path_to_dummy_data, data_url): for download_callback in self.download_callbacks: download_callback(data_url) # we force the name of each key to be the last file / folder name of the url path # if the url has 
arguments, we need to encode them with urllib.parse.quote_plus value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1])) if os.path.exists(value) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def delete_extracted_files(self): pass def manage_extracted_files(self): pass def iter_archive(self, path): def _iter_archive_members(path): # this preserves the order of the members inside the ZIP archive dummy_parent_path = Path(self.dummy_file).parent relative_path = path.relative_to(dummy_parent_path) with ZipFile(self.local_path_to_dummy_data) as zip_file: members = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(member) path = Path(path) file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*") for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__")): yield file_path.relative_to(path).as_posix(), file_path.open("rb") def iter_files(self, paths): if not isinstance(paths, list): paths = [paths] for path in paths: if os.path.isfile(path): yield path else: for dirpath, dirnames, filenames in os.walk(path): if os.path.basename(dirpath).startswith((".", "__")): continue dirnames.sort() for filename in sorted(filenames): if filename.startswith((".", "__")): continue yield os.path.join(dirpath, filename)
0
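# A small sketch of how the mock manager above rewrites URLs into dummy-data
# paths. The dataset name and URLs are made up for illustration; with
# `load_existing_dummy_data=False` nothing is downloaded or read from disk.
from datasets.download.mock_download_manager import MockDownloadManager

dl_manager = MockDownloadManager(
    dataset_name="my_dataset",        # hypothetical dataset script folder
    config=None,
    version="1.0.0",
    use_local_dummy_data=True,
    load_existing_dummy_data=False,   # return dummy paths without touching disk
)
urls = {
    "train": "https://example.com/data/train.csv",
    "test": "https://example.com/data/test.csv",
}
paths = dl_manager.download_and_extract(urls)
# paths == {"train": "dummy_data/train.csv", "test": "dummy_data/test.csv"} (POSIX paths)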
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/download_config.py
import copy import warnings from dataclasses import InitVar, dataclass, field from pathlib import Path from typing import Any, Dict, Optional, Union from .. import config @dataclass class DownloadConfig: """Configuration for our cached path manager. Attributes: cache_dir (`str` or `Path`, *optional*): Specify a cache directory to save the file to (overwrite the default cache dir). force_download (`bool`, defaults to `False`): If `True`, re-download the file even if it's already cached in the cache dir. resume_download (`bool`, defaults to `False`): If `True`, resume the download if an incompletely received file is found. proxies (`dict`, *optional*): user_agent (`str`, *optional*): Optional string or dict that will be appended to the user-agent on remote requests. extract_compressed_file (`bool`, defaults to `False`): If `True` and the path points to a zip or tar file, extract the compressed file in a folder along the archive. force_extract (`bool`, defaults to `False`): If `True` when `extract_compressed_file` is `True` and the archive was already extracted, re-extract the archive and override the folder where it was extracted. delete_extracted (`bool`, defaults to `False`): Whether to delete (or keep) the extracted files. use_etag (`bool`, defaults to `True`): Whether to use the ETag HTTP response header to validate the cached files. num_proc (`int`, *optional*): The number of processes to launch to download the files in parallel. max_retries (`int`, defaults to `1`): The number of times to retry an HTTP request if it fails. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. use_auth_token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. <Deprecated version="2.14.0"> `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. </Deprecated> ignore_url_params (`bool`, defaults to `False`): Whether to strip all query parameters and fragments from the download URL before using it for caching the file. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the dataset file-system backend, if any. download_desc (`str`, *optional*): A description to be displayed alongside the progress bar while downloading the files. 
""" cache_dir: Optional[Union[str, Path]] = None force_download: bool = False resume_download: bool = False local_files_only: bool = False proxies: Optional[Dict] = None user_agent: Optional[str] = None extract_compressed_file: bool = False force_extract: bool = False delete_extracted: bool = False use_etag: bool = True num_proc: Optional[int] = None max_retries: int = 1 token: Optional[Union[str, bool]] = None use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated" ignore_url_params: bool = False storage_options: Dict[str, Any] = field(default_factory=dict) download_desc: Optional[str] = None def __post_init__(self, use_auth_token): if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) self.token = use_auth_token if "hf" not in self.storage_options: self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT} def copy(self) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) def __setattr__(self, name, value): if name == "token" and getattr(self, "storage_options", None) is not None: if "hf" not in self.storage_options: self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT} elif getattr(self.storage_options["hf"], "token", None) is None: self.storage_options["hf"]["token"] = value super().__setattr__(name, value)
0
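# A brief configuration sketch for the dataclass above; the cache directory is
# a placeholder, not a library default.
from datasets import DownloadConfig

download_config = DownloadConfig(
    cache_dir="~/.cache/my_datasets",  # hypothetical cache location
    max_retries=3,
    num_proc=4,
    token=True,                        # reuse the token stored by `huggingface-cli login`
)
# __post_init__ mirrors the token into the "hf" storage options:
assert download_config.storage_options["hf"]["token"] is True
# copy() deep-copies every field so the original config stays untouched:
download_config_2 = download_config.copy()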
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/streaming_download_manager.py
import glob import io import os import posixpath import re import tarfile import time import xml.dom.minidom import zipfile from asyncio import TimeoutError from io import BytesIO from itertools import chain from pathlib import Path, PurePosixPath from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union from xml.etree import ElementTree as ET import fsspec from aiohttp.client_exceptions import ClientError from .. import config from ..filesystems import COMPRESSION_FILESYSTEMS from ..utils.file_utils import ( get_authentication_headers_for_url, get_datasets_user_agent, http_head, is_local_path, is_relative_path, url_or_path_join, ) from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .download_config import DownloadConfig logger = get_logger(__name__) BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", ] COMPRESSION_EXTENSION_TO_PROTOCOL = { # single file compression **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}, # archive compression "zip": "zip", } SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS} SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/") MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class NonStreamableDatasetError(Exception): pass def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): return os.path.join(a, *p) else: a = posixpath.join(a, *p) return "::".join([a] + b) def xdirname(a): """ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xdirname function allows you to apply the dirname on the first path of the chain. 
Example:: >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip") zip://folder1::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): a = os.path.dirname(Path(a).as_posix()) else: a = posixpath.dirname(a) # if we end up at the root of the protocol, we get for example a = 'http:' # so we have to fix it by adding the '//' that was removed: if a.endswith(":"): a += "//" return "::".join([a] + b) def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None): """Extend `os.path.exists` function to support both local and remote files. Args: urlpath (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return os.path.exists(main_hop) else: urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) return fs.exists(main_hop) def xbasename(a): """ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xbasename function allows you to apply the basename on the first path of the chain. Example:: >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") file.txt """ a, *b = str(a).split("::") if is_local_path(a): return os.path.basename(Path(a).as_posix()) else: return posixpath.basename(a) def xsplit(a): """ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplit function allows you to apply the xsplit on the first path of the chain. Example:: >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1::https://host.com/archive.zip', 'file.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.split(Path(a).as_posix()) else: a, tail = posixpath.split(a) return "::".join([a + "//" if a.endswith(":") else a] + b), tail def xsplitext(a): """ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplitext function allows you to apply the splitext on the first path of the chain. 
Example:: >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1/file::https://host.com/archive.zip', '.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.splitext(Path(a).as_posix()) else: a, ext = posixpath.splitext(a) return "::".join([a] + b), ext def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isfile` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isfile(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) return fs.isfile(main_hop) def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: """Extend `os.path.getsize` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `int`: optional """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.getsize(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) size = fs.size(main_hop) if size is None: # use xopen instead of fs.open to make data fetching more robust with xopen(path, download_config=download_config) as f: size = len(f.read()) return size def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isdir(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) inner_path = main_hop.split("://")[1] if not inner_path.strip("/"): return True return fs.isdir(inner_path) def xrelpath(path, start=None): """Extend `os.path.relpath` function to support remote files. Args: path (`str`): URL path. start (`str`): Start URL directory path. Returns: `str` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) else: return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) def _add_retries_to_file_obj_read_method(file_obj): read = file_obj.read max_retries = config.STREAMING_READ_MAX_RETRIES def read_with_retries(*args, **kwargs): disconnect_err = None for retry in range(1, max_retries + 1): try: out = read(*args, **kwargs) break except (ClientError, TimeoutError) as err: disconnect_err = err logger.warning( f"Got disconnected from remote data host. 
Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" ) time.sleep(config.STREAMING_READ_RETRY_INTERVAL) else: raise ConnectionError("Server Disconnected") from disconnect_err return out file_obj.read = read_with_retries def _get_path_extension(path: str) -> str: # Get extension: https://foo.bar/train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz urlpath = str(urlpath) path = urlpath.split("::")[0] extension = _get_path_extension(path) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) try: with fsspec.open(urlpath, **(storage_options or {})) as f: return _get_extraction_protocol_with_magic_number(f) except FileNotFoundError: if urlpath.startswith(config.HF_ENDPOINT): raise FileNotFoundError( urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise def _prepare_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: prepared_urlpath = [] prepared_storage_options = {} for hop in urlpath.split("::"): hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) prepared_urlpath.append(hop) prepared_storage_options.update(storage_options) return "::".join(prepared_urlpath), storage_options def _prepare_single_hop_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: """ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head In particular it resolves google drive URLs It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. 
Storage options are formatted in the form {protocol: storage_options_for_protocol} """ token = None if download_config is None else download_config.token protocol = urlpath.split("://")[0] if "://" in urlpath else "file" if download_config is not None and protocol in download_config.storage_options: storage_options = download_config.storage_options[protocol] elif download_config is not None and protocol not in download_config.storage_options: storage_options = { option_name: option_value for option_name, option_value in download_config.storage_options.items() if option_name not in fsspec.available_protocols() } else: storage_options = {} if storage_options: storage_options = {protocol: storage_options} if protocol in ["http", "https"]: storage_options[protocol] = { "headers": { **get_authentication_headers_for_url(urlpath, token=token), "user-agent": get_datasets_user_agent(), }, "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables. **(storage_options.get(protocol, {})), } if "drive.google.com" in urlpath: response = http_head(urlpath) cookies = None for k, v in response.cookies.items(): if k.startswith("download_warning"): urlpath += "&confirm=" + v cookies = response.cookies storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})} # Fix Google Drive URL to avoid Virus scan warning if "drive.google.com" in urlpath and "confirm=" not in urlpath: urlpath += "&confirm=t" if urlpath.startswith("https://raw.githubusercontent.com/"): # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 storage_options[protocol]["headers"]["Accept-Encoding"] = "identity" elif protocol == "hf": storage_options[protocol] = { "token": token, "endpoint": config.HF_ENDPOINT, **storage_options.get(protocol, {}), } return urlpath, storage_options def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `open` function to support remote files using `fsspec`. It also has a retry mechanism in case connection fails. The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co Args: file (`str`): Path name of the file to be opened. mode (`str`, *optional*, default "r"): Mode in which the file is opened. *args: Arguments to be passed to `fsspec.open`. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Keyword arguments to be passed to `fsspec.open`. Returns: file object """ # This works as well for `xopen(str(Path(...)))` file_str = _as_str(file) main_hop, *rest_hops = file_str.split("::") if is_local_path(main_hop): return open(main_hop, mode, *args, **kwargs) # add headers and cookies for authentication on the HF Hub and for Google Drive file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) kwargs = {**kwargs, **(storage_options or {})} try: file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() except ValueError as e: if str(e) == "Cannot seek streaming HTTP file": raise NonStreamableDatasetError( "Streaming is not possible for this dataset because data host server doesn't support HTTP range " "requests. 
You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" ) from e else: raise except FileNotFoundError: if file.startswith(config.HF_ENDPOINT): raise FileNotFoundError( file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise _add_retries_to_file_obj_read_method(file_obj) return file_obj def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]: """Extend `os.listdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(path).split("::") if is_local_path(main_hop): return os.listdir(path) else: # globbing inside a zip in a private repo requires authentication path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) inner_path = main_hop.split("://")[1] if inner_path.strip("/") and not fs.isdir(inner_path): raise FileNotFoundError(f"Directory doesn't exist: {path}") objects = fs.listdir(inner_path) return [os.path.basename(obj["name"]) for obj in objects] def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): """Extend `glob.glob` function to support remote files. Args: urlpath (`str`): URL path with shell-style wildcard patterns. recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more directories or subdirectories. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return glob.glob(main_hop, recursive=recursive) else: # globbing inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. # - If there is "**" in the pattern, `fs.glob` must be called anyway. inner_path = main_hop.split("://")[1] globbed_paths = fs.glob(inner_path) protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `os.walk` function to support remote files. Args: urlpath (`str`): URL root path. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Additional keyword arguments forwarded to the underlying filesystem. Yields: `tuple`: 3-tuple (dirpath, dirnames, filenames). 
""" main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): yield from os.walk(main_hop, **kwargs) else: # walking inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) inner_path = main_hop.split("://")[1] if inner_path.strip("/") and not fs.isdir(inner_path): return [] protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames class xPath(type(Path())): """Extension of `pathlib.Path` to support both local paths and remote URLs.""" def __str__(self): path_str = super().__str__() main_hop, *rest_hops = path_str.split("::") if is_local_path(main_hop): return main_hop path_as_posix = path_str.replace("\\", "/") path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol return path_as_posix def exists(self, download_config: Optional[DownloadConfig] = None): """Extend `pathlib.Path.exists` method to support both local and remote files. Args: download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ return xexists(str(self), download_config=download_config) def glob(self, pattern, download_config: Optional[DownloadConfig] = None): """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. download_config : mainly use token or storage_options to support different platforms and auth types. Yields: [`xPath`] """ posix_path = self.as_posix() main_hop, *rest_hops = posix_path.split("::") if is_local_path(main_hop): yield from Path(main_hop).glob(pattern) else: # globbing inside a zip in a private repo requires authentication if rest_hops: urlpath = rest_hops[0] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) storage_options = {urlpath.split("://")[0]: storage_options} posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) else: storage_options = None fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options) # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. # - If there is "**" in the pattern, `fs.glob` must be called anyway. globbed_paths = fs.glob(xjoin(main_hop, pattern)) for globbed_path in globbed_paths: yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) def rglob(self, pattern, **kwargs): """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. Yields: [`xPath`] """ return self.glob("**/" + pattern, **kwargs) @property def parent(self) -> "xPath": """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. 
Returns: [`xPath`] """ return type(self)(xdirname(self.as_posix())) @property def name(self) -> str: """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).name @property def stem(self) -> str: """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).stem @property def suffix(self) -> str: """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).suffix def open(self, *args, **kwargs): """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. Args: **args: Arguments passed to :func:`fsspec.open`. **kwargs: Keyword arguments passed to :func:`fsspec.open`. Returns: `io.FileIO`: File-like object. """ return xopen(str(self), *args, **kwargs) def joinpath(self, *p: Tuple[str, ...]) -> "xPath": """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. Args: *p (`tuple` of `str`): Other path components. Returns: [`xPath`] """ return type(self)(xjoin(self.as_posix(), *p)) def __truediv__(self, p: str) -> "xPath": return self.joinpath(p) def with_suffix(self, suffix): main_hop, *rest_hops = str(self).split("::") if is_local_path(main_hop): return type(self)(str(super().with_suffix(suffix))) return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) def _as_str(path: Union[str, Path, xPath]): return str(path) if isinstance(path, xPath) else str(xPath(str(path))) def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import gzip if hasattr(filepath_or_buffer, "read"): return gzip.open(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import numpy as np if hasattr(filepath_or_buffer, "read"): return np.load(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): return pd.read_csv(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) if kwargs.get("compression", "infer") == "infer": kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): try: return pd.read_excel(filepath_or_buffer, **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) try: return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel( 
BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs ) def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pyarrow.parquet as pq if hasattr(filepath_or_buffer, "read"): return pq.read_table(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs) def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import scipy.io as sio if hasattr(filepath_or_buffer, "read"): return sio.loadmat(filepath_or_buffer, **kwargs) else: return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): """Extend `xml.etree.ElementTree.parse` function to support remote files. Args: source: File path or file object. parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `xml.etree.ElementTree.Element`: Root element of the given source document. """ if hasattr(source, "read"): return ET.parse(source, parser=parser) else: with xopen(source, "rb", download_config=download_config) as f: return ET.parse(f, parser=parser) def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `xml.dom.minidom.parse` function to support remote files. Args: filename_or_file (`str` or file): File path or file object. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. Returns: :obj:`xml.dom.minidom.Document`: Parsed document. 
""" if hasattr(filename_or_file, "read"): return xml.dom.minidom.parse(filename_or_file, **kwargs) else: with xopen(filename_or_file, "rb", download_config=download_config) as f: return xml.dom.minidom.parse(f, **kwargs) class _IterableFromGenerator(Iterable): """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" def __init__(self, generator: Callable, *args, **kwargs): self.generator = generator self.args = args self.kwargs = kwargs def __iter__(self): yield from self.generator(*self.args, **self.kwargs) class ArchiveIterable(_IterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_urlpath( cls, urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath, download_config=download_config) with xopen(urlpath, "rb", download_config=download_config) as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) class FilesIterable(_IterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_urlpaths( cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None ) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if xisfile(urlpath, download_config=download_config): yield urlpath elif xisdir(urlpath, download_config=download_config): for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): # in-place modification to prune the search dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if xbasename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield xjoin(dirpath, filename) else: raise FileNotFoundError(urlpath) @classmethod def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": return cls(cls._iter_from_urlpaths, urlpaths, download_config) class StreamingDownloadManager: """ Download manager that uses the "::" separator to navigate through (possibly remote) compressed 
archives. Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract data, but they rather return the path or url that could be opened using the `xopen` function which extends the built-in `open` function to stream data from remote files. """ is_streaming = True def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, ): self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") self.download_config = download_config or DownloadConfig() @property def manual_dir(self): return self._data_dir def download(self, url_or_urls): """Normalize URL(s) of files to stream data from. This is the lazy version of `DownloadManager.download` for streaming. Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) return url_or_urls def _download(self, urlpath: str) -> str: urlpath = str(urlpath) if is_relative_path(urlpath): # append the relative path to the base_path urlpath = url_or_path_join(self._base_path, urlpath) return urlpath def extract(self, url_or_urls): """Add extraction protocol for given url(s) for streaming. This is the lazy version of `DownloadManager.extract` for streaming. Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) return urlpaths def _extract(self, urlpath: str) -> str: urlpath = str(urlpath) protocol = _get_extraction_protocol(urlpath, download_config=self.download_config) # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz path = urlpath.split("::")[0] extension = _get_path_extension(path) if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")): raise NotImplementedError( f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. " f"Please use `dl_manager.iter_archive` instead.\n\n" f"Example usage:\n\n" f"\turl = dl_manager.download(url)\n" f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n" f"\tfor filename, file in tar_archive_iterator:\n" f"\t\t..." ) if protocol is None: # no extraction return urlpath elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(urlpath.split("::")[0]) inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file return f"{protocol}://{inner_file}::{urlpath}" else: return f"{protocol}://::{urlpath}" def download_and_extract(self, url_or_urls): """Prepare given `url_or_urls` for streaming (add extraction protocol). 
        This is the lazy version of `DownloadManager.download_and_extract` for streaming.

        Is equivalent to:

        ```
        urls = dl_manager.extract(dl_manager.download(url_or_urls))
        ```

        Args:
            url_or_urls (`str` or `list` or `dict`):
                URL(s) of files to stream data from. Each url is a `str`.

        Returns:
            url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
        """
        return self.extract(self.download(url_or_urls))

    def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
        """Iterate over files within an archive.

        Args:
            urlpath_or_buf (`str` or `io.BufferedReader`):
                Archive path or archive binary file object.

        Yields:
            `tuple[str, io.BufferedReader]`:
                2-tuple (path_within_archive, file_object).
                File object is opened in binary mode.

        Example:

        ```py
        >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
        >>> files = dl_manager.iter_archive(archive)
        ```
        """
        if hasattr(urlpath_or_buf, "read"):
            return ArchiveIterable.from_buf(urlpath_or_buf)
        else:
            return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)

    def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
        """Iterate over files.

        Args:
            urlpaths (`str` or `list` of `str`):
                Root paths.

        Yields:
            str: File URL path.

        Example:

        ```py
        >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
        >>> files = dl_manager.iter_files(files)
        ```
        """
        return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
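
# Illustrative sketch: how a dataset script might drive the streaming manager by hand.
# `download`/`extract` only rewrite the URL with an extraction protocol (e.g. "zip://::https://...");
# nothing is fetched until the path is opened. The URL below is hypothetical.
if __name__ == "__main__":
    dl_manager = StreamingDownloadManager()
    data_dir = dl_manager.download_and_extract("https://example.com/data.zip")  # hypothetical URL
    for file_path in dl_manager.iter_files(data_dir):
        # xopen streams the file from inside the (possibly remote) archive
        with xopen(file_path, "rb", download_config=dl_manager.download_config) as f:
            print(file_path, f.read(8))
        break  # only peek at the first file in this sketch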
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/download_manager.py
# Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Download manager interface.""" import enum import io import os import posixpath import tarfile import warnings import zipfile from datetime import datetime from functools import partial from itertools import chain from typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union from .. import config from ..utils import tqdm as hf_tqdm from ..utils.deprecation_utils import DeprecatedEnum, deprecated from ..utils.file_utils import cached_path, get_from_cache, hash_url_to_filename, is_relative_path, url_or_path_join from ..utils.info_utils import get_size_checksum_dict from ..utils.logging import get_logger from ..utils.py_utils import NestedDataStructure, map_nested, size_str from .download_config import DownloadConfig logger = get_logger(__name__) BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", ] MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class DownloadMode(enum.Enum): """`Enum` for how to treat pre-existing downloads and data. The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both raw downloads and the prepared dataset if they exist. The generations modes: | | Downloads | Dataset | |-------------------------------------|-----------|---------| | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse | | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh | | `FORCE_REDOWNLOAD` | Fresh | Fresh | """ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" FORCE_REDOWNLOAD = "force_redownload" class GenerateMode(DeprecatedEnum): REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" FORCE_REDOWNLOAD = "force_redownload" @property def help_message(self): return "Use 'DownloadMode' instead." 
def _get_path_extension(path: str) -> str: # Get extension: train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(path: str) -> Optional[str]: path = str(path) extension = _get_path_extension(path) # TODO(mariosasko): The below check will be useful once we can preserve the original extension in the new cache layout (use the `filename` parameter of `hf_hub_download`) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None with open(path, "rb") as f: return _get_extraction_protocol_with_magic_number(f) class _IterableFromGenerator(Iterable): """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" def __init__(self, generator: Callable, *args, **kwargs): self.generator = generator self.args = args self.kwargs = kwargs def __iter__(self): yield from self.generator(*self.args, **self.kwargs) class ArchiveIterable(_IterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_path(cls, urlpath: str) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath) with open(urlpath, "rb") as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_path(cls, urlpath_or_buf) -> "ArchiveIterable": return cls(cls._iter_from_path, urlpath_or_buf) class 
FilesIterable(_IterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_paths(cls, urlpaths: Union[str, List[str]]) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if os.path.isfile(urlpath): yield urlpath else: for dirpath, dirnames, filenames in os.walk(urlpath): # in-place modification to prune the search dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if os.path.basename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield os.path.join(dirpath, filename) @classmethod def from_paths(cls, urlpaths) -> "FilesIterable": return cls(cls._iter_from_paths, urlpaths) class DownloadManager: is_streaming = False def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, record_checksums=True, ): """Download manager constructor. Args: data_dir: can be used to specify a manual directory to get the files from. dataset_name (`str`): name of dataset this instance will be used for. If provided, downloads will contain which datasets they were used for. download_config (`DownloadConfig`): to specify the cache directory and other download options base_path (`str`): base path that is used when relative paths are used to download files. This can be a remote url. record_checksums (`bool`, defaults to `True`): Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder. """ self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") # To record what is being used: {url: {num_bytes: int, checksum: str}} self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {} self.record_checksums = record_checksums self.download_config = download_config or DownloadConfig() self.downloaded_paths = {} self.extracted_paths = {} @property def manual_dir(self): return self._data_dir @property def downloaded_size(self): """Returns the total size of downloaded files.""" return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values()) @staticmethod def ship_files_with_pipeline(downloaded_path_or_paths, pipeline): """Ship the files using Beam FileSystems to the pipeline temp dir. Args: downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`): Nested structure containing the downloaded path(s). pipeline ([`utils.beam_utils.BeamPipeline`]): Apache Beam Pipeline. Returns: `str` or `list[str]` or `dict[str, str]` """ from ..utils.beam_utils import upload_local_to_remote remote_dir = pipeline._options.get_all_options().get("temp_location") if remote_dir is None: raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files") def upload(local_file_path): remote_file_path = posixpath.join( remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path) ) logger.info( f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}." 
) upload_local_to_remote(local_file_path, remote_file_path) return remote_file_path uploaded_path_or_paths = map_nested( lambda local_file_path: upload(local_file_path), downloaded_path_or_paths, ) return uploaded_path_or_paths def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure): """Record size/checksum of downloaded files.""" delay = 5 for url, path in hf_tqdm( list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())), delay=delay, desc="Computing checksums", ): # call str to support PathLike objects self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict( path, record_checksum=self.record_checksums ) @deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.") def download_custom(self, url_or_urls, custom_download): """ Download given urls(s) by calling `custom_download`. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. custom_download (`Callable[src_url, dst_path]`): The source URL and destination path. For example `tf.io.gfile.copy`, that lets you download from Google storage. Returns: downloaded_path(s): `str`, The downloaded paths matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket) ``` """ cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH max_retries = self.download_config.max_retries def url_to_downloaded_path(url): return os.path.join(cache_dir, hash_url_to_filename(url)) downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls) url_or_urls = NestedDataStructure(url_or_urls) downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()): try: get_from_cache( url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries ) cached = True except FileNotFoundError: cached = False if not cached or self.download_config.force_download: custom_download(url, path) get_from_cache( url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries ) self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) return downloaded_path_or_paths.data def download(self, url_or_urls): """Download given URL(s). By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download. Each URL is a `str`. Returns: `str` or `list` or `dict`: The downloaded paths matching the given input `url_or_urls`. 
Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ download_config = self.download_config.copy() download_config.extract_compressed_file = False if download_config.download_desc is None: download_config.download_desc = "Downloading data" download_func = partial(self._download, download_config=download_config) start_time = datetime.now() downloaded_path_or_paths = map_nested( download_func, url_or_urls, map_tuple=True, num_proc=download_config.num_proc, desc="Downloading data files", ) duration = datetime.now() - start_time logger.info(f"Downloading took {duration.total_seconds() // 60} min") url_or_urls = NestedDataStructure(url_or_urls) downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) start_time = datetime.now() self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) duration = datetime.now() - start_time logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min") return downloaded_path_or_paths.data def _download(self, url_or_filename: str, download_config: DownloadConfig) -> str: url_or_filename = str(url_or_filename) if is_relative_path(url_or_filename): # append the relative path to the base_path url_or_filename = url_or_path_join(self._base_path, url_or_filename) return cached_path(url_or_filename, download_config=download_config) def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): """Iterate over files within an archive. Args: path_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(path_or_buf, "read"): return ArchiveIterable.from_buf(path_or_buf) else: return ArchiveIterable.from_path(path_or_buf) def iter_files(self, paths: Union[str, List[str]]): """Iterate over file paths. Args: paths (`str` or `list` of `str`): Root paths. Yields: `str`: File path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_paths(paths) def extract(self, path_or_paths, num_proc="deprecated"): """Extract given path(s). Args: path_or_paths (path or `list` or `dict`): Path of file to extract. Each path is a `str`. num_proc (`int`): Use multi-processing if `num_proc` > 1 and the length of `path_or_paths` is larger than `num_proc`. <Deprecated version="2.6.2"> Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead. </Deprecated> Returns: extracted_path(s): `str`, The extracted paths matching the given input path_or_paths. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ if num_proc != "deprecated": warnings.warn( "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. 
Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.", FutureWarning, ) download_config = self.download_config.copy() download_config.extract_compressed_file = True # Extract downloads the file first if it is not already downloaded if download_config.download_desc is None: download_config.download_desc = "Downloading data" extracted_paths = map_nested( partial(cached_path, download_config=download_config), path_or_paths, num_proc=download_config.num_proc, desc="Extracting data files", ) path_or_paths = NestedDataStructure(path_or_paths) extracted_paths = NestedDataStructure(extracted_paths) self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten()))) return extracted_paths.data def download_and_extract(self, url_or_urls): """Download and extract given `url_or_urls`. Is roughly equivalent to: ``` extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. Returns: extracted_path(s): `str`, extracted paths of given URL(s). """ return self.extract(self.download(url_or_urls)) def get_recorded_sizes_checksums(self): return self._recorded_sizes_checksums.copy() def delete_extracted_files(self): paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values()) for key, path in list(self.extracted_paths.items()): if path in paths_to_delete and os.path.isfile(path): os.remove(path) del self.extracted_paths[key] def manage_extracted_files(self): if self.download_config.delete_extracted: self.delete_extracted_files()
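
# Illustrative sketch: the non-streaming manager materializes files in the local cache via
# `cached_path`, then `extract` decompresses them on disk. The URL and cache dir are hypothetical.
if __name__ == "__main__":
    dl_manager = DownloadManager(
        dataset_name="demo",  # hypothetical name, used for bookkeeping only
        download_config=DownloadConfig(cache_dir="./hf_cache"),  # hypothetical cache location
    )
    extracted_dir = dl_manager.download_and_extract("https://example.com/data.zip")  # hypothetical URL
    for local_file in dl_manager.iter_files(extracted_dir):
        print(local_file)
    print(dl_manager.get_recorded_sizes_checksums())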
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/__init__.py
__all__ = [ "DownloadConfig", "DownloadManager", "DownloadMode", "StreamingDownloadManager", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/language_modeling.py
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=True) class LanguageModeling(TaskTemplate): task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"text": Value("string")}) label_schema: ClassVar[Features] = Features({}) text_column: str = "text" @property def column_mapping(self) -> Dict[str, str]: return {self.text_column: "text"}
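
# Illustrative sketch: the template maps a dataset's own column onto the canonical "text" column.
# The column name "content" is hypothetical.
if __name__ == "__main__":
    template = LanguageModeling(text_column="content")
    print(template.column_mapping)  # {'content': 'text'}
    print(template.features)        # a Features dict with a single "text" string column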
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/text_classification.py
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=True) class TextClassification(TaskTemplate): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"text": Value("string")}) label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) text_column: str = "text" label_column: str = "labels" def align_with_features(self, features): if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features.") if not isinstance(features[self.label_column], ClassLabel): raise ValueError(f"Column {self.label_column} is not a ClassLabel.") task_template = copy.deepcopy(self) label_schema = self.label_schema.copy() label_schema["labels"] = features[self.label_column] task_template.__dict__["label_schema"] = label_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return { self.text_column: "text", self.label_column: "labels", }
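
# Illustrative sketch: `align_with_features` swaps the generic ClassLabel placeholder in
# `label_schema` for the dataset's actual ClassLabel. Column and class names are hypothetical.
if __name__ == "__main__":
    dataset_features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
    template = TextClassification(text_column="review", label_column="sentiment")
    aligned = template.align_with_features(dataset_features)
    print(aligned.column_mapping)          # {'review': 'text', 'sentiment': 'labels'}
    print(aligned.label_schema["labels"])  # the dataset's ClassLabel with names ['neg', 'pos']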
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/question_answering.py
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=True) class QuestionAnsweringExtractive(TaskTemplate): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")}) label_schema: ClassVar[Features] = Features( { "answers": Sequence( { "text": Value("string"), "answer_start": Value("int32"), } ) } ) question_column: str = "question" context_column: str = "context" answers_column: str = "answers" @property def column_mapping(self) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
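
# Illustrative sketch: templates are plain frozen dataclasses, so they round-trip through
# `from_dict` (as done for dataset metadata). The column names "query" and "passage" are hypothetical.
if __name__ == "__main__":
    template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
    print(template.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}
    restored = QuestionAnsweringExtractive.from_dict(
        {"task": "question-answering-extractive", "question_column": "query", "context_column": "passage"}
    )
    print(restored == template)  # True: frozen dataclasses compare by field values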
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/automatic_speech_recognition.py
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=True) class AutomaticSpeechRecognition(TaskTemplate): task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"audio": Audio()}) label_schema: ClassVar[Features] = Features({"transcription": Value("string")}) audio_column: str = "audio" transcription_column: str = "transcription" def align_with_features(self, features): if self.audio_column not in features: raise ValueError(f"Column {self.audio_column} is not present in features.") if not isinstance(features[self.audio_column], Audio): raise ValueError(f"Column {self.audio_column} is not an Audio type.") task_template = copy.deepcopy(self) input_schema = self.input_schema.copy() input_schema["audio"] = features[self.audio_column] task_template.__dict__["input_schema"] = input_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
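
# Illustrative sketch: aligning the template copies the dataset's Audio feature (including its
# sampling rate) into the input schema. The 16 kHz rate is hypothetical.
if __name__ == "__main__":
    dataset_features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    aligned = AutomaticSpeechRecognition().align_with_features(dataset_features)
    print(aligned.input_schema["audio"])  # the dataset's Audio feature (sampling_rate=16000)
    print(aligned.column_mapping)         # {'audio': 'audio', 'transcription': 'transcription'}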
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/base.py
import abc import copy import dataclasses from dataclasses import dataclass from typing import ClassVar, Dict, Type, TypeVar from ..features import Features T = TypeVar("T", bound="TaskTemplate") @dataclass(frozen=True) class TaskTemplate(abc.ABC): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task: str input_schema: ClassVar[Features] label_schema: ClassVar[Features] def align_with_features(self: T, features: Features) -> T: """ Align features with the task template. """ # No-op return copy.deepcopy(self) @property def features(self) -> Features: return Features(**self.input_schema, **self.label_schema) @property @abc.abstractmethod def column_mapping(self) -> Dict[str, str]: raise NotImplementedError @classmethod def from_dict(cls: Type[T], template_dict: dict) -> T: field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in template_dict.items() if k in field_names})
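
# Illustrative sketch: a minimal custom template on top of this base class. The
# "text-pair-classification" task name, its columns and the extra Value import are hypothetical.
if __name__ == "__main__":
    from ..features import Value  # package-relative import, like the module's own imports

    @dataclass(frozen=True)
    class TextPairClassification(TaskTemplate):
        task: str = "text-pair-classification"
        input_schema: ClassVar[Features] = Features({"text1": Value("string"), "text2": Value("string")})
        label_schema: ClassVar[Features] = Features({})
        text1_column: str = "text1"
        text2_column: str = "text2"

        @property
        def column_mapping(self) -> Dict[str, str]:
            return {self.text1_column: "text1", self.text2_column: "text2"}

    template = TextPairClassification.from_dict({"task": "text-pair-classification", "text1_column": "premise"})
    print(template.column_mapping)  # {'premise': 'text1', 'text2': 'text2'}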
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/image_classification.py
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=True) class ImageClassification(TaskTemplate): task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"image": Image()}) label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) image_column: str = "image" label_column: str = "labels" def align_with_features(self, features): if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features.") if not isinstance(features[self.label_column], ClassLabel): raise ValueError(f"Column {self.label_column} is not a ClassLabel.") task_template = copy.deepcopy(self) label_schema = self.label_schema.copy() label_schema["labels"] = features[self.label_column] task_template.__dict__["label_schema"] = label_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return { self.image_column: "image", self.label_column: "labels", }
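
# Illustrative sketch: column and class names below are hypothetical.
if __name__ == "__main__":
    dataset_features = Features({"image": Image(), "label": ClassLabel(names=["cat", "dog"])})
    aligned = ImageClassification(label_column="label").align_with_features(dataset_features)
    print(aligned.column_mapping)          # {'image': 'image', 'label': 'labels'}
    print(aligned.label_schema["labels"])  # the dataset's ClassLabel with names ['cat', 'dog']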
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/audio_classification.py
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=True) class AudioClassification(TaskTemplate): task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"audio": Audio()}) label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) audio_column: str = "audio" label_column: str = "labels" def align_with_features(self, features): if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features.") if not isinstance(features[self.label_column], ClassLabel): raise ValueError(f"Column {self.label_column} is not a ClassLabel.") task_template = copy.deepcopy(self) label_schema = self.label_schema.copy() label_schema["labels"] = features[self.label_column] task_template.__dict__["label_schema"] = label_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return { self.audio_column: "audio", self.label_column: "labels", }
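
# Illustrative sketch: column names, class names and sampling rate below are hypothetical.
if __name__ == "__main__":
    dataset_features = Features({"audio": Audio(sampling_rate=16_000), "genre": ClassLabel(names=["rock", "jazz"])})
    aligned = AudioClassification(label_column="genre").align_with_features(dataset_features)
    print(aligned.column_mapping)          # {'audio': 'audio', 'genre': 'labels'}
    print(aligned.label_schema["labels"])  # the dataset's ClassLabel with names ['rock', 'jazz']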
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/__init__.py
from typing import Optional from ..utils.logging import get_logger from .audio_classification import AudioClassification from .automatic_speech_recognition import AutomaticSpeechRecognition from .base import TaskTemplate from .image_classification import ImageClassification from .language_modeling import LanguageModeling from .question_answering import QuestionAnsweringExtractive from .summarization import Summarization from .text_classification import TextClassification __all__ = [ "AutomaticSpeechRecognition", "AudioClassification", "ImageClassification", "LanguageModeling", "QuestionAnsweringExtractive", "Summarization", "TaskTemplate", "TextClassification", ] logger = get_logger(__name__) NAME2TEMPLATE = { AutomaticSpeechRecognition.task: AutomaticSpeechRecognition, AudioClassification.task: AudioClassification, ImageClassification.task: ImageClassification, LanguageModeling.task: LanguageModeling, QuestionAnsweringExtractive.task: QuestionAnsweringExtractive, Summarization.task: Summarization, TextClassification.task: TextClassification, } def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]: """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary.""" task_name = task_template_dict.get("task") if task_name is None: logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}") return None template = NAME2TEMPLATE.get(task_name) return template.from_dict(task_template_dict)
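
# Illustrative sketch: `task_template_from_dict` is the entry point used when templates are read
# back from dataset metadata. The column names below are hypothetical.
if __name__ == "__main__":
    template = task_template_from_dict(
        {"task": "text-classification", "text_column": "review", "label_column": "sentiment"}
    )
    print(type(template).__name__)  # TextClassification
    print(template.column_mapping)  # {'review': 'text', 'sentiment': 'labels'}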
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/tasks/summarization.py
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=True) class Summarization(TaskTemplate): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"text": Value("string")}) label_schema: ClassVar[Features] = Features({"summary": Value("string")}) text_column: str = "text" summary_column: str = "summary" @property def column_mapping(self) -> Dict[str, str]: return {self.text_column: "text", self.summary_column: "summary"}
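
# Illustrative sketch: column names below are hypothetical.
if __name__ == "__main__":
    template = Summarization(text_column="article", summary_column="highlights")
    print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}
    print(template.features)        # a Features dict with "text" and "summary" string columns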
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/formatting/jax_formatter.py
# Copyright 2021 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib logger = get_logger() DEVICE_MAPPING: Optional[dict] = None class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]): def __init__(self, features=None, device=None, **jnp_array_kwargs): super().__init__(features=features) import jax from jaxlib.xla_client import Device if isinstance(device, Device): raise ValueError( f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` " "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) self.device = device if isinstance(device, str) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: DEVICE_MAPPING = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( f"Device with string identifier {self.device} not listed among the available " f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " f"device: {str(jax.devices()[0])}." 
) self.device = str(jax.devices()[0]) self.jnp_array_kwargs = jnp_array_kwargs @staticmethod def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]: import jax return {str(device): device for device in jax.devices()} def _consolidate(self, column): import jax import jax.numpy as jnp if isinstance(column, list) and column: if all( isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(column, axis=0) return column def _tensorize(self, value): import jax import jax.numpy as jnp if isinstance(value, (str, bytes, type(None))): return value elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): return value.tolist() default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_x64: default_dtype = {"dtype": jnp.int64} else: default_dtype = {"dtype": jnp.int32} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": jnp.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: DEVICE_MAPPING = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) def _recursive_tensorize(self, data_struct): import jax # support for torch, tf, jax etc. 
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
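
# Illustrative sketch (requires `jax` to be installed): the formatter turns pyarrow columns into
# jax.numpy arrays on the default (or requested) device. The toy table below is hypothetical, and
# the exact dtypes depend on the jax x64 config.
if __name__ == "__main__":
    table = pa.table({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]})
    batch = JaxFormatter().format_batch(table)
    print({name: (arr.dtype, arr.shape) for name, arr in batch.items()})  # e.g. {'x': (int32, (2, 2)), 'y': (float32, (2,))}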
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/formatting/np_formatter.py
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from collections.abc import Mapping import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]): def __init__(self, features=None, **np_array_kwargs): super().__init__(features=features) self.np_array_kwargs = np_array_kwargs def _consolidate(self, column): if isinstance(column, list): if column and all( isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return np.stack(column) else: # don't use np.array(column, dtype=object) # since it fails in certain cases # see https://stackoverflow.com/q/51005699 out = np.empty(len(column), dtype=object) out[:] = column return out return column def _tensorize(self, value): if isinstance(value, (str, bytes, type(None))): return value elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): return value elif isinstance(value, np.number): return value default_dtype = {} if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer): default_dtype = {"dtype": np.int64} elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": np.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): return np.asarray(value, **self.np_array_kwargs) return np.asarray(value, **{**default_dtype, **self.np_array_kwargs}) def _recursive_tensorize(self, data_struct): # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(data_struct, torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)): data_struct = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(data_struct, np.ndarray): if data_struct.dtype == object: return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) if isinstance(data_struct, (list, tuple)): return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) return self._tensorize(data_struct) def recursive_tensorize(self, data_struct: dict): return map_nested(self._recursive_tensorize, data_struct, map_list=False) def format_row(self, pa_table: pa.Table) -> Mapping: row = self.numpy_arrow_extractor().extract_row(pa_table) row = self.python_features_decoder.decode_row(row) return self.recursive_tensorize(row) def format_column(self, pa_table: pa.Table) -> np.ndarray: column = self.numpy_arrow_extractor().extract_column(pa_table) column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) column = self.recursive_tensorize(column) column = self._consolidate(column) return column def format_batch(self, pa_table: pa.Table) -> Mapping: batch = self.numpy_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) batch = self.recursive_tensorize(batch) for column_name in batch: batch[column_name] = self._consolidate(batch[column_name]) return batch
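
# Illustrative sketch: columns with a consistent shape are stacked into one ndarray, while ragged
# columns fall back to a 1-D object array (see `_consolidate`). The toy table is hypothetical.
if __name__ == "__main__":
    table = pa.table({"even": [[1, 2], [3, 4]], "ragged": [[1], [2, 3]]})
    batch = NumpyFormatter().format_batch(table)
    print(batch["even"].shape, batch["even"].dtype)      # (2, 2) int64
    print(batch["ragged"].shape, batch["ragged"].dtype)  # (2,) object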
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/formatting/formatting.py
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Mapping, MutableMapping
from functools import partial

# Lint as: python3
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union

import numpy as np
import pandas as pd
import pyarrow as pa
from packaging import version

from .. import config
from ..features import Features
from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
from ..table import Table
from ..utils.py_utils import no_op_if_value_is_null


T = TypeVar("T")

RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")


def _is_range_contiguous(key: range) -> bool:
    return key.step == 1 and key.stop >= key.start


def _raise_bad_key_type(key: Any):
    raise TypeError(
        f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
    )


def _query_table_with_indices_mapping(
    table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that corresponds to the given key.

    The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
    account a shuffling or an indices selection, for example.
    The indices table must contain one column named "indices" of type uint64.
    """
    if isinstance(key, int):
        key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
        return _query_table(table, key)
    if isinstance(key, slice):
        key = range(*key.indices(indices.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            return _query_table(
                table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
            )
        else:
            pass  # treat as an iterable
    if isinstance(key, str):
        table = table.select([key])
        return _query_table(table, indices.column(0).to_pylist())
    if isinstance(key, Iterable):
        return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])

    _raise_bad_key_type(key)


def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that corresponds to the given key.
""" if isinstance(key, int): return table.fast_slice(key % table.num_rows, 1) if isinstance(key, slice): key = range(*key.indices(table.num_rows)) if isinstance(key, range): if _is_range_contiguous(key) and key.start >= 0: return table.fast_slice(key.start, key.stop - key.start) else: pass # treat as an iterable if isinstance(key, str): return table.table.drop([column for column in table.column_names if column != key]) if isinstance(key, Iterable): key = np.fromiter(key, np.int64) if len(key) == 0: return table.table.slice(0, 0) # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773) return table.fast_gather(key % table.num_rows) _raise_bad_key_type(key) def _is_array_with_nulls(pa_array: pa.Array) -> bool: return pa_array.null_count > 0 class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]): """ Arrow extractor are used to extract data from pyarrow tables. It makes it possible to extract rows, columns and batches. These three extractions types have to be implemented. """ def extract_row(self, pa_table: pa.Table) -> RowFormat: raise NotImplementedError def extract_column(self, pa_table: pa.Table) -> ColumnFormat: raise NotImplementedError def extract_batch(self, pa_table: pa.Table) -> BatchFormat: raise NotImplementedError def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]: """Return the first element of a batch (dict) as a row (dict)""" return {key: array[0] for key, array in py_dict.items()} class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]): def extract_row(self, pa_table: pa.Table) -> pa.Table: return pa_table def extract_column(self, pa_table: pa.Table) -> pa.Array: return pa_table.column(0) def extract_batch(self, pa_table: pa.Table) -> pa.Table: return pa_table class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]): def extract_row(self, pa_table: pa.Table) -> dict: return _unnest(pa_table.to_pydict()) def extract_column(self, pa_table: pa.Table) -> list: return pa_table.column(0).to_pylist() def extract_batch(self, pa_table: pa.Table) -> dict: return pa_table.to_pydict() class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]): def __init__(self, **np_array_kwargs): self.np_array_kwargs = np_array_kwargs def extract_row(self, pa_table: pa.Table) -> dict: return _unnest(self.extract_batch(pa_table)) def extract_column(self, pa_table: pa.Table) -> np.ndarray: return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]]) def extract_batch(self, pa_table: pa.Table) -> dict: return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray: if isinstance(pa_array, pa.ChunkedArray): if isinstance(pa_array.type, _ArrayXDExtensionType): # don't call to_pylist() to preserve dtype of the fixed-size array zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) array: List = [ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) ] else: zero_copy_only = _is_zero_copy_only(pa_array.type) and all( not _is_array_with_nulls(chunk) for chunk in pa_array.chunks ) array: List = [ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) ] else: if isinstance(pa_array.type, _ArrayXDExtensionType): # don't call to_pylist() to preserve dtype of the fixed-size array zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) array: List = 
pa_array.to_numpy(zero_copy_only=zero_copy_only) else: zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array) array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() if len(array) > 0: if any( (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape)) or (isinstance(x, float) and np.isnan(x)) for x in array ): return np.array(array, copy=False, dtype=object) return np.array(array, copy=False) class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]): def extract_row(self, pa_table: pa.Table) -> pd.DataFrame: return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper) def extract_column(self, pa_table: pa.Table) -> pd.Series: return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]] def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame: return pa_table.to_pandas(types_mapper=pandas_types_mapper) class PythonFeaturesDecoder: def __init__(self, features: Optional[Features]): self.features = features def decode_row(self, row: dict) -> dict: return self.features.decode_example(row) if self.features else row def decode_column(self, column: list, column_name: str) -> list: return self.features.decode_column(column, column_name) if self.features else column def decode_batch(self, batch: dict) -> dict: return self.features.decode_batch(batch) if self.features else batch class PandasFeaturesDecoder: def __init__(self, features: Optional[Features]): self.features = features def decode_row(self, row: pd.DataFrame) -> pd.DataFrame: decode = ( { column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) for column_name, feature in self.features.items() if self.features._column_requires_decoding[column_name] } if self.features else {} ) if decode: row[list(decode.keys())] = row.transform(decode) return row def decode_column(self, column: pd.Series, column_name: str) -> pd.Series: decode = ( no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] else None ) if decode: column = column.transform(decode) return column def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame: return self.decode_row(batch) class LazyDict(MutableMapping): """A dictionary backed by Arrow data. 
The values are formatted on-the-fly when accessing the dictionary.""" def __init__(self, pa_table: pa.Table, formatter: "Formatter"): self.pa_table = pa_table self.formatter = formatter self.data = {key: None for key in pa_table.column_names} self.keys_to_format = set(self.data.keys()) def __len__(self): return len(self.data) def __getitem__(self, key): value = self.data[key] if key in self.keys_to_format: value = self.format(key) self.data[key] = value self.keys_to_format.remove(key) return value def __setitem__(self, key, value): if key in self.keys_to_format: self.keys_to_format.remove(key) self.data[key] = value def __delitem__(self, key) -> None: if key in self.keys_to_format: self.keys_to_format.remove(key) del self.data[key] def __iter__(self): return iter(self.data) def __contains__(self, key): return key in self.data def __repr__(self): self._format_all() return repr(self.data) if config.PY_VERSION >= version.parse("3.9"): # merging with the union ("|") operator is supported in Python 3.9+ def __or__(self, other): if isinstance(other, LazyDict): inst = self.copy() other = other.copy() other._format_all() inst.keys_to_format -= other.data.keys() inst.data = inst.data | other.data return inst if isinstance(other, dict): inst = self.copy() inst.keys_to_format -= other.keys() inst.data = inst.data | other return inst return NotImplemented def __ror__(self, other): if isinstance(other, LazyDict): inst = self.copy() other = other.copy() other._format_all() inst.keys_to_format -= other.data.keys() inst.data = other.data | inst.data return inst if isinstance(other, dict): inst = self.copy() inst.keys_to_format -= other.keys() inst.data = other | inst.data return inst return NotImplemented def __ior__(self, other): if isinstance(other, LazyDict): other = other.copy() other._format_all() self.keys_to_format -= other.data.keys() self.data |= other.data else: self.keys_to_format -= other.keys() self.data |= other return self def __copy__(self): # Identical to `UserDict.__copy__` inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"].copy() inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy() return inst def copy(self): import copy return copy.copy(self) @classmethod def fromkeys(cls, iterable, value=None): raise NotImplementedError def format(self, key): raise NotImplementedError def _format_all(self): for key in self.keys_to_format: self.data[key] = self.format(key) self.keys_to_format.clear() class LazyRow(LazyDict): def format(self, key): return self.formatter.format_column(self.pa_table.select([key]))[0] class LazyBatch(LazyDict): def format(self, key): return self.formatter.format_column(self.pa_table.select([key])) class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]): """ A formatter is an object that extracts and formats data from pyarrow tables. It defines the formatting for rows, columns and batches. 
""" simple_arrow_extractor = SimpleArrowExtractor python_arrow_extractor = PythonArrowExtractor numpy_arrow_extractor = NumpyArrowExtractor pandas_arrow_extractor = PandasArrowExtractor def __init__(self, features: Optional[Features] = None): self.features = features self.python_features_decoder = PythonFeaturesDecoder(self.features) self.pandas_features_decoder = PandasFeaturesDecoder(self.features) def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]: if query_type == "row": return self.format_row(pa_table) elif query_type == "column": return self.format_column(pa_table) elif query_type == "batch": return self.format_batch(pa_table) def format_row(self, pa_table: pa.Table) -> RowFormat: raise NotImplementedError def format_column(self, pa_table: pa.Table) -> ColumnFormat: raise NotImplementedError def format_batch(self, pa_table: pa.Table) -> BatchFormat: raise NotImplementedError class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]): def recursive_tensorize(self, data_struct: dict): raise NotImplementedError class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]): def format_row(self, pa_table: pa.Table) -> pa.Table: return self.simple_arrow_extractor().extract_row(pa_table) def format_column(self, pa_table: pa.Table) -> pa.Array: return self.simple_arrow_extractor().extract_column(pa_table) def format_batch(self, pa_table: pa.Table) -> pa.Table: return self.simple_arrow_extractor().extract_batch(pa_table) class PythonFormatter(Formatter[Mapping, list, Mapping]): def __init__(self, features=None, lazy=False): super().__init__(features) self.lazy = lazy def format_row(self, pa_table: pa.Table) -> Mapping: if self.lazy: return LazyRow(pa_table, self) row = self.python_arrow_extractor().extract_row(pa_table) row = self.python_features_decoder.decode_row(row) return row def format_column(self, pa_table: pa.Table) -> list: column = self.python_arrow_extractor().extract_column(pa_table) column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) return column def format_batch(self, pa_table: pa.Table) -> Mapping: if self.lazy: return LazyBatch(pa_table, self) batch = self.python_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) return batch class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): def format_row(self, pa_table: pa.Table) -> pd.DataFrame: row = self.pandas_arrow_extractor().extract_row(pa_table) row = self.pandas_features_decoder.decode_row(row) return row def format_column(self, pa_table: pa.Table) -> pd.Series: column = self.pandas_arrow_extractor().extract_column(pa_table) column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) return column def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: row = self.pandas_arrow_extractor().extract_batch(pa_table) row = self.pandas_features_decoder.decode_batch(row) return row class CustomFormatter(Formatter[dict, ColumnFormat, dict]): """ A user-defined custom formatter function defined by a ``transform``. The transform must take as input a batch of data extracted for an arrow table using the python extractor, and return a batch. If the output batch is not a dict, then output_all_columns won't work. If the ouput batch has several fields, then querying a single column won't work since we don't know which field to return. 
""" def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): super().__init__(features=features) self.transform = transform def format_row(self, pa_table: pa.Table) -> dict: formatted_batch = self.format_batch(pa_table) try: return _unnest(formatted_batch) except Exception as exc: raise TypeError( f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}" ) from exc def format_column(self, pa_table: pa.Table) -> ColumnFormat: formatted_batch = self.format_batch(pa_table) if hasattr(formatted_batch, "keys"): if len(formatted_batch.keys()) > 1: raise TypeError( "Tried to query a column but the custom formatting function returns too many columns. " f"Only one column was expected but got columns {list(formatted_batch.keys())}." ) else: raise TypeError( f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" ) try: return formatted_batch[pa_table.column_names[0]] except Exception as exc: raise TypeError( f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" ) from exc def format_batch(self, pa_table: pa.Table) -> dict: batch = self.python_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) return self.transform(batch) def _check_valid_column_key(key: str, columns: List[str]) -> None: if key not in columns: raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}") def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: if isinstance(key, int): if (key < 0 and key + size < 0) or (key >= size): raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") return elif isinstance(key, slice): pass elif isinstance(key, range): if len(key) > 0: _check_valid_index_key(max(key), size=size) _check_valid_index_key(min(key), size=size) elif isinstance(key, Iterable): if len(key) > 0: _check_valid_index_key(int(max(key)), size=size) _check_valid_index_key(int(min(key)), size=size) else: _raise_bad_key_type(key) def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: if isinstance(key, int): return "row" elif isinstance(key, str): return "column" elif isinstance(key, (slice, range, Iterable)): return "batch" _raise_bad_key_type(key) def query_table( table: Table, key: Union[int, slice, range, str, Iterable], indices: Optional[Table] = None, ) -> pa.Table: """ Query a Table to extract the subtable that correspond to the given key. Args: table (``datasets.table.Table``): The input Table to query from key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: - an integer i: the subtable containing only the i-th row - a slice [i:j:k]: the subtable containing the rows that correspond to this slice - a range(i, j, k): the subtable containing the rows that correspond to this range - a string c: the subtable containing all the rows but only the column c - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows. The indices table must contain one column named "indices" of type uint64. This is used in case of shuffling or rows selection. 
Returns: ``pyarrow.Table``: the result of the query on the input table """ # Check if key is valid if not isinstance(key, (int, slice, range, str, Iterable)): _raise_bad_key_type(key) if isinstance(key, str): _check_valid_column_key(key, table.column_names) else: size = indices.num_rows if indices is not None else table.num_rows _check_valid_index_key(key, size) # Query the main table if indices is None: pa_subtable = _query_table(table, key) else: pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) return pa_subtable def format_table( table: Table, key: Union[int, slice, range, str, Iterable], formatter: Formatter, format_columns: Optional[list] = None, output_all_columns=False, ): """ Format a Table depending on the key that was used and a Formatter object. Args: table (``datasets.table.Table``): The input Table to format key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats the table as either a row, a column or a batch. formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as PythonFormatter, NumpyFormatter, etc. format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the given formatter. Other columns are discarded (unless ``output_all_columns`` is True) output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used. Returns: A row, column or batch formatted object defined by the Formatter: - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column. - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column. - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column. - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column. - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column. """ if isinstance(table, Table): pa_table = table.table else: pa_table = table query_type = key_to_query_type(key) python_formatter = PythonFormatter(features=formatter.features) if format_columns is None: return formatter(pa_table, query_type=query_type) elif query_type == "column": if key in format_columns: return formatter(pa_table, query_type) else: return python_formatter(pa_table, query_type=query_type) else: pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) formatted_output = formatter(pa_table_to_format, query_type=query_type) if output_all_columns: if isinstance(formatted_output, MutableMapping): pa_table_with_remaining_columns = pa_table.drop( col for col in pa_table.column_names if col in format_columns ) remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) formatted_output.update(remaining_columns_dict) else: raise TypeError( f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" ) return formatted_output
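
# Illustrative usage sketch (not part of the original module): it assumes an
# in-memory pyarrow table and exercises `query_table` and `format_table` the same
# way `Dataset.__getitem__` does for the three query types.
if __name__ == "__main__":
    from datasets.table import InMemoryTable

    table = InMemoryTable(pa.table({"text": ["a", "b", "c"], "idx": [0, 1, 2]}))
    formatter = PythonFormatter()
    # row query: a 1-row subtable, formatted as a dict
    print(format_table(query_table(table, 1), 1, formatter))  # {'text': 'b', 'idx': 1}
    # column query: a single-column subtable, formatted as a list
    print(format_table(query_table(table, "text"), "text", formatter))  # ['a', 'b', 'c']
    # batch query: a multi-row subtable, formatted as a dict of lists
    print(format_table(query_table(table, slice(0, 2)), slice(0, 2), formatter))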
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/formatting/torch_formatter.py
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]): def __init__(self, features=None, **torch_tensor_kwargs): super().__init__(features=features) self.torch_tensor_kwargs = torch_tensor_kwargs import torch # noqa import torch at initialization def _consolidate(self, column): import torch if isinstance(column, list) and column: if all( isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(column) return column def _tensorize(self, value): import torch if isinstance(value, (str, bytes, type(None))): return value elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): return value.tolist() default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): default_dtype = {"dtype": torch.int64} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": torch.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs}) def _recursive_tensorize(self, data_struct): import torch # support for torch, tf, jax etc. 
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
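
# Illustrative sketch (not part of the original module), assuming torch is
# installed: this is the formatter behind `dataset.set_format("torch")`.
if __name__ == "__main__":
    formatter = TorchFormatter()
    pa_table = pa.table({"x": [[1.0, 2.0], [3.0, 4.0]]})
    batch = formatter.format_batch(pa_table)
    # floating-point numpy input defaults to torch.float32 (see _tensorize above)
    print(batch["x"].shape, batch["x"].dtype)  # torch.Size([2, 2]) torch.float32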
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/formatting/__init__.py
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter logger = logging.get_logger(__name__) _FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} _FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} _FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} def _register_formatter( formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ): """ Register a Formatter object using a name and optional aliases. This function must be used on a Formatter class. """ aliases = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _FORMAT_TYPES[format_type] = formatter_cls for alias in set(aliases + [format_type]): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _FORMAT_TYPES_ALIASES[alias] = format_type def _register_unavailable_formatter( unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ): """ Register an unavailable Formatter object using a name and optional aliases. This function must be used on an Exception object that is raised when trying to get the unavailable formatter. 
""" aliases = aliases if aliases is not None else [] for alias in set(aliases + [format_type]): _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["python"]) _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) _register_formatter(NumpyFormatter, "numpy", aliases=["np"]) _register_formatter(PandasFormatter, "pandas", aliases=["pd"]) _register_formatter(CustomFormatter, "custom") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) else: _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) else: _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, "jax", aliases=[]) else: _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") _register_unavailable_formatter(_jax_error, "jax", aliases=[]) def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: """ Factory function to get a Formatter given its type name and keyword arguments. A formatter is an object that extracts and formats data from pyarrow table. It defines the formatting for rows, colums and batches. If the formatter for a given type name doesn't exist or is not available, an error is raised. """ format_type = get_format_type_from_alias(format_type) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**format_kwargs) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'" )
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/formatting/tf_formatter.py
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import tensorflow as tf class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): def __init__(self, features=None, **tf_tensor_kwargs): super().__init__(features=features) self.tf_tensor_kwargs = tf_tensor_kwargs import tensorflow as tf # noqa: F401 - import tf at initialization def _consolidate(self, column): import tensorflow as tf if isinstance(column, list) and column: if all( isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return tf.stack(column) elif all( isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype for x in column ): # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated return tf.ragged.stack(column) return column def _tensorize(self, value): import tensorflow as tf if value is None: return value default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): default_dtype = {"dtype": tf.int64} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": tf.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) def _recursive_tensorize(self, data_struct): import tensorflow as tf # support for torch, tf, jax etc. 
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
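
# Illustrative sketch (not part of the original module), assuming tensorflow is
# installed: rows of different lengths consolidate into a tf.RaggedTensor.
if __name__ == "__main__":
    formatter = TFFormatter()
    pa_table = pa.table({"tokens": [[1, 2, 3], [4, 5]]})
    column = formatter.format_column(pa_table)
    print(type(column).__name__)  # RaggedTensor, via tf.ragged.stack in _consolidate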
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/packaged_modules/__init__.py
import inspect import re from typing import Dict, List from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text from .webdataset import webdataset def _hash_python_lines(lines: List[str]) -> str: filtered_lines = [] for line in lines: line = re.sub(r"#.*", "", line) # remove comments if line: filtered_lines.append(line) full_str = "\n".join(filtered_lines) # Make a hash from all this code full_bytes = full_str.encode("utf-8") return insecure_hashlib.sha256(full_bytes).hexdigest() # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())), } # Used to infer the module to use based on the data files extensions _EXTENSION_TO_MODULE = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), ".tar": ("webdataset", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name _MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) for _module in _MODULE_TO_EXTENSIONS: _MODULE_TO_EXTENSIONS[_module].append(".zip")
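
# Illustrative sketch (not part of the original module): the tables above map a data
# file extension to a packaged module name plus default builder kwargs.
if __name__ == "__main__":
    module_name, default_builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]
    print(module_name, default_builder_kwargs)  # csv {'sep': '\t'}
    print(_MODULE_TO_EXTENSIONS["csv"])  # ['.csv', '.tsv', '.zip']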
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/generator/generator.py
from dataclasses import dataclass
from typing import Callable, Optional

import datasets


@dataclass
class GeneratorConfig(datasets.BuilderConfig):
    generator: Optional[Callable] = None
    gen_kwargs: Optional[dict] = None
    features: Optional[datasets.Features] = None

    def __post_init__(self):
        # raise instead of assert so the check also runs under `python -O`
        if self.generator is None:
            raise ValueError("generator must be specified")

        if self.gen_kwargs is None:
            self.gen_kwargs = {}


class Generator(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = GeneratorConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]

    def _generate_examples(self, **gen_kwargs):
        for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
            yield idx, ex
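
# Illustrative sketch (not part of the original module): `Dataset.from_generator`
# instantiates this builder, passing the user's callable through GeneratorConfig.
if __name__ == "__main__":
    from datasets import Dataset

    def squares(n):
        for i in range(n):
            yield {"i": i, "square": i * i}

    ds = Dataset.from_generator(squares, gen_kwargs={"n": 5})
    print(ds[2])  # {'i': 2, 'square': 4}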
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/arrow/arrow.py
import itertools
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ArrowConfig(datasets.BuilderConfig):
    """BuilderConfig for Arrow."""

    features: Optional[datasets.Features] = None


class Arrow(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ArrowConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                try:
                    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
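
# Illustrative sketch (not part of the original module): `load_dataset("arrow", ...)`
# dispatches to this builder; "data.arrow" is a placeholder path for illustration.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("arrow", data_files={"train": "data.arrow"}, split="train")
    print(ds.features)  # inferred from the Arrow IPC stream schema when not provided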
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/csv/csv.py
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal logger = datasets.utils.logging.get_logger(__name__) _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"] _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"] _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"] @dataclass class CsvConfig(datasets.BuilderConfig): """BuilderConfig for CSV.""" sep: str = "," delimiter: Optional[str] = None header: Optional[Union[int, List[int], str]] = "infer" names: Optional[List[str]] = None column_names: Optional[List[str]] = None index_col: Optional[Union[int, str, List[int], List[str]]] = None usecols: Optional[Union[List[int], List[str]]] = None prefix: Optional[str] = None mangle_dupe_cols: bool = True engine: Optional[Literal["c", "python", "pyarrow"]] = None converters: Dict[Union[int, str], Callable[[Any], Any]] = None true_values: Optional[list] = None false_values: Optional[list] = None skipinitialspace: bool = False skiprows: Optional[Union[int, List[int]]] = None nrows: Optional[int] = None na_values: Optional[Union[str, List[str]]] = None keep_default_na: bool = True na_filter: bool = True verbose: bool = False skip_blank_lines: bool = True thousands: Optional[str] = None decimal: str = "." lineterminator: Optional[str] = None quotechar: str = '"' quoting: int = 0 escapechar: Optional[str] = None comment: Optional[str] = None encoding: Optional[str] = None dialect: Optional[str] = None error_bad_lines: bool = True warn_bad_lines: bool = True skipfooter: int = 0 doublequote: bool = True memory_map: bool = False float_precision: Optional[str] = None chunksize: int = 10_000 features: Optional[datasets.Features] = None encoding_errors: Optional[str] = "strict" on_bad_lines: Literal["error", "warn", "skip"] = "error" date_format: Optional[str] = None def __post_init__(self): if self.delimiter is not None: self.sep = self.delimiter if self.column_names is not None: self.names = self.column_names @property def pd_read_csv_kwargs(self): pd_read_csv_kwargs = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": 
            self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if datasets.config.PANDAS_VERSION.release < (2, 0):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        # (compare full release tuples so that pandas 2.x, whose minor version resets to 0, keeps them)
        if datasets.config.PANDAS_VERSION.release < (1, 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
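
# Illustrative sketch (not part of the original module): extra keyword arguments to
# `load_dataset("csv", ...)` become CsvConfig fields and reach pandas.read_csv via
# `pd_read_csv_kwargs`; "data.tsv" is a placeholder path for illustration.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("csv", data_files="data.tsv", sep="\t", split="train")
    print(ds.column_names)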
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/spark/spark.py
import os
import posixpath
import shutil  # used by _prepare_split_single to move finished shard files
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]):
    df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}")
    for partition_id in new_partition_order[1:]:
        partition_df = df.select("*").where(f"part_id = {partition_id}")
        df_combined = df_combined.union(partition_df)
    return df_combined


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order)
        row_id = 0
        # pipeline next partition in parallel to hide latency
        rows = partition_df.toLocalIterator(prefetchPartitions=True)
        curr_partition = -1
        for row in rows:
            row_as_dict = row.asDict()
            part_id = row_as_dict["part_id"]
            row_as_dict.pop("part_id")
            if curr_partition != part_id:
                curr_partition = part_id
                row_id = 0
            yield f"{part_id}_{row_id}", row_as_dict
            row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling
        # error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
os.makedirs(cache_dir, exist_ok=True) probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(probe_file, "a") return [probe_file] if self._spark.conf.get("spark.master", "").startswith("local"): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: probe = ( self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() ) if os.path.isfile(probe[0]): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] def _repartition_df_if_needed(self, max_shard_size): import pyspark def get_arrow_batch_size(it): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) df_num_rows = self.df.count() sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. approx_bytes_per_row = ( self.df.limit(sample_num_rows) .repartition(1) .mapInArrow(get_arrow_batch_size, "batch_bytes: long") .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) .collect()[0] .sample_bytes / sample_num_rows ) approx_total_size = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) self.df = self.df.repartition(new_num_partitions) def _prepare_split_single( self, fpath: str, file_format: str, max_shard_size: int, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath embed_local_files = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. features = self.config.features writer_batch_size = self._writer_batch_size storage_options = self._fs.storage_options def write_arrow(it): # Within the same SparkContext, no two task attempts will share the same attempt ID. task_id = pyspark.TaskContext().taskAttemptId() first_batch = next(it, None) if first_batch is None: # Some partitions might not receive any data. 
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
                return  # a bare return, so this zero-stats batch is actually emitted by the generator

            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)

            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    # os.listdir returns bare file names, so build the full source path before moving
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
fs = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( task_id: int, shard_id: int, global_shard_id: int, ): rename( fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) args = [] global_shard_id = 0 for i in range(len(task_id_and_num_shards)): task_id, num_shards = task_id_and_num_shards[i] for shard_id in range(num_shards): args.append([task_id, shard_id, global_shard_id]) global_shard_id += 1 self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() else: # don't use any pattern shard_id = 0 task_id = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), ) def _get_examples_iterable_for_split( self, split_generator: "datasets.SplitGenerator", ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df)
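
# Illustrative sketch (not part of the original module), assuming a local Spark
# session: `Dataset.from_spark` wires a DataFrame into this builder, whose Spark
# tasks write the Arrow shards in parallel.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([(i, i * i) for i in range(100)], ["i", "square"])
    ds = Dataset.from_spark(df)
    print(ds[0])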
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/parquet/parquet.py
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ParquetConfig(datasets.BuilderConfig): """BuilderConfig for Parquet.""" batch_size: int = 10_000 columns: Optional[List[str]] = None features: Optional[datasets.Features] = None class Parquet(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = ParquetConfig def _info(self): if ( self.config.columns is not None and self.config.features is not None and set(self.config.columns) != set(self.config.features) ): raise ValueError( "The columns and features argument must contain the same columns, but got ", f"{self.config.columns} and {self.config.features}", ) return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(files): with open(file, "rb") as f: self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f)) break splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) if self.config.columns is not None and set(self.config.columns) != set(self.info.features): self.info.features = datasets.Features( {col: feat for col, feat in self.info.features.items() if col in self.config.columns} ) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.info.features.arrow_schema) return pa_table def _generate_tables(self, files): if self.config.features is not None and self.config.columns is not None: if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns): raise ValueError( f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" ) for file_idx, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: parquet_file = pq.ParquetFile(f) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns) ): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", 
self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
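
# Illustrative sketch (not part of the original module): the `columns` config above
# projects a subset of columns at the Parquet reader level; "data.parquet" is a
# placeholder path for illustration.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files="data.parquet", columns=["text"], split="train")
    print(ds.column_names)  # ['text']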
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py
from typing import List import datasets from datasets.tasks import ImageClassification from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for ImageFolder.""" drop_labels: bool = None drop_metadata: bool = None class ImageFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Image BASE_COLUMN_NAME = "image" BUILDER_CONFIG_CLASS = ImageFolderConfig EXTENSIONS: List[str] # definition at the bottom of the script CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label") # Obtained with: # ``` # import PIL.Image # IMAGE_EXTENSIONS = [] # PIL.Image.init() # for ext, format in PIL.Image.EXTENSION.items(): # if format in PIL.Image.OPEN: # IMAGE_EXTENSIONS.append(ext[1:]) # ``` # We intentionally do not run this code on launch because: # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic IMAGE_EXTENSIONS = [ ".blp", ".bmp", ".dib", ".bufr", ".cur", ".pcx", ".dcx", ".dds", ".ps", ".eps", ".fit", ".fits", ".fli", ".flc", ".ftc", ".ftu", ".gbr", ".gif", ".grib", ".h5", ".hdf", ".png", ".apng", ".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c", ".icns", ".ico", ".im", ".iim", ".tif", ".tiff", ".jfif", ".jpe", ".jpg", ".jpeg", ".mpg", ".mpeg", ".msp", ".pcd", ".pxr", ".pbm", ".pgm", ".ppm", ".pnm", ".psd", ".bw", ".rgb", ".rgba", ".sgi", ".ras", ".tga", ".icb", ".vda", ".vst", ".webp", ".wmf", ".emf", ".xbm", ".xpm", ] ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
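
# Illustrative sketch (not part of the original module), assuming a class-per-directory
# image layout such as path/to/images/{cat,dog}/*.jpg (placeholder path): labels are
# inferred from the folder names by the FolderBasedBuilder machinery.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("imagefolder", data_dir="path/to/images", split="train")
    print(ds.features)  # {'image': Image(...), 'label': ClassLabel(...)}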
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
import collections
import itertools
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple, Type

import pandas as pd
import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.features.features import FeatureType
from datasets.tasks.base import TaskTemplate


logger = datasets.utils.logging.get_logger(__name__)


def count_path_segments(path):
    return path.replace("\\", "/").count("/")


@dataclass
class FolderBasedBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for AutoFolder."""

    features: Optional[datasets.Features] = None
    drop_labels: bool = None
    drop_metadata: bool = None


class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
    """
    Base class for generic data loaders for image and audio data.


    Abstract class attributes to be overridden by a child class:
        BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)
        BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)
        BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
        EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAMES files
            will be included in a dataset)
        CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
    """

    BASE_FEATURE: Type[FeatureType]
    BASE_COLUMN_NAME: str
    BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
    EXTENSIONS: List[str]
    CLASSIFICATION_TASK: TaskTemplate

    METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")

        # Do an early pass if:
        # * `drop_labels` is None (default) or False, to infer the class labels
        # * `drop_metadata` is None (default) or False, to find the metadata files
        do_analyze = not self.config.drop_labels or not self.config.drop_metadata
        labels, path_depths = set(), set()
        metadata_files = collections.defaultdict(set)

        def analyze(files_or_archives, downloaded_files_or_dirs, split):
            if len(downloaded_files_or_dirs) == 0:
                return
            # The files are separated from the archives at this point, so check the first sample
            # to see if it's a file or a directory and iterate accordingly
            if os.path.isfile(downloaded_files_or_dirs[0]):
                original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
                for original_file, downloaded_file in zip(original_files, downloaded_files):
                    original_file, downloaded_file = str(original_file), str(downloaded_file)
                    _, original_file_ext = os.path.splitext(original_file)
                    if original_file_ext.lower() in self.EXTENSIONS:
                        if not self.config.drop_labels:
                            labels.add(os.path.basename(os.path.dirname(original_file)))
                            path_depths.add(count_path_segments(original_file))
                    elif os.path.basename(original_file) in self.METADATA_FILENAMES:
                        metadata_files[split].add((original_file, downloaded_file))
                    else:
                        original_file_name = os.path.basename(original_file)
                        logger.debug(
                            f"The file '{original_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
) else: archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs for archive, downloaded_dir in zip(archives, downloaded_dirs): archive, downloaded_dir = str(archive), str(downloaded_dir) for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) path_depths.add(count_path_segments(downloaded_dir_file)) elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: metadata_files[split].add((None, downloaded_dir_file)) else: archive_file_name = os.path.basename(archive) original_file_name = os.path.basename(downloaded_dir_file) logger.debug( f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." ) data_files = self.config.data_files splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files, archives = self._split_files_and_archives(files) downloaded_files = dl_manager.download(files) downloaded_dirs = dl_manager.download_and_extract(archives) if do_analyze: # drop_metadata is None or False, drop_labels is None or False logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") analyze(files, downloaded_files, split_name) analyze(archives, downloaded_dirs, split_name) if metadata_files: # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False add_metadata = not self.config.drop_metadata # if `metadata_files` are found, add labels only if # `drop_labels` is set up to False explicitly (not-default behavior) add_labels = self.config.drop_labels is False else: # if `metadata_files` are not found, don't add metadata add_metadata = False # if `metadata_files` are not found and `drop_labels` is None (default) - # add labels if files are on the same level in directory hierarchy and there is more than one label add_labels = ( (len(labels) > 1 and len(path_depths) == 1) if self.config.drop_labels is None else not self.config.drop_labels ) if add_labels: logger.info("Adding the labels inferred from data directories to the dataset's features...") if add_metadata: logger.info("Adding metadata to the dataset...") else: add_labels, add_metadata, metadata_files = False, False, {} splits.append( datasets.SplitGenerator( name=split_name, gen_kwargs={ "files": list(zip(files, downloaded_files)) + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], "metadata_files": metadata_files, "split_name": split_name, "add_labels": add_labels, "add_metadata": add_metadata, }, ) ) if add_metadata: # Verify that: # * all metadata files have the same set of features # * the `file_name` key is one of the metadata keys and is of type string features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] # Check that all metadata files share the same format metadata_ext = { os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values()) } if len(metadata_ext) > 1: raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") metadata_ext = metadata_ext.pop() for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): pa_metadata_table = self._read_metadata(downloaded_metadata_file, 
metadata_ext=metadata_ext) features_per_metadata_file.append( (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) ) for downloaded_metadata_file, metadata_features in features_per_metadata_file: if metadata_features != features_per_metadata_file[0][1]: raise ValueError( f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" ) metadata_features = features_per_metadata_file[0][1] if "file_name" not in metadata_features: raise ValueError("`file_name` must be present as dictionary key in metadata files") if metadata_features["file_name"] != datasets.Value("string"): raise ValueError("`file_name` key must be a string") del metadata_features["file_name"] else: metadata_features = None # Normally, we would do this in _info, but we need to know the labels and/or metadata # before building the features if self.config.features is None: if add_labels: self.info.features = datasets.Features( { self.BASE_COLUMN_NAME: self.BASE_FEATURE(), "label": datasets.ClassLabel(names=sorted(labels)), } ) self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] else: self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) if add_metadata: # Warn if there are duplicated keys in metadata compared to the existing features # (`BASE_COLUMN_NAME`, optionally "label") duplicated_keys = set(self.info.features) & set(metadata_features) if duplicated_keys: logger.warning( f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " f"the features dictionary." ) # skip metadata duplicated keys self.info.features.update( { feature: metadata_features[feature] for feature in metadata_features if feature not in duplicated_keys } ) return splits def _split_files_and_archives(self, data_files): files, archives = [], [] for data_file in data_files: _, data_file_ext = os.path.splitext(data_file) if data_file_ext.lower() in self.EXTENSIONS: files.append(data_file) elif os.path.basename(data_file) in self.METADATA_FILENAMES: files.append(data_file) else: archives.append(data_file) return files, archives def _read_metadata(self, metadata_file, metadata_ext: str = ""): if metadata_ext == ".csv": # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module return pa.Table.from_pandas(pd.read_csv(metadata_file)) else: with open(metadata_file, "rb") as f: return paj.read_json(f) def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): split_metadata_files = metadata_files.get(split_name, []) sample_empty_metadata = ( {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} ) last_checked_dir = None metadata_dir = None metadata_dict = None downloaded_metadata_file = None metadata_ext = "" if split_metadata_files: metadata_ext = { os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files } metadata_ext = metadata_ext.pop() file_idx = 0 for original_file, downloaded_file_or_dir in files: if original_file is not None: _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if add_metadata: # If the file is a file of a needed type, and we've just entered a new directory, # find the nereast metadata file (by counting path segments) for the directory current_dir = 
os.path.dirname(original_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is not None # ignore metadata_files that are inside archives and not os.path.relpath( original_file, os.path.dirname(metadata_file_candidate) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata( downloaded_metadata_file, metadata_ext=metadata_ext ) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." ) if metadata_dir is not None and downloaded_metadata_file is not None: file_relpath = os.path.relpath(original_file, metadata_dir) file_relpath = file_relpath.replace("\\", "/") if file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[file_relpath] else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." 
) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(original_file))} else: sample_label = {} yield ( file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_file_or_dir, **sample_metadata, **sample_label, }, ) file_idx += 1 else: for downloaded_dir_file in downloaded_file_or_dir: _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext.lower() in self.EXTENSIONS: if add_metadata: current_dir = os.path.dirname(downloaded_dir_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is None # ignore metadata_files that are not inside archives and not os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata( downloaded_metadata_file, metadata_ext=metadata_ext ) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(downloaded_metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) if metadata_dir is not None and downloaded_metadata_file is not None: downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") if downloaded_dir_file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[downloaded_dir_file_relpath] else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} else: sample_label = {} yield ( file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_dir_file, **sample_metadata, **sample_label, }, ) file_idx += 1
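A sketch of the metadata convention that `FolderBasedBuilder` implements above: a `metadata.jsonl` (or `metadata.csv`) placed next to the data files must expose a string `file_name` column with paths relative to the metadata file, and its remaining columns are merged into the dataset. The paths and the extra "caption" column below are hypothetical.

import json
from pathlib import Path

root = Path("./my_images")  # hypothetical dataset root with cat/ and dog/ sub-folders
root.mkdir(exist_ok=True)
rows = [
    {"file_name": "cat/001.jpg", "caption": "a sleeping cat"},
    {"file_name": "dog/002.jpg", "caption": "a running dog"},
]
with open(root / "metadata.jsonl", "w", encoding="utf-8") as f:
    f.writelines(json.dumps(row) + "\n" for row in rows)
# load_dataset("imagefolder", data_dir=root) would then attach the "caption"
# column to every matching image example.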
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/webdataset/webdataset.py
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets logger = datasets.utils.logging.get_logger(__name__) class WebDataset(datasets.GeneratorBasedBuilder): DEFAULT_WRITER_BATCH_SIZE = 100 IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5 @classmethod def _get_pipeline_from_tar(cls, tar_path, tar_iterator): current_example = {} for filename, f in tar_iterator: if "." in filename: example_key, field_name = filename.split(".", 1) if current_example and current_example["__key__"] != example_key: yield current_example current_example = {} current_example["__key__"] = example_key current_example["__url__"] = tar_path current_example[field_name.lower()] = f.read() if field_name in cls.DECODERS: current_example[field_name] = cls.DECODERS[field_name](current_example[field_name]) if current_example: yield current_example def _info(self) -> datasets.DatasetInfo: return datasets.DatasetInfo() def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" # Download the data files if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download(self.config.data_files) if isinstance(data_files, (str, list, tuple)): tar_paths = data_files if isinstance(tar_paths, str): tar_paths = [tar_paths] tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] splits = [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators} ) ] else: splits = [] for split_name, tar_paths in data_files.items(): if isinstance(tar_paths, str): tar_paths = [tar_paths] tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] splits.append( datasets.SplitGenerator( name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators} ) ) # Get one example to get the feature types pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0]) first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE)) if any(example.keys() != first_examples[0].keys() for example in first_examples): raise ValueError( "The TAR archives of the dataset should be in WebDataset format, " "but the files in the archive don't share the same prefix or the same types." ) inferred_arrow_schema = pa.Table.from_pylist(first_examples[:1]).schema features = datasets.Features.from_arrow_schema(inferred_arrow_schema) # Set Image types for field_name in first_examples[0]: extension = field_name.rsplit(".", 1)[-1] if extension in self.IMAGE_EXTENSIONS: features[field_name] = datasets.Image() self.info.features = features return splits def _generate_examples(self, tar_paths, tar_iterators): image_field_names = [ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image) ] for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)): for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)): for field_name in image_field_names: example[field_name] = {"path": example["__key__"] + "." 
+ field_name, "bytes": example[field_name]} yield f"{tar_idx}_{example_idx}", example # Obtained with: # ``` # import PIL.Image # IMAGE_EXTENSIONS = [] # PIL.Image.init() # for ext, format in PIL.Image.EXTENSION.items(): # if format in PIL.Image.OPEN: # IMAGE_EXTENSIONS.append(ext[1:]) # ``` # We intentionally do not run this code on launch because: # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic IMAGE_EXTENSIONS = [ "blp", "bmp", "dib", "bufr", "cur", "pcx", "dcx", "dds", "ps", "eps", "fit", "fits", "fli", "flc", "ftc", "ftu", "gbr", "gif", "grib", "h5", "hdf", "png", "apng", "jp2", "j2k", "jpc", "jpf", "jpx", "j2c", "icns", "ico", "im", "iim", "tif", "tiff", "jfif", "jpe", "jpg", "jpeg", "mpg", "mpeg", "msp", "pcd", "pxr", "pbm", "pgm", "ppm", "pnm", "psd", "bw", "rgb", "rgba", "sgi", "ras", "tga", "icb", "vda", "vst", "webp", "wmf", "emf", "xbm", "xpm", ] WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS def text_loads(data: bytes): return data.decode("utf-8") def tenbin_loads(data: bytes): from . import _tenbin return _tenbin.decode_buffer(data) def msgpack_loads(data: bytes): import msgpack return msgpack.unpackb(data) def npy_loads(data: bytes): import numpy.lib.format stream = io.BytesIO(data) return numpy.lib.format.read_array(stream, allow_pickle=False) def npz_loads(data: bytes): return np.load(io.BytesIO(data), allow_pickle=False) def cbor_loads(data: bytes): import cbor return cbor.loads(data) # Obtained by checking `decoders` in `webdataset.autodecode` # and removing unsafe extension decoders. # Removed Pickle decoders: # - "pyd": lambda data: pickle.loads(data) # - "pickle": lambda data: pickle.loads(data) # Removed Torch decoders: # - "pth": lambda data: torch_loads(data) # Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False): # - "npy": npy_loads, # - "npz": lambda data: np.load(io.BytesIO(data)), DECODERS = { "txt": text_loads, "text": text_loads, "transcript": text_loads, "cls": int, "cls2": int, "index": int, "inx": int, "id": int, "json": json.loads, "jsn": json.loads, "ten": tenbin_loads, "tb": tenbin_loads, "mp": msgpack_loads, "msg": msgpack_loads, "npy": npy_loads, "npz": npz_loads, "cbor": cbor_loads, } WebDataset.DECODERS = DECODERS
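An illustrative sketch of the layout the WebDataset builder above expects: files in a TAR archive that share a key prefix form one example, and suffixes such as `.json` or `.txt` are decoded with the `DECODERS` table. The archive name and its contents are hypothetical.

import io
import json
import tarfile

with tarfile.open("sample.tar", "w") as tar:
    payloads = {
        "0001.txt": b"a tiny caption",
        "0001.json": json.dumps({"label": 3}).encode("utf-8"),
    }
    for name, payload in payloads.items():
        info = tarfile.TarInfo(name=name)
        info.size = len(payload)
        tar.addfile(info, io.BytesIO(payload))
# load_dataset("webdataset", data_files="sample.tar", split="train") should then
# yield one example with "__key__", "__url__", "txt" and "json" fields.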
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/webdataset/_tenbin.py
# # Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved. # This file comes from the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). # """ Binary tensor encodings for PyTorch and NumPy. This defines efficient binary encodings for tensors. The format is 8 byte aligned and can be used directly for computations when transmitted, say, via RDMA. The format is supported by WebDataset with the `.ten` filename extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used for fast tensor storage with LMDB and in disk files (which can be memory mapped). Data is encoded as a series of chunks: - magic number (int64) - length in bytes (int64) - bytes (multiple of 64 bytes long) Arrays are a header chunk followed by a data chunk. Header chunks have the following structure: - dtype (int64) - 8 byte array name - ndim (int64) - dim[0] - dim[1] - ... """ import struct import sys import numpy as np def bytelen(a): """Determine the length of a in bytes.""" if hasattr(a, "nbytes"): return a.nbytes elif isinstance(a, (bytearray, bytes)): return len(a) else: raise ValueError(a, "cannot determine nbytes") def bytedata(a): """Return the raw data corresponding to a.""" if isinstance(a, (bytearray, bytes, memoryview)): return a elif hasattr(a, "data"): return a.data else: raise ValueError(a, "cannot return bytedata") # tables for converting between long/short NumPy dtypes long_to_short = """ float16 f2 float32 f4 float64 f8 int8 i1 int16 i2 int32 i4 int64 i8 uint8 u1 uint16 u2 uint32 u4 uint64 u8 """.strip() long_to_short = [x.split() for x in long_to_short.split("\n")] long_to_short = {x[0]: x[1] for x in long_to_short} short_to_long = {v: k for k, v in long_to_short.items()} def check_acceptable_input_type(data, allow64): """Check that the data has an acceptable type for tensor encoding. 
:param data: array :param allow64: allow 64 bit types """ for a in data: if a.dtype.name not in long_to_short: raise ValueError("unsupported datatype") if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]: raise ValueError("64 bit datatypes not allowed unless explicitly enabled") def str64(s): """Convert a string to an int64.""" s = s + "\0" * (8 - len(s)) s = s.encode("ascii") return struct.unpack("@q", s)[0] def unstr64(i): """Convert an int64 to a string.""" b = struct.pack("@q", i) return b.decode("ascii").strip("\0") def check_infos(data, infos, required_infos=None): """Verify the info strings.""" if required_infos is False or required_infos is None: return data if required_infos is True: return data, infos if not isinstance(required_infos, (tuple, list)): raise ValueError("required_infos must be tuple or list") for required, actual in zip(required_infos, infos): if required != actual: raise ValueError(f"actual info {actual} doesn't match required info {required}") return data def encode_header(a, info=""): """Encode an array header as a byte array.""" if a.ndim >= 10: raise ValueError("too many dimensions") if a.nbytes != np.prod(a.shape) * a.itemsize: raise ValueError("mismatch between size and shape") if a.dtype.name not in long_to_short: raise ValueError("unsupported array type") header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape) return bytedata(np.array(header, dtype="i8")) def decode_header(h): """Decode a byte array into an array header.""" h = np.frombuffer(h, dtype="i8") if unstr64(h[0]) not in short_to_long: raise ValueError("unsupported array type") dtype = np.dtype(short_to_long[unstr64(h[0])]) info = unstr64(h[1]) rank = int(h[2]) shape = tuple(h[3 : 3 + rank]) return shape, dtype, info def encode_list(l, infos=None): # noqa: E741 """Given a list of arrays, encode them into a list of byte arrays.""" if infos is None: infos = [""] else: if len(l) != len(infos): raise ValueError(f"length of list {l} must match length of infos {infos}") result = [] for i, a in enumerate(l): header = encode_header(a, infos[i % len(infos)]) result += [header, bytedata(a)] return result def decode_list(l, infos=False): # noqa: E741 """Given a list of byte arrays, decode them into arrays.""" result = [] infos0 = [] for header, data in zip(l[::2], l[1::2]): shape, dtype, info = decode_header(header) a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape) result += [a] infos0 += [info] return check_infos(result, infos0, infos) magic_str = "~TenBin~" magic = str64(magic_str) magic_bytes = unstr64(magic).encode("ascii") def roundup(n, k=64): """Round up to the next multiple of 64.""" return k * ((n + k - 1) // k) def encode_chunks(l): # noqa: E741 """Encode a list of chunks into a single byte array, with lengths and magics.""" size = sum(16 + roundup(b.nbytes) for b in l) result = bytearray(size) offset = 0 for b in l: result[offset : offset + 8] = magic_bytes offset += 8 result[offset : offset + 8] = struct.pack("@q", b.nbytes) offset += 8 result[offset : offset + bytelen(b)] = b offset += roundup(bytelen(b)) return result def decode_chunks(buf): """Decode a byte array into a list of chunks.""" result = [] offset = 0 total = bytelen(buf) while offset < total: if magic_bytes != buf[offset : offset + 8]: raise ValueError("magic bytes mismatch") offset += 8 nbytes = struct.unpack("@q", buf[offset : offset + 8])[0] offset += 8 b = buf[offset : offset + nbytes] offset += roundup(nbytes) result.append(b) return result def encode_buffer(l, infos=None): 
# noqa: E741 """Encode a list of arrays into a single byte array.""" if not isinstance(l, list): raise ValueError("requires list") return encode_chunks(encode_list(l, infos=infos)) def decode_buffer(buf, infos=False): """Decode a byte array into a list of arrays.""" return decode_list(decode_chunks(buf), infos=infos) def write_chunk(stream, buf): """Write a byte chunk to the stream with magics, length, and padding.""" nbytes = bytelen(buf) stream.write(magic_bytes) stream.write(struct.pack("@q", nbytes)) stream.write(bytedata(buf)) padding = roundup(nbytes) - nbytes if padding > 0: stream.write(b"\0" * padding) def read_chunk(stream): """Read a byte chunk from a stream with magics, length, and padding.""" magic = stream.read(8) if magic == b"": return None if magic != magic_bytes: raise ValueError("magic number does not match") nbytes = stream.read(8) nbytes = struct.unpack("@q", nbytes)[0] if nbytes < 0: raise ValueError("negative nbytes") data = stream.read(nbytes) padding = roundup(nbytes) - nbytes if padding > 0: stream.read(padding) return data def write(stream, l, infos=None): # noqa: E741 """Write a list of arrays to a stream, with magics, length, and padding.""" for chunk in encode_list(l, infos=infos): write_chunk(stream, chunk) def read(stream, n=sys.maxsize, infos=False): """Read a list of arrays from a stream, with magics, length, and padding.""" chunks = [] for _ in range(n): header = read_chunk(stream) if header is None: break data = read_chunk(stream) if data is None: raise ValueError("premature EOF") chunks += [header, data] return decode_list(chunks, infos=infos) def save(fname, *args, infos=None, nocheck=False): """Save a list of arrays to a file, with magics, length, and padding.""" if not nocheck and not fname.endswith(".ten"): raise ValueError("file name should end in .ten") with open(fname, "wb") as stream: write(stream, args, infos=infos) def load(fname, infos=False, nocheck=False): """Read a list of arrays from a file, with magics, length, and padding.""" if not nocheck and not fname.endswith(".ten"): raise ValueError("file name should end in .ten") with open(fname, "rb") as stream: return read(stream, infos=infos)
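A small round-trip sketch through the tenbin codec above (illustrative; it assumes NumPy is installed and that `encode_buffer`/`decode_buffer` are in scope, e.g. imported from this `_tenbin` module).

import numpy as np

arrays = [np.arange(6, dtype="float32").reshape(2, 3), np.array([1, 2, 3], dtype="int32")]
buffer = encode_buffer(arrays)   # one 64-byte-aligned byte buffer with magics and lengths
restored = decode_buffer(buffer)
assert all((a == b).all() and a.dtype == b.dtype for a, b in zip(arrays, restored))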
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/json/json.py
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline logger = datasets.utils.logging.get_logger(__name__) @dataclass class JsonConfig(datasets.BuilderConfig): """BuilderConfig for JSON.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" encoding_errors: Optional[str] = None field: Optional[str] = None use_threads: bool = True # deprecated block_size: Optional[int] = None # deprecated chunksize: int = 10 << 20 # 10MB newlines_in_values: Optional[bool] = None class Json(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = JsonConfig def _info(self): if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead") self.config.chunksize = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." ) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported") return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): type = self.config.features.arrow_schema.field(column_name).type pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: dataset = json.load(f) # We keep only the field we are interested in dataset = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(dataset, (list, tuple)): keys = set().union(*[row.keys() for row in dataset]) mapping = {col: [row.get(col) for row in dataset] for col in keys} else: mapping = dataset pa_table = pa.Table.from_pydict(mapping) yield file_idx, self._cast_table(pa_table) # If the file has one json object per line else: with open(file, "rb") as f: batch_idx = 0 # Use block_size 
equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small block_size = max(self.config.chunksize // 32, 16 << 10) encoding_errors = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: batch = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(f) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8") try: while True: try: pa_table = paj.read_json( io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(e, pa.ArrowInvalid) and "straddling" not in str(e) or block_size > len(batch) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f: dataset = json.load(f) except json.JSONDecodeError: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(dataset, list): # list is the only sequence type supported in JSON try: keys = set().union(*[row.keys() for row in dataset]) mapping = {col: [row.get(col) for row in dataset] for col in keys} pa_table = pa.Table.from_pydict(mapping) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise ValueError(f"Not able to read records in the JSON file at {file}.") from None yield file_idx, self._cast_table(pa_table) break else: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise ValueError( f"Not able to read records in the JSON file at {file}. " f"You should probably indicate the field of the JSON file containing your records. " f"This JSON file contain the following fields: {str(list(dataset.keys()))}. " f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1
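A usage sketch for the JSON builder above: newline-delimited JSON is parsed in chunks with pyarrow, and a nested document can be read through the `field` parameter. File names are hypothetical placeholders.

import json
from datasets import load_dataset

with open("data.jsonl", "w", encoding="utf-8") as f:
    for row in [{"text": "hello", "score": 1}, {"text": "world", "score": 2}]:
        f.write(json.dumps(row) + "\n")

ds = load_dataset("json", data_files="data.jsonl", split="train")
print(ds[0])  # {'text': 'hello', 'score': 1}

# A nested file such as {"version": "1.0", "rows": [...]} could be read with:
# load_dataset("json", data_files="data.json", field="rows", split="train")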
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """Builder Config for AudioFolder.""" drop_labels: bool = None drop_metadata: bool = None class AudioFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Audio BASE_COLUMN_NAME = "audio" BUILDER_CONFIG_CLASS = AudioFolderConfig EXTENSIONS: List[str] # definition at the bottom of the script CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label") # Obtained with: # ``` # import soundfile as sf # # AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()] # # # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30: # AUDIO_EXTENSIONS.extend([".mp3", ".opus"]) # ``` # We intentionally do not run this code on launch because: # (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic AUDIO_EXTENSIONS = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
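A usage sketch for the "audiofolder" builder above; the directory is a hypothetical placeholder, and decoding the `audio` column relies on the optional soundfile/librosa dependencies.

from datasets import load_dataset

# Hypothetical layout: ./my_audio/yes/*.wav and ./my_audio/no/*.wav
ds = load_dataset("audiofolder", data_dir="./my_audio", split="train")
print(ds.features)  # expected: {'audio': Audio(...), 'label': ClassLabel(names=['no', 'yes'])}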
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/sql/sql.py
import sys from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast if TYPE_CHECKING: import sqlite3 import sqlalchemy logger = datasets.utils.logging.get_logger(__name__) @dataclass class SqlConfig(datasets.BuilderConfig): """BuilderConfig for SQL.""" sql: Union[str, "sqlalchemy.sql.Selectable"] = None con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None index_col: Optional[Union[str, List[str]]] = None coerce_float: bool = True params: Optional[Union[List, Tuple, Dict]] = None parse_dates: Optional[Union[List, Dict]] = None columns: Optional[List[str]] = None chunksize: Optional[int] = 10_000 features: Optional[datasets.Features] = None def __post_init__(self): if self.sql is None: raise ValueError("sql must be specified") if self.con is None: raise ValueError("con must be specified") def create_config_id( self, config_kwargs: dict, custom_features: Optional[datasets.Features] = None, ) -> str: config_kwargs = config_kwargs.copy() # We need to stringify the Selectable object to make its hash deterministic # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html sql = config_kwargs["sql"] if not isinstance(sql, str): if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules: import sqlalchemy if isinstance(sql, sqlalchemy.sql.Selectable): engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") sql_str = str(sql.compile(dialect=engine.dialect)) config_kwargs["sql"] = sql_str else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) con = config_kwargs["con"] if not isinstance(con, str): config_kwargs["con"] = id(con) logger.info( f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." 
) return super().create_config_id(config_kwargs, custom_features=custom_features) @property def pd_read_sql_kwargs(self): pd_read_sql_kwargs = { "index_col": self.index_col, "columns": self.columns, "params": self.params, "coerce_float": self.coerce_float, "parse_dates": self.parse_dates, } return pd_read_sql_kwargs class Sql(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = SqlConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self): chunksize = self.config.chunksize sql_reader = pd.read_sql( self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs ) sql_reader = [sql_reader] if chunksize is None else sql_reader for chunk_idx, df in enumerate(sql_reader): pa_table = pa.Table.from_pandas(df) yield chunk_idx, self._cast_table(pa_table)
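A usage sketch for the SQL builder above through the public `Dataset.from_sql` entry point. Passing the connection as a URI string (which requires SQLAlchemy) keeps the config hashable, as noted in `create_config_id`; the database and table names below are hypothetical.

import sqlite3
from datasets import Dataset

con = sqlite3.connect("example.db")
con.execute("CREATE TABLE IF NOT EXISTS items (id INTEGER, name TEXT)")
con.executemany("INSERT INTO items VALUES (?, ?)", [(1, "a"), (2, "b")])
con.commit()
con.close()

ds = Dataset.from_sql("SELECT id, name FROM items", "sqlite:///example.db")
print(ds.column_names)  # ['id', 'name']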
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/text/text.py
import itertools import warnings from dataclasses import InitVar, dataclass from io import StringIO from typing import Optional import pyarrow as pa import datasets from datasets.features.features import require_storage_cast from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class TextConfig(datasets.BuilderConfig): """BuilderConfig for text files.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" errors: InitVar[Optional[str]] = "deprecated" encoding_errors: Optional[str] = None chunksize: int = 10 << 20 # 10MB keep_linebreaks: bool = False sample_by: str = "line" def __post_init__(self, errors): if errors != "deprecated": warnings.warn( "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'encoding_errors={errors}' instead.", FutureWarning, ) self.encoding_errors = errors class Text(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = TextConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. If str or List[str], then the dataset returns only the 'train' split. If dict, then keys should be from the `datasets.Split` enum. """ if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa_table.cast(schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table else: return pa_table.cast(pa.schema({"text": pa.string()})) def _generate_tables(self, files): pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: if self.config.sample_by == "line": batch_idx = 0 while True: batch = f.read(self.config.chunksize) if not batch: break batch += f.readline() # finish current line # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) batch = StringIO(batch).readlines() if not self.config.keep_linebreaks: batch = [line.rstrip("\n") for line in batch] pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # 
logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 elif self.config.sample_by == "paragraph": batch_idx = 0 batch = "" while True: new_batch = f.read(self.config.chunksize) if not new_batch: break batch += new_batch batch += f.readline() # finish current line batch = batch.split("\n\n") pa_table = pa.Table.from_arrays( [pa.array([example for example in batch[:-1] if example])], names=pa_table_names ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 batch = batch[-1] if batch: pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) yield (file_idx, batch_idx), self._cast_table(pa_table) elif self.config.sample_by == "document": text = f.read() pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) yield file_idx, self._cast_table(pa_table)
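A usage sketch for the text builder above, contrasting the default one-example-per-line behaviour with `sample_by="paragraph"`. The file name is a hypothetical placeholder.

from datasets import load_dataset

with open("notes.txt", "w", encoding="utf-8") as f:
    f.write("first line\nsecond line\n\na new paragraph\n")

per_line = load_dataset("text", data_files="notes.txt", split="train")
per_paragraph = load_dataset("text", data_files="notes.txt", sample_by="paragraph", split="train")
print(len(per_line), len(per_paragraph))  # expected: more rows per line than per paragraph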
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/pandas/pandas.py
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class PandasConfig(datasets.BuilderConfig): """BuilderConfig for Pandas.""" features: Optional[datasets.Features] = None class Pandas(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = PandasConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for i, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: pa_table = pa.Table.from_pandas(pd.read_pickle(f)) yield i, self._cast_table(pa_table)
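A usage sketch for the pandas builder above, which reads pickled DataFrames via `pd.read_pickle`; the file name is a hypothetical placeholder.

import pandas as pd
from datasets import load_dataset

pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("frame.pkl")
ds = load_dataset("pandas", data_files="frame.pkl", split="train")
print(ds[0])  # {'a': 1, 'b': 'x'}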
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/spark.py
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class SparkDatasetReader(AbstractDatasetReader): """A dataset reader that reads from a Spark DataFrame. When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be provided. Streaming is not currently supported. """ def __init__( self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs, ): super().__init__( split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, ) self._load_from_cache_file = load_from_cache_file self._file_format = file_format self.builder = Spark( df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, ) def read(self): if self.streaming: return self.builder.as_streaming_dataset(split=self.split) download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=download_mode, file_format=self._file_format, ) return self.builder.as_dataset(split=self.split)
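A usage sketch for `SparkDatasetReader` above through the public `Dataset.from_spark` entry point (assumes a local PySpark installation; the DataFrame contents are illustrative).

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
ds = Dataset.from_spark(df)
print(ds.column_names)  # ['text', 'label']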
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/text.py
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class TextDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Text( cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset
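A usage sketch for `TextDatasetReader` above via `Dataset.from_text`; the file name is a hypothetical placeholder.

from datasets import Dataset

with open("lines.txt", "w", encoding="utf-8") as f:
    f.write("alpha\nbeta\n")

ds = Dataset.from_text("lines.txt")
print(ds["text"])  # ['alpha', 'beta']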
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/csv.py
import multiprocessing import os from typing import BinaryIO, Optional, Union from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.csv.csv import Csv from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class CsvDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Csv( cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class CsvDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_csv_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.encoding = "utf-8" self.to_csv_kwargs = to_csv_kwargs def write(self) -> int: _ = self.to_csv_kwargs.pop("path_or_buf", None) header = self.to_csv_kwargs.pop("header", True) index = self.to_csv_kwargs.pop("index", False) if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with open(self.path_or_buf, "wb+") as buffer: written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs) else: written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs) return written def _batch_csv(self, args): offset, header, index, to_csv_kwargs = args batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) csv_str = batch.to_pandas().to_csv( path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs ) return csv_str.encode(self.encoding) def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int: """Writes the pyarrow table as CSV to a binary file handle. Caller is responsible for opening and closing the handle. 
""" written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating CSV from Arrow format", ): csv_str = self._batch_csv((offset, header, index, to_csv_kwargs)) written += file_obj.write(csv_str) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for csv_str in hf_tqdm( pool.imap( self._batch_csv, [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating CSV from Arrow format", ): written += file_obj.write(csv_str) return written
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/sql.py
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import tqdm as hf_tqdm from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlite3 import sqlalchemy class SqlDatasetReader(AbstractDatasetInputStream): def __init__( self, sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs) self.builder = Sql( cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, ) def read(self): download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, ) # Build dataset for splits dataset = self.builder.as_dataset( split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class SqlDatasetWriter: def __init__( self, dataset: Dataset, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.name = name self.con = con self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.to_sql_kwargs = to_sql_kwargs def write(self) -> int: _ = self.to_sql_kwargs.pop("sql", None) _ = self.to_sql_kwargs.pop("con", None) index = self.to_sql_kwargs.pop("index", False) written = self._write(index=index, **self.to_sql_kwargs) return written def _batch_sql(self, args): offset, index, to_sql_kwargs = args to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) df = batch.to_pandas() num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs) return num_rows or len(df) def _write(self, index, **to_sql_kwargs) -> int: """Writes the pyarrow table as SQL to a database. Caller is responsible for opening and closing the SQL connection. """ written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating SQL from Arrow format", ): written += self._batch_sql((offset, index, to_sql_kwargs)) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for num_rows in hf_tqdm( pool.imap( self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating SQL from Arrow format", ): written += num_rows return written
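A usage sketch for `SqlDatasetWriter` above via `Dataset.to_sql`, which writes batches through `pandas.DataFrame.to_sql`; the SQLite file and table names are hypothetical.

import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "name": ["a", "b"]})
con = sqlite3.connect("out.db")
ds.to_sql("items", con)  # returns the number of written rows
con.close()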
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/parquet.py
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def get_writer_batch_size(features: Features) -> Optional[int]: """ Get the writer_batch_size that defines the maximum row group size in the parquet files. The default in `datasets` is 1,000 but we lower it to 100 for image datasets. This allows to optimize random access to parquet file, since accessing 1 row requires to read its entire row group. This can be improved to get optimized size for querying/iterating but at least it matches the dataset viewer expectations on HF. Args: ds_config_info (`datasets.info.DatasetInfo`): Dataset info from `datasets`. Returns: writer_batch_size (`Optional[int]`): Writer batch size to pass to a dataset builder. If `None`, then it will use the `datasets` default. """ batch_size = np.inf def set_batch_size(feature: FeatureType) -> None: nonlocal batch_size if isinstance(feature, Image): batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) elif isinstance(feature, Audio): batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) elif isinstance(feature, Value) and feature.dtype == "binary": batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) _visit(features, set_batch_size) return None if batch_size is np.inf else batch_size class ParquetDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} hash = _PACKAGED_DATASETS_MODULES["parquet"][1] self.builder = Parquet( cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class ParquetDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ): self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size or get_writer_batch_size(dataset.features) self.parquet_writer_kwargs = parquet_writer_kwargs def write(self) -> int: batch_size = self.batch_size if self.batch_size else 
config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with open(self.path_or_buf, "wb+") as buffer: written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) else: written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) return written def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: """Writes the pyarrow table as Parquet to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 _ = parquet_writer_kwargs.pop("path_or_buf", None) schema = self.dataset.features.arrow_schema writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) for offset in hf_tqdm( range(0, len(self.dataset), batch_size), unit="ba", desc="Creating parquet from Arrow format", ): batch = query_table( table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, ) writer.write_table(batch) written += batch.nbytes writer.close() return written
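# Illustrative usage sketch: the reader/writer classes above are normally reached through
# the public `Dataset` API rather than instantiated directly. Assumes the `datasets`
# library is installed; "data.parquet" is an arbitrary example path.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
ds.to_parquet("data.parquet")  # delegates to ParquetDatasetWriter
reloaded = Dataset.from_parquet("data.parquet")  # delegates to ParquetDatasetReader
assert reloaded.num_rows == 3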
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/json.py
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class JsonDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) self.field = field path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Json( cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class JsonDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.encoding = "utf-8" self.to_json_kwargs = to_json_kwargs def write(self) -> int: _ = self.to_json_kwargs.pop("path_or_buf", None) orient = self.to_json_kwargs.pop("orient", "records") lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False) if "index" not in self.to_json_kwargs and orient in ["split", "table"]: self.to_json_kwargs["index"] = False compression = self.to_json_kwargs.pop("compression", None) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f"`datasets` currently does not support {compression} compression") if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer: written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) else: if compression: raise NotImplementedError( f"The compression parameter is not supported when writing to a buffer, but compression={compression}" " was passed. Please provide a local path instead." 
) written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) return written def _batch_json(self, args): offset, orient, lines, to_json_kwargs = args batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) if not json_str.endswith("\n"): json_str += "\n" return json_str.encode(self.encoding) def _write( self, file_obj: BinaryIO, orient, lines, **to_json_kwargs, ) -> int: """Writes the pyarrow table as JSON lines to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating json from Arrow format", ): json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) written += file_obj.write(json_str) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for json_str in hf_tqdm( pool.imap( self._batch_json, [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating json from Arrow format", ): written += file_obj.write(json_str) return written
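# Illustrative usage sketch: `Dataset.to_json` and `Dataset.from_json` are the public
# entry points that delegate to JsonDatasetWriter / JsonDatasetReader. Assumes the
# `datasets` library is installed; "data.jsonl" is an arbitrary example path.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("data.jsonl", lines=True)  # JSON Lines output, one record per line
reloaded = Dataset.from_json("data.jsonl")
assert set(reloaded.column_names) == {"text", "label"}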
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/abc.py
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class AbstractDatasetReader(ABC): def __init__( self, path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None, split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): self.path_or_paths = path_or_paths self.split = split if split or isinstance(path_or_paths, dict) else "train" self.features = features self.cache_dir = cache_dir self.keep_in_memory = keep_in_memory self.streaming = streaming self.num_proc = num_proc self.kwargs = kwargs @abstractmethod def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: pass class AbstractDatasetInputStream(ABC): def __init__( self, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): self.features = features self.cache_dir = cache_dir self.keep_in_memory = keep_in_memory self.streaming = streaming self.num_proc = num_proc self.kwargs = kwargs @abstractmethod def read(self) -> Union[Dataset, IterableDataset]: pass
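# Illustrative sketch of the contract above: a concrete reader stores its inputs in
# __init__ and implements read(). `InMemoryListReader` is a hypothetical toy class,
# not part of the `datasets` API; it reuses the `Dataset` name imported at the top of
# this module.
class InMemoryListReader(AbstractDatasetReader):
    """Toy reader that "reads" from an in-memory list of row dictionaries."""

    def __init__(self, rows, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        return Dataset.from_list(self.rows)


# Example: InMemoryListReader([{"x": 1}, {"x": 2}]).read() returns a 2-row Dataset.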
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/io/generator.py
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class GeneratorDatasetInputStream(AbstractDatasetInputStream): def __init__( self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ): super().__init__( features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) self.builder = Generator( cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split="train") # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, try_from_hf_gcs=False, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset
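# Illustrative usage sketch: `Dataset.from_generator` is the public entry point that
# wraps GeneratorDatasetInputStream. Assumes the `datasets` library is installed; the
# generator below is an arbitrary example.
from datasets import Dataset

def gen(n=3):
    for i in range(n):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen, gen_kwargs={"n": 3})
assert ds.num_rows == 3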
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/audio.py
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType @dataclass class Audio: """Audio [`Feature`] to extract audio data from an audio file. Input: The Audio feature accepts as input: - A `str`: Absolute path to the audio file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `bytes`: Bytes content of the audio file. This is useful for archived files with sequential access. - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. This is useful for archived files with sequential access. Args: sampling_rate (`int`, *optional*): Target sampling rate. If `None`, the native sampling rate is used. mono (`bool`, defaults to `True`): Whether to convert the audio signal to mono by averaging samples across channels. decode (`bool`, defaults to `True`): Whether to decode the audio data. If `False`, returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`. Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` """ sampling_rate: Optional[int] = None mono: bool = True decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Audio", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict]) -> dict: """Encode example into a format for Arrow. Args: value (`str` or `dict`): Data passed as input to Audio feature. Returns: `dict` """ try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. 
except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err if isinstance(value, str): return {"bytes": None, "path": value} elif isinstance(value, bytes): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes buffer = BytesIO() sf.write(buffer, value["array"], value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm"): # "PCM" only has raw audio bytes if value.get("sampling_rate") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") if value.get("bytes"): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767 else: bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767 buffer = BytesIO(bytes()) sf.write(buffer, bytes_value, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example( self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: """Decode example audio file into audio data. Args: value (`dict`): A dictionary with keys: - `path`: String with relative audio file path. - `bytes`: Bytes of the audio file. token_per_repo_id (`dict`, *optional*): To access and decode audio files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`) Returns: `dict` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.") path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.") try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err audio_format = xsplitext(path)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. 
' ) if file is None: token_per_repo_id = token_per_repo_id or {} source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id[repo_id] except (ValueError, KeyError): token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: array, sampling_rate = sf.read(f) else: array, sampling_rate = sf.read(file) array = array.T if self.mono: array = librosa.to_mono(array) if self.sampling_rate and self.sampling_rate != sampling_rate: array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate) sampling_rate = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature.") return { "bytes": Value("binary"), "path": Value("string"), } def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: """Cast an Arrow array to the Audio arrow storage type. The Arrow types that can be converted to the Audio pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the audio bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter Args: storage (`Union[pa.StringArray, pa.StructArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})` """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"): storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed audio files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" @no_op_if_value_is_null def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type)
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/translation.py
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`FeatureConnector` for translations with fixed languages per example. Here for compatibility with tfds. Args: languages (`list` of `str`): A list of language codes; each example is a dictionary mapping every code to its string translation. Example: ```python >>> # At construction time: >>> datasets.features.Translation(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': 'le chat', ... 'de': 'die katze' ... } ``` """ languages: List[str] id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="Translation", init=False, repr=False) def __call__(self): return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """Flatten the Translation feature into a dictionary.""" from .features import Value return {k: Value("string") for k in sorted(self.languages)} @dataclass class TranslationVariableLanguages: """`FeatureConnector` for translations with variable languages per example. Here for compatibility with tfds. Args: languages (`list` of `str`, *optional*): A list of permitted language codes; each example is a dictionary mapping a subset of these codes to one or more string translations. The languages present may vary from example to example. Returns: - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`): Language codes sorted in ascending order or plain text translations, sorted to align with language codes. Example: ```python >>> # At construction time: >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': ['le chat', 'la chatte'], ... 'de': 'die katze' ... } >>> # Tensor returned: >>> { ... 'language': ['de', 'en', 'fr', 'fr'], ... 'translation': ['die katze', 'the cat', 'la chatte', 'le chat'], ... } ``` """ languages: Optional[List] = None num_languages: Optional[int] = None id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="TranslationVariableLanguages", init=False, repr=False) def __post_init__(self): self.languages = sorted(set(self.languages)) if self.languages else None self.num_languages = len(self.languages) if self.languages else None def __call__(self): return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def encode_example(self, translation_dict): lang_set = set(self.languages) if self.languages and set(translation_dict) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. translation_tuples = [] for lang, text in translation_dict.items(): if isinstance(text, str): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. 
languages, translations = zip(*sorted(translation_tuples)) return {"language": languages, "translation": translations} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """Flatten the TranslationVariableLanguages feature into a dictionary.""" from .features import Sequence, Value return { "language": Sequence(Value("string")), "translation": Sequence(Value("string")), }
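# Illustrative sketch of encode_example on the feature defined above: multiple French
# translations are flattened into (language, text) pairs and sorted by language code.
# Uses the TranslationVariableLanguages class defined in this module.
feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
encoded = feature.encode_example(
    {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
)
# encoded == {"language": ("de", "en", "fr", "fr"),
#             "translation": ("die katze", "the cat", "la chatte", "le chat")}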
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/image.py
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None _NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _VALID_IMAGE_ARRAY_DTPYES = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class Image: """Image [`Feature`] to read image data from an image file. Input: The Image feature accepts as input: - A `str`: Absolute path to the image file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the image file to the archive file. - `bytes`: Bytes of the image file. This is useful for archived files with sequential access. - An `np.ndarray`: NumPy array representing an image. - A `PIL.Image.Image`: PIL image object. Args: decode (`bool`, defaults to `True`): Whether to decode the image data. If `False`, returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`. Examples: ```py >>> from datasets import load_dataset, Image >>> ds = load_dataset("beans", split="train") >>> ds.features["image"] Image(decode=True, id=None) >>> ds[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0> >>> ds = ds.cast_column('image', Image(decode=False)) {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'} ``` """ decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "PIL.Image.Image" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Image", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`): Data passed as input to Image feature. 
Returns: `dict` with "path" and "bytes" fields """ if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, bytes): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(value) elif isinstance(value, PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image": """Decode example image file into image data. Args: value (`str` or `dict`): A string with the absolute image file path, a dictionary with keys: - `path`: String with absolute or relative image file path. - `bytes`: The bytes of the image file. token_per_repo_id (`dict`, *optional*): To access and decode image files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `PIL.Image.Image` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.") if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install 'Pillow'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): image = PIL.Image.open(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id.get(repo_id) except ValueError: token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) image = PIL.Image.open(bytes_) else: image = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors return image def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Image arrow storage type. 
The Arrow types that can be converted to the Image pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the image bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the image array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed image files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" @no_op_if_value_is_null def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) def list_image_compression_formats() -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys())) return _IMAGE_COMPRESSION_FORMATS def image_to_bytes(image: "PIL.Image.Image") -> bytes: """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression.""" buffer = BytesIO() if image.format in list_image_compression_formats(): format = image.format else: format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(buffer, format=format) return buffer.getvalue() def encode_pil_image(image: "PIL.Image.Image") -> dict: if hasattr(image, "filename") and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(image)} def encode_np_array(array: np.ndarray) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") dtype = array.dtype dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER dtype_kind = dtype.kind dtype_itemsize = dtype.itemsize dest_dtype = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." ) dest_dtype = np.dtype("|u1") if dtype != dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize) if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = np.dtype(dtype_str) warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot downcast dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) image = PIL.Image.fromarray(array.astype(dest_dtype)) return {"path": None, "bytes": image_to_bytes(image)} def objects_to_list_of_image_dicts( objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`.""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if objs: _, obj = first_non_null_value(objs) if isinstance(obj, str): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(obj, np.ndarray): obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array) return [obj_to_image_dict_func(obj) for obj in objs] elif isinstance(obj, PIL.Image.Image): obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image) return [obj_to_image_dict_func(obj) for obj in objs] else: return objs else: return objs
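# Illustrative usage sketch: encoding a NumPy array through the Image feature defined
# above (via encode_np_array). Assumes the optional Pillow dependency is installed; the
# random 32x32 RGB array is an arbitrary example.
import numpy as np
from datasets import Dataset, Image

arr = (np.random.rand(32, 32, 3) * 255).astype("uint8")
ds = Dataset.from_dict({"image": [arr]}).cast_column("image", Image())
pil_image = ds[0]["image"]  # decoded lazily into a PIL.Image.Image
assert pil_image.size == (32, 32)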
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/features.py
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """ This class handle features definition in datasets and some utilities to display table type.""" import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1 from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages logger = logging.get_logger(__name__) def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str: """ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype. 
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` """ if pyarrow.types.is_null(arrow_type): return "null" elif pyarrow.types.is_boolean(arrow_type): return "bool" elif pyarrow.types.is_int8(arrow_type): return "int8" elif pyarrow.types.is_int16(arrow_type): return "int16" elif pyarrow.types.is_int32(arrow_type): return "int32" elif pyarrow.types.is_int64(arrow_type): return "int64" elif pyarrow.types.is_uint8(arrow_type): return "uint8" elif pyarrow.types.is_uint16(arrow_type): return "uint16" elif pyarrow.types.is_uint32(arrow_type): return "uint32" elif pyarrow.types.is_uint64(arrow_type): return "uint64" elif pyarrow.types.is_float16(arrow_type): return "float16" # pyarrow dtype is "halffloat" elif pyarrow.types.is_float32(arrow_type): return "float32" # pyarrow dtype is "float" elif pyarrow.types.is_float64(arrow_type): return "float64" # pyarrow dtype is "double" elif pyarrow.types.is_time32(arrow_type): return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_time64(arrow_type): return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_timestamp(arrow_type): if arrow_type.tz is None: return f"timestamp[{arrow_type.unit}]" elif arrow_type.tz: return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]" else: raise ValueError(f"Unexpected timestamp object {arrow_type}.") elif pyarrow.types.is_date32(arrow_type): return "date32" # pyarrow dtype is "date32[day]" elif pyarrow.types.is_date64(arrow_type): return "date64" # pyarrow dtype is "date64[ms]" elif pyarrow.types.is_duration(arrow_type): return f"duration[{arrow_type.unit}]" elif pyarrow.types.is_decimal128(arrow_type): return f"decimal128({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_decimal256(arrow_type): return f"decimal256({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_binary(arrow_type): return "binary" elif pyarrow.types.is_large_binary(arrow_type): return "large_binary" elif pyarrow.types.is_string(arrow_type): return "string" elif pyarrow.types.is_large_string(arrow_type): return "large_string" else: raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.") def string_to_arrow(datasets_dtype: str) -> pa.DataType: """ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` This is necessary because the datasets.Value() primitive type is constructed using a string dtype Value(dtype=str) But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema, which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this function. """ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None): msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type." if examples: examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0] msg += f"\nValid examples include: {examples}." if urls: urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0] msg += f"\nFor more insformation, see: {urls}." 
return msg if datasets_dtype in pa.__dict__: return pa.__dict__[datasets_dtype]() if (datasets_dtype + "_") in pa.__dict__: return pa.__dict__[datasets_dtype + "_"]() timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype) if timestamp_matches: timestamp_internals = timestamp_matches.group(1) internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals) if timestamp_internals in ["s", "ms", "us", "ns"]: return pa.timestamp(timestamp_internals) elif internals_matches: return pa.timestamp(internals_matches.group(1), internals_matches.group(2)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "timestamp", examples=["timestamp[us]", "timestamp[us, tz=America/New_York"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"], ) ) duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype) if duration_matches: duration_internals = duration_matches.group(1) if duration_internals in ["s", "ms", "us", "ns"]: return pa.duration(duration_internals) else: raise ValueError( _dtype_error_msg( datasets_dtype, "duration", examples=["duration[s]", "duration[us]"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"], ) ) time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype) if time_matches: time_internals_bits = time_matches.group(1) if time_internals_bits == "32": time_internals_unit = time_matches.group(2) if time_internals_unit in ["s", "ms"]: return pa.time32(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)." ) elif time_internals_bits == "64": time_internals_unit = time_matches.group(2) if time_internals_unit in ["us", "ns"]: return pa.time64(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)." 
) else: raise ValueError( _dtype_error_msg( datasets_dtype, "time", examples=["time32[s]", "time64[us]"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html", "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html", ], ) ) decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype) if decimal_matches: decimal_internals_bits = decimal_matches.group(1) if decimal_internals_bits == "128": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal128(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal128", examples=["decimal128(10, 2)", "decimal128(4, -2)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"], ) ) elif decimal_internals_bits == "256": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal256(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal256", examples=["decimal256(30, 2)", "decimal256(38, -4)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"], ) ) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal", examples=["decimal128(12, 3)", "decimal256(40, 6)"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html", "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html", ], ) ) raise ValueError( f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. " f"Please make sure to use a correct data type, see: " f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions" ) def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]: """ Cast pytorch/tensorflow/pandas objects to python numpy array/lists. It works recursively. If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast. only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. 
Returns: casted_obj: the casted object has_changed (bool): True if the object has been changed, False if it is identical """ if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp if config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(obj, np.ndarray): if obj.ndim == 0: return obj[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj, False else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj ], True, ) elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor): if obj.ndim == 0: return obj.detach().cpu().numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.detach().cpu().numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.detach().cpu().numpy() ], True, ) elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor): if obj.ndim == 0: return obj.numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.numpy() ], True, ) elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray): if obj.ndim == 0: return np.asarray(obj)[()], True elif not only_1d_for_numpy or obj.ndim == 1: return np.asarray(obj), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in np.asarray(obj) ], True, ) elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image): return encode_pil_image(obj), True elif isinstance(obj, pd.Series): return ( _cast_to_python_objects( obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, pd.DataFrame): return ( { key: _cast_to_python_objects( value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for key, value in obj.to_dict("list").items() }, True, ) elif isinstance(obj, pd.Timestamp): return obj.to_pydatetime(), True elif isinstance(obj, pd.Timedelta): return obj.to_pytimedelta(), True elif isinstance(obj, Mapping): has_changed = not isinstance(obj, dict) output = {} for k, v in obj.items(): casted_v, has_changed_v = _cast_to_python_objects( v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) has_changed |= has_changed_v output[k] = casted_v return output if has_changed else obj, has_changed elif hasattr(obj, "__array__"): return ( _cast_to_python_objects( obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, (list, tuple)): if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt): break casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects( first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) if has_changed_first_elmt or not optimize_list_casting: return ( [ _cast_to_python_objects( elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for elmt in obj ], 
True, ) else: if isinstance(obj, (list, tuple)): return obj, False else: return list(obj), True else: return obj, False else: return obj, False def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """ return _cast_to_python_objects( obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] @dataclass class Value: """ The `Value` dtypes are as follows: - `null` - `bool` - `int8` - `int16` - `int32` - `int64` - `uint8` - `uint16` - `uint32` - `uint64` - `float16` - `float32` (alias float) - `float64` (alias double) - `time32[(s|ms)]` - `time64[(us|ns)]` - `timestamp[(s|ms|us|ns)]` - `timestamp[(s|ms|us|ns), tz=(tzstring)]` - `date32` - `date64` - `duration[(s|ms|us|ns)]` - `decimal128(precision, scale)` - `decimal256(precision, scale)` - `binary` - `large_binary` - `string` - `large_string` Example: ```py >>> from datasets import Features >>> features = Features({'stars': Value(dtype='int32')}) >>> features {'stars': Value(dtype='int32', id=None)} ``` """ dtype: str id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="Value", init=False, repr=False) def __post_init__(self): if self.dtype == "double": # fix inferred type self.dtype = "float64" if self.dtype == "float": # fix inferred type self.dtype = "float32" self.pa_type = string_to_arrow(self.dtype) def __call__(self): return self.pa_type def encode_example(self, value): if pa.types.is_boolean(self.pa_type): return bool(value) elif pa.types.is_integer(self.pa_type): return int(value) elif pa.types.is_floating(self.pa_type): return float(value) elif pa.types.is_string(self.pa_type): return str(value) else: return value class _ArrayXD: def __post_init__(self): self.shape = tuple(self.shape) def __call__(self): pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype) return pa_type def encode_example(self, value): return value @dataclass class Array2D(_ArrayXD): """Create a two-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. 
Example: ```py >>> from datasets import Features >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array2D", init=False, repr=False) @dataclass class Array3D(_ArrayXD): """Create a three-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array3D", init=False, repr=False) @dataclass class Array4D(_ArrayXD): """Create a four-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array4D", init=False, repr=False) @dataclass class Array5D(_ArrayXD): """Create a five-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array5D", init=False, repr=False) class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. 
Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) class Array2DExtensionType(_ArrayXDExtensionType): ndims = 2 class Array3DExtensionType(_ArrayXDExtensionType): ndims = 3 class Array4DExtensionType(_ArrayXDExtensionType): ndims = 4 class Array5DExtensionType(_ArrayXDExtensionType): ndims = 5 # Register the extension types for deserialization pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool: """ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. 
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22 """ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType: if pa.types.is_list(pa_type): return _unnest_pa_type(pa_type.value_type) return pa_type if unnest: pa_type = _unnest_pa_type(pa_type) return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type)) class ArrayExtensionArray(pa.ExtensionArray): def __array__(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) return self.to_numpy(zero_copy_only=zero_copy_only) def __getitem__(self, i): return self.storage[i] def to_numpy(self, zero_copy_only=True): storage: pa.ListArray = self.storage null_mask = storage.is_null().to_numpy(zero_copy_only=False) if self.type.shape[0] is not None: size = 1 null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask)) for i in range(self.type.ndims): size *= self.type.shape[i] storage = storage.flatten() numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only) numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape) if len(null_indices): numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0) else: shape = self.type.shape ndims = self.type.ndims arrays = [] first_dim_offsets = np.array([off.as_py() for off in storage.offsets]) for i, is_null in enumerate(null_mask): if is_null: arrays.append(np.nan) else: storage_el = storage[i : i + 1] first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i] # flatten storage for _ in range(ndims): storage_el = storage_el.flatten() numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only) arrays.append(numpy_arr.reshape(first_dim, *shape[1:])) if len(np.unique(np.diff(first_dim_offsets))) > 1: # ragged numpy_arr = np.empty(len(arrays), dtype=object) numpy_arr[:] = arrays else: numpy_arr = np.array(arrays) return numpy_arr def to_pylist(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only) if self.type.shape[0] is None and numpy_arr.dtype == object: return [arr.tolist() for arr in numpy_arr.tolist()] else: return numpy_arr.tolist() class PandasArrayExtensionDtype(PandasExtensionDtype): _metadata = "value_type" def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]): self._value_type = value_type def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): if isinstance(array, pa.ChunkedArray): array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) return PandasArrayExtensionArray(numpy_arr) @classmethod def construct_array_type(cls): return PandasArrayExtensionArray @property def type(self) -> type: return np.ndarray @property def kind(self) -> str: return "O" @property def name(self) -> str: return f"array[{self.value_type}]" @property def value_type(self) -> np.dtype: return self._value_type class PandasArrayExtensionArray(PandasExtensionArray): def 
__init__(self, data: np.ndarray, copy: bool = False): self._data = data if not copy else np.array(data) self._dtype = PandasArrayExtensionDtype(data.dtype) def __array__(self, dtype=None): """ Convert to NumPy Array. Note that Pandas expects a 1D array when dtype is set to object. But for other dtypes, the returned shape is the same as the one of ``data``. More info about pandas 1D requirement for PandasExtensionArray here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray """ if dtype == object: out = np.empty(len(self._data), dtype=object) for i in range(len(self._data)): out[i] = self._data[i] return out if dtype is None: return self._data else: return self._data.astype(dtype) def copy(self, deep: bool = False) -> "PandasArrayExtensionArray": return PandasArrayExtensionArray(self._data, copy=True) @classmethod def _from_sequence( cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False ) -> "PandasArrayExtensionArray": if len(scalars) > 1 and all( isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars ): data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy) else: data = np.empty(len(scalars), dtype=object) data[:] = scalars return cls(data, copy=copy) @classmethod def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray": if len(to_concat) > 1 and all( va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype for va in to_concat ): data = np.vstack([va._data for va in to_concat]) else: data = np.empty(len(to_concat), dtype=object) data[:] = [va._data for va in to_concat] return cls(data, copy=False) @property def dtype(self) -> PandasArrayExtensionDtype: return self._dtype @property def nbytes(self) -> int: return self._data.nbytes def isna(self) -> np.ndarray: return np.array([pd.isna(arr).any() for arr in self._data]) def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: raise NotImplementedError() def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]: if isinstance(item, int): return self._data[item] return PandasArrayExtensionArray(self._data[item], copy=False) def take( self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None ) -> "PandasArrayExtensionArray": indices: np.ndarray = np.asarray(indices, dtype=int) if allow_fill: fill_value = ( self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type) ) mask = indices == -1 if (indices < -1).any(): raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True") elif len(self) > 0: pass elif not np.all(mask): raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.") else: data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type) return PandasArrayExtensionArray(data, copy=False) took = self._data.take(indices, axis=0) if allow_fill and mask.any(): took[mask] = [fill_value] * np.sum(mask) return PandasArrayExtensionArray(took, copy=False) def __len__(self) -> int: return len(self._data) def __eq__(self, other) -> np.ndarray: if not isinstance(other, PandasArrayExtensionArray): raise NotImplementedError(f"Invalid type to compare to: {type(other)}") return (self._data == other._data).all() def pandas_types_mapper(dtype): if isinstance(dtype, 
_ArrayXDExtensionType): return PandasArrayExtensionDtype(dtype.value_type) @dataclass class ClassLabel: """Feature type for integer class labels. There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments: * `num_classes`: Create 0 to (num_classes-1) labels. * `names`: List of label strings. * `names_file`: File containing the list of labels. Under the hood the labels are stored as integers. You can use negative integers to represent unknown/missing labels. Args: num_classes (`int`, *optional*): Number of classes. All labels must be < `num_classes`. names (`list` of `str`, *optional*): String names for the integer classes. The order in which the names are provided is kept. names_file (`str`, *optional*): Path to a file with names for the integer classes, one per line. Example: ```py >>> from datasets import Features >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])}) >>> features {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)} ``` """ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict names: List[str] = None names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "int64" pa_type: ClassVar[Any] = pa.int64() _str2int: ClassVar[Dict[str, int]] = None _int2str: ClassVar[Dict[int, int]] = None _type: str = field(default="ClassLabel", init=False, repr=False) def __post_init__(self, num_classes, names_file): self.num_classes = num_classes self.names_file = names_file if self.names_file is not None and self.names is not None: raise ValueError("Please provide either names or names_file but not both.") # Set self.names if self.names is None: if self.names_file is not None: self.names = self._load_names_from_file(self.names_file) elif self.num_classes is not None: self.names = [str(i) for i in range(self.num_classes)] else: raise ValueError("Please provide either num_classes, names or names_file.") elif not isinstance(self.names, SequenceABC): raise TypeError(f"Please provide names as a list, is {type(self.names)}") # Set self.num_classes if self.num_classes is None: self.num_classes = len(self.names) elif self.num_classes != len(self.names): raise ValueError( "ClassLabel number of names do not match the defined num_classes. " f"Got {len(self.names)} names VS {self.num_classes} num_classes" ) # Prepare mappings self._int2str = [str(name) for name in self.names] self._str2int = {name: i for i, name in enumerate(self._int2str)} if len(self._int2str) != len(self._str2int): raise ValueError("Some label names are duplicated. Each label name should be unique.") def __call__(self): return self.pa_type def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: """Conversion class name `string` => `integer`. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].str2int('neg') 0 ``` """ if not isinstance(values, str) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, str): values = [values] return_list = False output = [self._strval2int(value) for value in values] return output if return_list else output[0] def _strval2int(self, value: str) -> int: failed_parse = False value = str(value) # first attempt - raw string value int_value = self._str2int.get(value) if int_value is None: # second attempt - strip whitespace int_value = self._str2int.get(value.strip()) if int_value is None: # third attempt - convert str to int try: int_value = int(value) except ValueError: failed_parse = True else: if int_value < -1 or int_value >= self.num_classes: failed_parse = True if failed_parse: raise ValueError(f"Invalid string class label {value}") return int_value def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0] def encode_example(self, example_data): if self.num_classes is None: raise ValueError( "Trying to use ClassLabel feature with undefined number of class. " "Please set ClassLabel.names or num_classes." ) # If a string is given, convert to associated integer if isinstance(example_data, str): example_data = self.str2int(example_data) # Allowing -1 to mean no label. if not -1 <= example_data < self.num_classes: raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}") return example_data def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: """Cast an Arrow array to the `ClassLabel` arrow storage type. The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are: - `pa.string()` - `pa.int()` Args: storage (`Union[pa.StringArray, pa.IntegerArray]`): PyArrow array to cast. Returns: `pa.Int64Array`: Array in the `ClassLabel` arrow storage type. 
""" if isinstance(storage, pa.IntegerArray) and len(storage) > 0: min_max = pc.min_max(storage).as_py() if min_max["max"] is not None and min_max["max"] >= self.num_classes: raise ValueError( f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}" ) elif isinstance(storage, pa.StringArray): storage = pa.array( [self._strval2int(label) if label is not None else None for label in storage.to_pylist()] ) return array_cast(storage, self.pa_type) @staticmethod def _load_names_from_file(names_filepath): with open(names_filepath, encoding="utf-8") as f: return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names @dataclass class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) FeatureType = Union[ dict, list, tuple, Value, ClassLabel, Translation, TranslationVariableLanguages, Sequence, Array2D, Array3D, Array4D, Array5D, Audio, Image, ] def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool: """ Check if the object is not None. If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence. """ if obj is None: return False elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))): if len(obj) > 0: if schema is None: pass elif isinstance(schema, (list, tuple)): schema = schema[0] else: schema = schema.feature return _check_non_null_non_empty_recursive(obj[0], schema) else: return False else: return True def get_nested_type(schema: FeatureType) -> pa.DataType: """ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of generate_from_arrow_type(). 
It performs double-duty as the implementation of Features.type and handles the conversion of datasets.Feature->pa.struct """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, Features): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # Features is subclass of dict, and dict order is deterministic since Python 3.6 elif isinstance(schema, dict): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # however don't sort on struct types since the order matters elif isinstance(schema, (list, tuple)): if len(schema) != 1: raise ValueError("When defining list feature, you should just provide one example of the inner type") value_type = get_nested_type(schema[0]) return pa.list_(value_type) elif isinstance(schema, Sequence): value_type = get_nested_type(schema.feature) # We allow to reverse list of dict => dict of list for compatibility with tfds if isinstance(schema.feature, dict): return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type}) return pa.list_(value_type, schema.length) # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods) return schema() def encode_nested_example(schema, obj, level=0): """Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): if level == 0 and obj is None: raise ValueError("Got None but expected a dictionary instead") return ( { k: encode_nested_example(sub_schema, sub_obj, level=level + 1) for k, (sub_schema, sub_obj) in zip_dict(schema, obj) } if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) elif isinstance(schema, Sequence): if obj is None: return None # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k, dict_tuples in zip_dict(schema.feature, *obj): list_dict[k] = [encode_nested_example(dict_tuples[0], o, level=level + 1) for o in dict_tuples[1:]] return list_dict else: # obj is a single dict for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj): list_dict[k] = [encode_nested_example(sub_schema, o, level=level + 1) for o in sub_objs] return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f"Got a string but expected a list instead: '{obj}'") else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if ( not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt ): return 
[encode_nested_example(schema.feature, o, level=level + 1) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return ( {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): # we pass the token to read and decode files from private repositories in streaming mode if obj is not None and schema.decode: return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) return obj def generate_from_dict(obj: Any): """Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(obj, list): return [generate_from_dict(value) for value in obj] # Otherwise we have a dict or a dataclass if "_type" not in obj or isinstance(obj["_type"], dict): return {key: generate_from_dict(value) for key, value in obj.items()} obj = dict(obj) class_type = globals()[obj.pop("_type")] if class_type == Sequence: return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1)) field_names = {f.name for f in fields(class_type)} return class_type(**{k: v for k, v in obj.items() if k in field_names}) def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: """ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema """ if isinstance(pa_type, pa.StructType): return {field.name: generate_from_arrow_type(field.type) for field in pa_type} elif isinstance(pa_type, pa.FixedSizeListType): return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size) elif isinstance(pa_type, pa.ListType): feature = generate_from_arrow_type(pa_type.value_type) if isinstance(feature, (dict, tuple, list)): return [feature] return Sequence(feature=feature) elif isinstance(pa_type, _ArrayXDExtensionType): array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims] return array_feature(shape=pa_type.shape, dtype=pa_type.value_type) elif isinstance(pa_type, pa.DictionaryType): raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. 
to the py_table elif isinstance(pa_type, pa.DataType): return Value(dtype=_arrow_to_datasets_dtype(pa_type)) else: raise ValueError(f"Cannot convert {pa_type} to a Feature type.") def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a multidimensional NumPy array""" arr = np.array(arr) values = pa.array(arr.flatten(), type=type) for i in range(arr.ndim - 1): n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1) step_offsets = arr.shape[arr.ndim - i - 1] offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) values = pa.ListArray.from_arrays(offsets, values) return values def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray: null_mask = np.array([arr is None for arr in l_arr]) null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask)) l_arr = [arr for arr in l_arr if arr is not None] offsets = np.cumsum( [0] + [len(arr) for arr in l_arr], dtype=object ) # convert to dtype object to allow None insertion offsets = np.insert(offsets, null_indices, None) offsets = pa.array(offsets, type=pa.int32()) values = pa.concat_arrays(l_arr) return pa.ListArray.from_arrays(offsets, values) def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a possibly nested list of NumPy arrays""" if len(l_arr) > 0: return list_of_pa_arrays_to_pyarrow_listarray( [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr] ) else: return pa.array([], type=type) def contains_any_np_array(data: Any): """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray. Args: data (Any): Data. Returns: bool """ if isinstance(data, np.ndarray): return True elif isinstance(data, list): return contains_any_np_array(first_non_null_value(data)[1]) else: return False def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray: """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray. Args: data (Union[np.ndarray, List]): Data. type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type. Returns: pa.ListArray """ if isinstance(data, np.ndarray): return numpy_to_pyarrow_listarray(data, type=type) elif isinstance(data, list): return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data]) def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array: """Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array """ if contains_any_np_array(data): return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type) else: return pa.array(data, pa_type.storage_dtype) def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType: """Visit a (possibly nested) feature. 
Args: feature (FeatureType): the feature type to be checked Returns: visited feature (FeatureType) """ if isinstance(feature, dict): out = func({k: _visit(f, func) for k, f in feature.items()}) elif isinstance(feature, (list, tuple)): out = func([_visit(feature[0], func)]) elif isinstance(feature, Sequence): out = func(Sequence(_visit(feature.feature, func), length=feature.length)) else: out = func(feature) return feature if out is None else out def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool: """Check if a (possibly nested) feature requires decoding. Args: feature (FeatureType): the feature type to be checked ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value of the `decode` attribute of the decodable feature types. Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_decoding(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_decoding(feature[0]) elif isinstance(feature, Sequence): return require_decoding(feature.feature) else: return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True) def require_storage_cast(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires storage casting. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "cast_storage") def require_storage_embed(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires embedding data into storage. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "embed_storage") def keep_features_dicts_synced(func): """ Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary. """ @wraps(func) def wrapper(*args, **kwargs): if args: self: "Features" = args[0] args = args[1:] else: self: "Features" = kwargs.pop("self") out = func(self, *args, **kwargs) assert hasattr(self, "_column_requires_decoding") self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()} return out wrapper._decorator_name_ = "_keep_dicts_synced" return wrapper class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`. - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields features. 
It's possible to have nested fields of nested fields in an arbitrary manner. - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature type hosted in this list. <Tip> A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatilbity layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a python `list` instead of the [`~datasets.Sequence`]. </Tip> - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation. """ def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) @property def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) @property def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) @classmethod def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. Returns: [`Features`] """ # try to load features from the arrow schema metadata if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: return Features.from_dict(metadata["info"]["features"]) obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema} return cls(**obj) @classmethod def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. 
We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: -> sequence: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]: feature["sequence"] = feature["sequence"]["dtype"] # # sequence: -> sequence: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]: feature["sequence"] = feature["sequence"]["struct"] # # list: -> list: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]: feature["list"] = feature["list"]["dtype"] # # list: -> list: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]: feature["list"] = feature["list"]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return to_yaml_inner(yaml_data)["struct"] @classmethod def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: int32 -> sequence: # -> dtype: int32 # if 
isinstance(feature.get("sequence"), str): feature["sequence"] = {"dtype": feature["sequence"]} # # list: int32 -> list: # -> dtype: int32 # if isinstance(feature.get("list"), str): feature["list"] = {"dtype": feature["list"]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # for audio and image that are Audio and Image types, not Value return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. 
Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. 
Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have to features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): source, id_, length = source.feature, source.id, source.length if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], id=id_, length=length) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self def _align_features(features_list: List[Features]) -> List[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] def _check_if_features_can_be_aligned(features_list: List[Features]): """Check if the dictionaries of features can be aligned. Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`. """ name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v for features in features_list: for k, v in features.items(): if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v: raise ValueError( f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").' )
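# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the feature types defined above are meant to be
# combined. The column names and sample values below are assumptions made
# purely for illustration; the classes and methods used (Features, Value,
# ClassLabel, Sequence, Array2D, encode_example, arrow_schema,
# from_arrow_schema) are the ones defined in this file.
if __name__ == "__main__":
    sketch_features = Features(
        {
            "text": Value("string"),
            "label": ClassLabel(names=["neg", "pos"]),
            "scores": Sequence(Value("float32")),
            "matrix": Array2D(shape=(2, 2), dtype="int64"),
        }
    )

    # ClassLabel.encode_example maps the string label to its integer id
    # (here "pos" -> 1), while the other columns pass through unchanged.
    encoded_row = sketch_features.encode_example(
        {"text": "hello", "label": "pos", "scores": [0.1, 0.2], "matrix": [[1, 2], [3, 4]]}
    )
    assert encoded_row["label"] == 1

    # arrow_schema embeds the feature definitions as "huggingface" metadata,
    # so from_arrow_schema restores the typed features (e.g. the ClassLabel)
    # rather than falling back to plain integer/string types.
    roundtrip = Features.from_arrow_schema(sketch_features.arrow_schema)
    assert roundtrip["label"] == sketch_features["label"]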
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/__init__.py
# flake8: noqa
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
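# --- Illustrative usage sketch (not part of the original __init__.py) ---
# The re-exports above let downstream code import every public feature type
# from `datasets.features` (or the top-level `datasets` package) instead of
# the private submodules. The token-classification layout below is an
# assumption made purely for illustration.
if __name__ == "__main__":
    sketch = Features(
        {
            "tokens": Sequence(Value("string")),
            "ner_tags": Sequence(ClassLabel(names=["O", "B-PER", "I-PER"])),
        }
    )
    print(sketch)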
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/dummy_data.py
import fnmatch import json import os import shutil import tempfile import xml.etree.ElementTree as ET from argparse import ArgumentParser from pathlib import Path from typing import Optional from datasets import config from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.download.mock_download_manager import MockDownloadManager from datasets.load import dataset_module_factory, import_main_class from datasets.utils.deprecation_utils import deprecated from datasets.utils.logging import get_logger, set_verbosity_warning from datasets.utils.py_utils import map_nested logger = get_logger(__name__) DEFAULT_ENCODING = "utf-8" def dummy_data_command_factory(args): return DummyDataCommand( args.path_to_dataset, args.auto_generate, args.n_lines, args.json_field, args.xml_tag, args.match_text_files, args.keep_uncompressed, args.cache_dir, args.encoding, ) class DummyDataGeneratorDownloadManager(DownloadManager): def __init__(self, mock_download_manager, *args, **kwargs): super().__init__(*args, **kwargs) self.mock_download_manager = mock_download_manager self.downloaded_dummy_paths = [] self.expected_dummy_paths = [] def download(self, url_or_urls): output = super().download(url_or_urls) dummy_output = self.mock_download_manager.download(url_or_urls) map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) return output def download_and_extract(self, url_or_urls): output = super().extract(super().download(url_or_urls)) dummy_output = self.mock_download_manager.download(url_or_urls) map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) return output def auto_generate_dummy_data_folder( self, n_lines: int = 5, json_field: Optional[str] = None, xml_tag: Optional[str] = None, match_text_files: Optional[str] = None, encoding: Optional[str] = None, ) -> bool: os.makedirs( os.path.join( self.mock_download_manager.datasets_scripts_dir, self.mock_download_manager.dataset_name, self.mock_download_manager.dummy_data_folder, "dummy_data", ), exist_ok=True, ) total = 0 self.mock_download_manager.load_existing_dummy_data = False for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths): dst_path = os.path.join( self.mock_download_manager.datasets_scripts_dir, self.mock_download_manager.dataset_name, self.mock_download_manager.dummy_data_folder, relative_dst_path, ) total += self._create_dummy_data( src_path, dst_path, n_lines=n_lines, json_field=json_field, xml_tag=xml_tag, match_text_files=match_text_files, encoding=encoding, ) if total == 0: logger.error( "Dummy data generation failed: no dummy files were created. " "Make sure the data files format is supported by the auto-generation." 
) return total > 0 def _create_dummy_data( self, src_path: str, dst_path: str, n_lines: int, json_field: Optional[str] = None, xml_tag: Optional[str] = None, match_text_files: Optional[str] = None, encoding: Optional[str] = None, ) -> int: encoding = encoding or DEFAULT_ENCODING if os.path.isfile(src_path): logger.debug(f"Trying to generate dummy data file {dst_path}") dst_path_extensions = Path(dst_path).suffixes line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"] is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions) if match_text_files is not None: file_name = os.path.basename(dst_path) for pattern in match_text_files.split(","): is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern) # Line by line text file (txt, csv etc.) if is_line_by_line_text_file: Path(dst_path).parent.mkdir(exist_ok=True, parents=True) with open(src_path, encoding=encoding) as src_file: with open(dst_path, "w", encoding=encoding) as dst_file: first_lines = [] for i, line in enumerate(src_file): if i >= n_lines: break first_lines.append(line) dst_file.write("".join(first_lines).strip()) return 1 # json file elif ".json" in dst_path_extensions: with open(src_path, encoding=encoding) as src_file: json_data = json.load(src_file) if json_field is not None: json_data = json_data[json_field] if isinstance(json_data, dict): if not all(isinstance(v, list) for v in json_data.values()): raise ValueError( f"Couldn't parse columns {list(json_data.keys())}. " "Maybe specify which json field must be used " "to read the data with --json_field <my_field>." ) first_json_data = {k: v[:n_lines] for k, v in json_data.items()} else: first_json_data = json_data[:n_lines] if json_field is not None: first_json_data = {json_field: first_json_data} Path(dst_path).parent.mkdir(exist_ok=True, parents=True) with open(dst_path, "w", encoding=encoding) as dst_file: json.dump(first_json_data, dst_file) return 1 # xml file elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]): if xml_tag is None: logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag") else: self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding) return 1 logger.warning( f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data." ) return 0 # directory, iterate through all files elif os.path.isdir(src_path): total = 0 for path, _, files in os.walk(src_path): for name in files: if not name.startswith("."): # ignore files like .DS_Store etc. 
                        src_file_path = os.path.join(path, name)
                        dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
                        total += self._create_dummy_data(
                            src_file_path,
                            dst_file_path,
                            n_lines=n_lines,
                            json_field=json_field,
                            xml_tag=xml_tag,
                            match_text_files=match_text_files,
                            encoding=encoding,
                        )
            return total

    @staticmethod
    def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
        Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
        with open(src_path, encoding=encoding) as src_file:
            n_line = 0
            parents = []
            for event, elem in ET.iterparse(src_file, events=("start", "end")):
                if event == "start":
                    parents.append(elem)
                else:
                    _ = parents.pop()
                    if elem.tag == xml_tag:
                        if n_line < n_lines:
                            n_line += 1
                        else:
                            if parents:
                                parents[-1].remove(elem)
        ET.ElementTree(element=elem).write(dst_path, encoding=encoding)

    def compress_autogenerated_dummy_data(self, path_to_dataset):
        root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
        base_name = os.path.join(root_dir, "dummy_data")
        base_dir = "dummy_data"
        logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
        shutil.make_archive(base_name, "zip", root_dir, base_dir)
        shutil.rmtree(base_name)


@deprecated(
    "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
)
class DummyDataCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
        test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
        test_parser.add_argument(
            "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
        )
        test_parser.add_argument(
            "--json_field",
            type=str,
            default=None,
            help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
        )
        test_parser.add_argument(
            "--xml_tag",
            type=str,
            default=None,
            help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
        )
        test_parser.add_argument(
            "--match_text_files",
            type=str,
            default=None,
            help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
        )
        test_parser.add_argument(
            "--keep_uncompressed",
            action="store_true",
            help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging and for making manual adjustments before compressing.",
        )
        test_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory to download and cache files when auto-generating dummy data",
        )
        test_parser.add_argument(
            "--encoding",
            type=str,
            default=None,
            help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
        )
        test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
        test_parser.set_defaults(func=dummy_data_command_factory)

    def __init__(
        self,
        path_to_dataset: str,
        auto_generate: bool,
        n_lines: int,
        json_field: Optional[str],
        xml_tag: Optional[str],
        match_text_files: Optional[str],
        keep_uncompressed: bool,
        cache_dir: Optional[str],
        encoding: Optional[str],
    ):
        self._path_to_dataset = path_to_dataset
        if os.path.isdir(path_to_dataset):
            self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
        else:
            self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
        cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
        self._auto_generate = auto_generate
        self._n_lines = n_lines
        self._json_field = json_field
        self._xml_tag = xml_tag
        self._match_text_files = match_text_files
        self._keep_uncompressed = keep_uncompressed
        self._cache_dir = cache_dir
        self._encoding = encoding

    def run(self):
        set_verbosity_warning()
        dataset_module = dataset_module_factory(self._path_to_dataset)
        builder_cls = import_main_class(dataset_module.module_path)

        # use `None` as config if no configs
        builder_configs = builder_cls.BUILDER_CONFIGS or [None]
        auto_generate_results = []
        with tempfile.TemporaryDirectory() as tmp_dir:
            for builder_config in builder_configs:
                config_name = builder_config.name if builder_config else None
                dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
                version = builder_config.version if builder_config else dataset_builder.config.version
                mock_dl_manager = MockDownloadManager(
                    dataset_name=self._dataset_name,
                    config=builder_config,
                    version=version,
                    use_local_dummy_data=True,
                    load_existing_dummy_data=False,
                )

                if self._auto_generate:
                    auto_generate_results.append(
                        self._autogenerate_dummy_data(
                            dataset_builder=dataset_builder,
                            mock_dl_manager=mock_dl_manager,
                            keep_uncompressed=self._keep_uncompressed,
                        )
                    )
                else:
                    self._print_dummy_data_instructions(
                        dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
                    )

        if self._auto_generate and not self._keep_uncompressed:
            if all(auto_generate_results):
                print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
            else:
                print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")

    def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
        dl_cache_dir = (
            os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
            if self._cache_dir
            else config.DOWNLOADED_DATASETS_PATH
        )
        download_config = DownloadConfig(cache_dir=dl_cache_dir)
        dl_manager = DummyDataGeneratorDownloadManager(
            dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
        )
        dataset_builder._split_generators(dl_manager)
        mock_dl_manager.load_existing_dummy_data = False  # don't use real dummy data
        dl_manager.auto_generate_dummy_data_folder(
            n_lines=self._n_lines,
            json_field=self._json_field,
            xml_tag=self._xml_tag,
            match_text_files=self._match_text_files,
            encoding=self._encoding,
        )
        if not keep_uncompressed:
            path_to_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
            dl_manager.compress_autogenerated_dummy_data(path_to_dataset)
            # now test that the dummy_data.zip file actually works
            mock_dl_manager.load_existing_dummy_data = True  # use real dummy data
            n_examples_per_split = {}
            os.makedirs(dataset_builder._cache_dir, exist_ok=True)
            try:
                split_generators = dataset_builder._split_generators(mock_dl_manager)
                for split_generator in split_generators:
                    dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
                    n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
            except OSError as e:
                logger.error(
                    f"Failed to load dummy data for config '{dataset_builder.config.name}'.\nOriginal error:\n" + str(e)
                )
                return False
            else:
                if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
                    logger.warning(
                        f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}'."
                    )
                    return True
                else:
                    empty_splits = [
                        split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
                    ]
                    logger.warning(
                        f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}'."
                    )
                    return False
        else:
            generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
            logger.info(
                f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
                "Please compress this directory into a zip file to use it for dummy data tests."
            )

    def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
        dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
        logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
        os.makedirs(dummy_data_folder, exist_ok=True)

        try:
            generator_splits = dataset_builder._split_generators(mock_dl_manager)
        except FileNotFoundError as e:
            print(
                f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider opening files only in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
            )

        files_to_create = set()
        split_names = []
        dummy_file_name = mock_dl_manager.dummy_file_name

        for split in generator_splits:
            logger.info(f"Collecting dummy data file paths to create for {split.name}")
            split_names.append(split.name)
            gen_kwargs = split.gen_kwargs
            generator = dataset_builder._generate_examples(**gen_kwargs)

            try:
                dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
                config_string = (
                    f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
                )
                dummy_data_guidance_print += (
                    "- In order to create the dummy data for "
                    + config_string
                    + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
                )

                # trigger generate function
                for key, record in generator:
                    pass

                dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"

            except FileNotFoundError as e:
                files_to_create.add(e.filename)

        split_names = ", ".join(split_names)
        if len(files_to_create) > 0:
            # no glob.glob(...) in `_generate_examples(...)`
            if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
                dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
                files_string = dummy_file_name
            else:
                files_string = ", ".join(files_to_create)
                dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"

                dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"

            dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"

        if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
            dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"

            dummy_data_guidance_print += (
                f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
            )

            dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
        else:
            dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"

            dummy_data_guidance_print += (
                f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
            )

            dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"

        dummy_data_guidance_print += (
            f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
        )

        dummy_data_guidance_print += 83 * "=" + "\n"

        print(dummy_data_guidance_print)
0
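The command above is normally reached through `datasets-cli dummy_data`; as an illustration only, here is a hedged sketch of driving it programmatically with the constructor arguments defined in the file. The dataset path is hypothetical (not taken from the source), and the command is deprecated and needs a local dataset script plus the mock download manager machinery, so treat this as a sketch rather than a supported entry point.

from datasets.commands.dummy_data import DummyDataCommand

# "./datasets/squad" is a hypothetical local dataset script directory.
cmd = DummyDataCommand(
    path_to_dataset="./datasets/squad",
    auto_generate=True,
    n_lines=5,
    json_field=None,
    xml_tag=None,
    match_text_files=None,
    keep_uncompressed=False,
    cache_dir=None,
    encoding=None,
)
cmd.run()  # generates and zips dummy data for every builder config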
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/env.py
import platform
from argparse import ArgumentParser

import fsspec
import huggingface_hub
import pandas
import pyarrow

from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env", help="Print relevant system environment info.")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        info = {
            "`datasets` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "`huggingface_hub` version": huggingface_hub.__version__,
            "PyArrow version": pyarrow.__version__,
            "Pandas version": pandas.__version__,
            "`fsspec` version": fsspec.__version__,
        }

        print("\nCopy-and-paste the text below in your GitHub issue.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
0
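A quick way to see what `run()` above produces, without going through argparse, is to instantiate the command directly. This is only a usage sketch; the normal entry point is `datasets-cli env`.

from datasets.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()  # prints the report and returns the info dict
assert "`datasets` version" in info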
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/run_beam.py
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile
from typing import List

from datasets import config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode


def run_beam_command_factory(args, **kwargs):
    return RunBeamCommand(
        args.dataset,
        args.name,
        args.cache_dir,
        args.beam_pipeline_options,
        args.data_dir,
        args.all_configs,
        args.save_info or args.save_infos,
        args.ignore_verifications,
        args.force_redownload,
        **kwargs,
    )


class RunBeamCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
        run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
        run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
        run_beam_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory where the datasets are stored",
        )
        run_beam_parser.add_argument(
            "--beam_pipeline_options",
            type=str,
            default="",
            help="Beam pipeline options, separated by commas. Example: `--beam_pipeline_options=job_name=my-job,project=my-project`",
        )
        run_beam_parser.add_argument(
            "--data_dir",
            type=str,
            default=None,
            help="Can be used to specify a manual directory to get the files from",
        )
        run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
        run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
        run_beam_parser.add_argument(
            "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
        )
        run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
        # aliases
        run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
        run_beam_parser.set_defaults(func=run_beam_command_factory)

    def __init__(
        self,
        dataset: str,
        name: str,
        cache_dir: str,
        beam_pipeline_options: str,
        data_dir: str,
        all_configs: bool,
        save_infos: bool,
        ignore_verifications: bool,
        force_redownload: bool,
        **config_kwargs,
    ):
        self._dataset = dataset
        self._name = name
        self._cache_dir = cache_dir
        self._beam_pipeline_options = beam_pipeline_options
        self._data_dir = data_dir
        self._all_configs = all_configs
        self._save_infos = save_infos
        self._ignore_verifications = ignore_verifications
        self._force_redownload = force_redownload
        self._config_kwargs = config_kwargs

    def run(self):
        import apache_beam as beam

        if self._name is not None and self._all_configs:
            print("Both parameters `name` and `all_configs` can't be used at once.")
            exit(1)
        path, config_name = self._dataset, self._name
        dataset_module = dataset_module_factory(path)
        builder_cls = import_main_class(dataset_module.module_path)
        builders: List[DatasetBuilder] = []
        if self._beam_pipeline_options:
            beam_options = beam.options.pipeline_options.PipelineOptions(
                flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
            )
        else:
            beam_options = None
        if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
            for builder_config in builder_cls.BUILDER_CONFIGS:
                builders.append(
                    builder_cls(
                        config_name=builder_config.name,
                        data_dir=self._data_dir,
                        hash=dataset_module.hash,
                        beam_options=beam_options,
                        cache_dir=self._cache_dir,
                        base_path=dataset_module.builder_kwargs.get("base_path"),
                    )
                )
        else:
            builders.append(
                builder_cls(
                    config_name=config_name,
                    data_dir=self._data_dir,
                    beam_options=beam_options,
                    cache_dir=self._cache_dir,
                    base_path=dataset_module.builder_kwargs.get("base_path"),
                    **self._config_kwargs,
                )
            )

        for builder in builders:
            builder.download_and_prepare(
                download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
                if not self._force_redownload
                else DownloadMode.FORCE_REDOWNLOAD,
                download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
                verification_mode=VerificationMode.NO_CHECKS
                if self._ignore_verifications
                else VerificationMode.ALL_CHECKS,
                try_from_hf_gcs=False,
            )
            if self._save_infos:
                builder._save_infos()

        print("Apache beam run successful.")

        # If save_infos=True, the dataset infos file is created next to the loaded module file.
        # Let's move it to the original directory of the dataset script, to allow the user to
        # upload them on S3 at the same time afterwards.
        if self._save_infos:
            dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)

            name = Path(path).name + ".py"

            combined_path = os.path.join(path, name)
            if os.path.isfile(path):
                dataset_dir = os.path.dirname(path)
            elif os.path.isfile(combined_path):
                dataset_dir = path
            else:  # in case of a remote dataset
                print(f"Dataset Infos file saved at {dataset_infos_path}")
                exit(1)

            # Move datasetinfo back to the user
            user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
            copyfile(dataset_infos_path, user_dataset_infos_path)
            print(f"Dataset Infos file saved at {user_dataset_infos_path}")
0
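For illustration, a hedged sketch of invoking the Beam command programmatically, mirroring what `datasets-cli run_beam` does after argument parsing. The dataset name, config name, and pipeline options below are placeholders rather than values from the source, and `apache_beam` must be installed since `run()` imports it.

from datasets.commands.run_beam import RunBeamCommand

cmd = RunBeamCommand(
    "wikipedia",            # dataset script name or path (placeholder)
    "20220301.simple",      # config name (placeholder)
    None,                   # cache_dir
    "runner=DirectRunner",  # beam_pipeline_options (placeholder)
    None,                   # data_dir
    False,                  # all_configs
    True,                   # save_infos
    False,                  # ignore_verifications
    False,                  # force_redownload
)
cmd.run()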
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/convert.py
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a TensorFlow Datasets dataset script into a HuggingFace Datasets dataset script.

    Returns: ConvertCommand
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check that we haven't forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
0
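The core of `ConvertCommand.run()` is the per-line regex rewrite driven by the `TO_CONVERT` table above. A small self-contained sketch of that substitution loop on one tfds-style line, using only two entries of the table for brevity:

import re

# Subset of the TO_CONVERT table above, for illustration only.
rules = [
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.core", r"datasets"),
]

line = "text = tfds.features.Text()\n"
for pattern, replacement in rules:
    line = re.sub(pattern, replacement, line)
print(line)  # -> text = datasets.Value('string')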
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/test.py
import logging import os from argparse import ArgumentParser from pathlib import Path from shutil import copyfile, rmtree from typing import Generator import datasets.config from datasets.builder import DatasetBuilder from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_manager import DownloadMode from datasets.load import dataset_module_factory, import_main_class from datasets.utils.info_utils import VerificationMode from datasets.utils.logging import ERROR, get_logger logger = get_logger(__name__) def _test_command_factory(args): return TestCommand( args.dataset, args.name, args.cache_dir, args.data_dir, args.all_configs, args.save_info or args.save_infos, args.ignore_verifications, args.force_redownload, args.clear_cache, ) class TestCommand(BaseDatasetsCLICommand): __test__ = False # to tell pytest it's not a test class @staticmethod def register_subcommand(parser: ArgumentParser): test_parser = parser.add_parser("test", help="Test dataset implementation.") test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name") test_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored.", ) test_parser.add_argument( "--data_dir", type=str, default=None, help="Can be used to specify a manual directory to get the files from.", ) test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") test_parser.add_argument( "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)" ) test_parser.add_argument( "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks.", ) test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") test_parser.add_argument( "--clear_cache", action="store_true", help="Remove downloaded files and cached datasets after each config test", ) # aliases test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info") test_parser.add_argument("dataset", type=str, help="Name of the dataset to download") test_parser.set_defaults(func=_test_command_factory) def __init__( self, dataset: str, name: str, cache_dir: str, data_dir: str, all_configs: bool, save_infos: bool, ignore_verifications: bool, force_redownload: bool, clear_cache: bool, ): self._dataset = dataset self._name = name self._cache_dir = cache_dir self._data_dir = data_dir self._all_configs = all_configs self._save_infos = save_infos self._ignore_verifications = ignore_verifications self._force_redownload = force_redownload self._clear_cache = clear_cache if clear_cache and not cache_dir: print( "When --clear_cache is used, specifying a cache directory is mandatory.\n" "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" "Please provide a --cache_dir that will be used to test the dataset script." 
) exit(1) if save_infos: self._ignore_verifications = True def run(self): logging.getLogger("filelock").setLevel(ERROR) if self._name is not None and self._all_configs: print("Both parameters `config` and `all_configs` can't be used at once.") exit(1) path, config_name = self._dataset, self._name module = dataset_module_factory(path) builder_cls = import_main_class(module.module_path) n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1 def get_builders() -> Generator[DatasetBuilder, None, None]: if self._all_configs and builder_cls.BUILDER_CONFIGS: for i, config in enumerate(builder_cls.BUILDER_CONFIGS): if "config_name" in module.builder_kwargs: yield builder_cls( cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: yield builder_cls( config_name=config.name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: if "config_name" in module.builder_kwargs: yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs) else: yield builder_cls( config_name=config_name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) for j, builder in enumerate(get_builders()): print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})") builder._record_infos = os.path.exists( os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME) ) # record checksums only if we need to update a (deprecated) dataset_infos.json builder.download_and_prepare( download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS if not self._force_redownload else DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS if self._ignore_verifications else VerificationMode.ALL_CHECKS, try_from_hf_gcs=False, ) builder.as_dataset() if self._save_infos: builder._save_infos() # If save_infos=True, the dataset card (README.md) is created next to the loaded module file. # The dataset_infos are saved in the YAML part of the README.md # Let's move it to the original directory of the dataset script, to allow the user to # upload them on S3 at the same time afterwards. if self._save_infos: dataset_readme_path = os.path.join(builder_cls.get_imported_module_dir(), "README.md") name = Path(path).name + ".py" combined_path = os.path.join(path, name) if os.path.isfile(path): dataset_dir = os.path.dirname(path) elif os.path.isfile(combined_path): dataset_dir = path elif os.path.isdir(path): # for local directories containing only data files dataset_dir = path else: # in case of a remote dataset dataset_dir = None print(f"Dataset card saved at {dataset_readme_path}") # Move dataset_info back to the user if dataset_dir is not None: user_dataset_readme_path = os.path.join(dataset_dir, "README.md") copyfile(dataset_readme_path, user_dataset_readme_path) print(f"Dataset card saved at {user_dataset_readme_path}") # If clear_cache=True, the download folder and the dataset builder cache directory are deleted if self._clear_cache: if os.path.isdir(builder._cache_dir): logger.warning(f"Clearing cache at {builder._cache_dir}") rmtree(builder._cache_dir) download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR) if os.path.isdir(download_dir): logger.warning(f"Clearing cache at {download_dir}") rmtree(download_dir) print("Test successful.")
0
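Roughly equivalent to running `datasets-cli test ./datasets/squad --all_configs --save_info`, here is a hedged programmatic sketch using the constructor shown above; the path is a placeholder for a local dataset script directory and is not taken from the source.

from datasets.commands.test import TestCommand

cmd = TestCommand(
    dataset="./datasets/squad",  # placeholder path
    name=None,
    cache_dir=None,
    data_dir=None,
    all_configs=True,
    save_infos=True,
    ignore_verifications=False,
    force_redownload=False,
    clear_cache=False,
)
cmd.run()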
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/__init__.py
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseDatasetsCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
0
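Every CLI command above follows the same two-method contract defined by this ABC. A minimal hedged sketch of a custom subcommand built on it; the `HelloCommand` name is illustrative and not part of the library.

from argparse import ArgumentParser

from datasets.commands import BaseDatasetsCLICommand


class HelloCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("Hello from datasets-cli!")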
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/commands/datasets_cli.py
#!/usr/bin/env python
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
0
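The `parse_unknown_args` helper above pairs any extra `--key value` flags into keyword arguments that are forwarded to the selected command factory. A small sketch of that behaviour, assuming flags always come in key/value pairs as the helper expects:

unknown = ["--language", "en", "--date", "20220301"]
kwargs = {key.lstrip("-"): value for key, value in zip(unknown[::2], unknown[1::2])}
assert kwargs == {"language": "en", "date": "20220301"}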
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_dataset_dict.py
import os import tempfile from unittest import TestCase import numpy as np import pandas as pd import pytest from datasets import load_from_disk from datasets.arrow_dataset import Dataset from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.features import ClassLabel, Features, Sequence, Value from datasets.iterable_dataset import IterableDataset from datasets.splits import NamedSplit from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_tf, require_torch class DatasetDictTest(TestCase): def _create_dummy_dataset(self, multiple_columns=False): if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} dset = Dataset.from_dict(data) else: dset = Dataset.from_dict( {"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]} ) return dset def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict: return DatasetDict( { "train": self._create_dummy_dataset(multiple_columns=multiple_columns), "test": self._create_dummy_dataset(multiple_columns=multiple_columns), } ) def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset: def gen(): if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} for v1, v2 in zip(data["col_1"], data["col_2"]): yield {"col_1": v1, "col_2": v2} else: for x in range(30): yield {"filename": "my_name-train" + "_" + f"{x:03d}"} return IterableDataset.from_generator(gen) def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict: return IterableDatasetDict( { "train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), "test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), } ) def test_flatten(self): dset_split = Dataset.from_dict( {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}), ) dset = DatasetDict({"train": dset_split, "test": dset_split}) dset = dset.flatten() self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]}) self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"]) self.assertDictEqual( dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}) ) del dset def test_set_format_numpy(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="numpy", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], np.int64) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.reset_format() with dset.formatted_as(type="numpy", columns=["col_1"]): for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], np.int64) self.assertEqual(dset_split[0]["col_1"].item(), 3) for dset_split in dset.values(): self.assertEqual(dset_split.format["type"], None) self.assertEqual(dset_split.format["format_kwargs"], {}) self.assertEqual(dset_split.format["columns"], dset_split.column_names) self.assertEqual(dset_split.format["output_all_columns"], False) dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="numpy", columns=["col_1", "col_2"]) for dset_split in 
dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], np.str_) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset @require_torch def test_set_format_torch(self): import torch dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="torch", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="torch") for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].item(), 3) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") del dset @require_tf def test_set_format_tf(self): import tensorflow as tf dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="tensorflow", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3) dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a") del dset def test_set_format_pandas(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="pandas", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 1) self.assertIsInstance(dset_split[0], pd.DataFrame) self.assertListEqual(list(dset_split[0].shape), [1, 1]) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="pandas", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 2) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset def test_set_transform(self): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_transform(transform=transform, columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(dset_split.format["type"], "custom") self.assertEqual(len(dset_split[0].keys()), 1) self.assertEqual(dset_split[0]["col_1"], "3") self.assertEqual(dset_split[:2]["col_1"], ["3", "2"]) self.assertEqual(dset_split["col_1"][:2], ["3", "2"]) prev_format = dset[list(dset.keys())[0]].format for dset_split in dset.values(): dset_split.set_format(**dset_split.format) self.assertEqual(prev_format, dset_split.format) dset.set_transform(transform=transform, columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].keys()), 2) 
self.assertEqual(dset_split[0]["col_2"], "A") del dset def test_with_format(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset2 = dset.with_format("numpy", columns=["col_1"]) dset.set_format("numpy", columns=["col_1"]) for dset_split, dset_split2 in zip(dset.values(), dset2.values()): self.assertDictEqual(dset_split.format, dset_split2.format) del dset, dset2 def test_with_transform(self): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} dset = self._create_dummy_dataset_dict(multiple_columns=True) dset2 = dset.with_transform(transform, columns=["col_1"]) dset.set_transform(transform, columns=["col_1"]) for dset_split, dset_split2 in zip(dset.values(), dset2.values()): self.assertDictEqual(dset_split.format, dset_split2.format) del dset, dset2 def test_cast(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) features = dset["train"].features features["col_1"] = Value("float64") dset = dset.cast(features) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) self.assertEqual(dset_split.features["col_1"], Value("float64")) self.assertIsInstance(dset_split[0]["col_1"], float) del dset def test_remove_columns(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.remove_columns(column_names="col_1") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_2"]) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.remove_columns(column_names=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 0) dset = self._create_dummy_dataset_dict(multiple_columns=True) for dset_split in dset.values(): dset_split._format_columns = ["col_1", "col_2"] dset = dset.remove_columns(column_names=["col_1"]) for dset_split in dset.values(): self.assertListEqual(dset_split._format_columns, ["col_2"]) self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_2"]) del dset def test_rename_column(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"]) del dset def test_select_columns(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names=[]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 0) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names="col_1") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_1"]) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) dset = self._create_dummy_dataset_dict(multiple_columns=True) for dset_split in dset.values(): dset_split._format_columns = ["col_1", "col_2"] dset = dset.select_columns(column_names=["col_1"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_1"]) self.assertListEqual(dset_split._format_columns, ["col_1"]) def test_map(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = 
self._create_dummy_dataset_dict() mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True) self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys())) self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"]) cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } mapped_dsets_2: DatasetDict = mapped_dsets_1.map( lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names ) self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys())) self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"])) del dsets, mapped_dsets_1, mapped_dsets_2 def test_iterable_map(self): dsets = self._create_dummy_iterable_dataset_dict() fn_kwargs = {"n": 3} mapped_dsets: IterableDatasetDict = dsets.map( lambda x, n: {"foo": [n] * len(x["filename"])}, batched=True, fn_kwargs=fn_kwargs, ) mapped_example = next(iter(mapped_dsets["train"])) self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"])) self.assertLessEqual(mapped_example["foo"], 3) del dsets, mapped_dsets def test_filter(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys())) self.assertEqual(len(filtered_dsets_1["train"]), 10) cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } filtered_dsets_2: DatasetDict = filtered_dsets_1.filter( lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names ) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys())) self.assertEqual(len(filtered_dsets_2["train"]), 5) filtered_dsets_3: DatasetDict = dsets.filter( lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True ) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys())) self.assertEqual(len(filtered_dsets_3["train"]), 10) del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3 def test_iterable_filter(self): dsets = self._create_dummy_iterable_dataset_dict() example = next(iter(dsets["train"])) fn_kwargs = {"n": 3} filtered_dsets: IterableDatasetDict = dsets.filter( lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs ) filtered_example = next(iter(filtered_dsets["train"])) self.assertListEqual(list(example.keys()), list(filtered_example.keys())) self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4) # id starts from 3 del dsets, filtered_dsets def test_sort(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() sorted_dsets_1: DatasetDict = dsets.sort("filename") self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys())) self.assertListEqual( [f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]], sorted(f"{x:03d}" for x in range(30)), ) indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } sorted_dsets_2: DatasetDict = sorted_dsets_1.sort( "filename", indices_cache_file_names=indices_cache_file_names, reverse=True ) self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys())) self.assertListEqual( [f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]], sorted((f"{x:03d}" 
for x in range(30)), reverse=True), ) del dsets, sorted_dsets_1, sorted_dsets_2 def test_shuffle(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } seeds = { "train": 1234, "test": 1234, } dsets_shuffled = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False ) self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"]) self.assertEqual(len(dsets_shuffled["train"]), 30) self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028") self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010") self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")})) self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")})) # Reproducibility indices_cache_file_names_2 = { "train": os.path.join(tmp_dir, "train_2.arrow"), "test": os.path.join(tmp_dir, "test_2.arrow"), } dsets_shuffled_2 = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False ) self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"]) seeds = { "train": 1234, "test": 1, } indices_cache_file_names_3 = { "train": os.path.join(tmp_dir, "train_3.arrow"), "test": os.path.join(tmp_dir, "test_3.arrow"), } dsets_shuffled_3 = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False ) self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"]) # other input types dsets_shuffled_int = dsets.shuffle(42) dsets_shuffled_alias = dsets.shuffle(seed=42) dsets_shuffled_none = dsets.shuffle() self.assertEqual(len(dsets_shuffled_int["train"]), 30) self.assertEqual(len(dsets_shuffled_alias["train"]), 30) self.assertEqual(len(dsets_shuffled_none["train"]), 30) del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3 del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none def test_flatten_indices(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } dsets_shuffled = dsets.shuffle( seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False ) self.assertIsNotNone(dsets_shuffled["train"]._indices) self.assertIsNotNone(dsets_shuffled["test"]._indices) dsets_flat = dsets_shuffled.flatten_indices() self.assertIsNone(dsets_flat["train"]._indices) self.assertIsNone(dsets_flat["test"]._indices) del dsets, dsets_shuffled, dsets_flat def test_check_values_type(self): dsets = self._create_dummy_dataset_dict() dsets["bad_split"] = None self.assertRaises(TypeError, dsets.map, lambda x: x) self.assertRaises(TypeError, dsets.filter, lambda x: True) self.assertRaises(TypeError, dsets.shuffle) self.assertRaises(TypeError, dsets.sort, "filename") del dsets def test_serialization(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) 
self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) del reloaded_dsets del dsets["test"] dsets.save_to_disk(tmp_dir) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) del dsets, reloaded_dsets dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2}) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["train"].cache_files), 3) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) del reloaded_dsets dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir, num_proc=2) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["train"].cache_files), 2) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) del reloaded_dsets def test_load_from_disk(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir) del dsets dsets = load_from_disk(tmp_dir) self.assertListEqual(sorted(dsets), ["test", "train"]) self.assertEqual(len(dsets["train"]), 30) self.assertListEqual(dsets["train"].column_names, ["filename"]) self.assertEqual(len(dsets["test"]), 30) self.assertListEqual(dsets["test"].column_names, ["filename"]) del dsets def test_align_labels_with_mapping(self): train_features = Features( { "input_text": Value("string"), "input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), } ) test_features = Features( { "input_text": Value("string"), "input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]), } ) train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]} test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]} label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1} id2label = {v: k for k, v in label2id.items()} train_expected_labels = [2, 2, 1, 1, 0, 0] test_expected_labels = [2, 2, 0, 0, 1, 1] train_expected_label_names = [id2label[idx] for idx in train_expected_labels] test_expected_label_names = [id2label[idx] for idx in test_expected_labels] dsets = DatasetDict( { "train": Dataset.from_dict(train_data, features=train_features), "test": Dataset.from_dict(test_data, features=test_features), } ) dsets = dsets.align_labels_with_mapping(label2id, "input_labels") self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"]) self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"]) train_aligned_label_names = [ dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"] ] test_aligned_label_names = [ 
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"] ] self.assertListEqual(train_expected_label_names, train_aligned_label_names) self.assertListEqual(test_expected_label_names, test_aligned_label_names) def test_dummy_datasetdict_serialize_fs(mockfs): dataset_dict = DatasetDict( { "train": Dataset.from_dict({"a": range(30)}), "test": Dataset.from_dict({"a": range(10)}), } ) dataset_path = "mock://my_dataset" dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options) assert mockfs.isdir(dataset_path) assert mockfs.glob(dataset_path + "/*") reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options) assert list(reloaded) == list(dataset_dict) for k in dataset_dict: assert reloaded[k].features == dataset_dict[k].features assert reloaded[k].to_dict() == dataset_dict[k].to_dict() def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_csv_features(features, csv_path, tmp_path): cache_dir = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir) _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_csv_split(split, csv_path, tmp_path): if split: path = {split: csv_path} else: split = "train" path = {"train": csv_path, "test": csv_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_csv(path, cache_dir=cache_dir) _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", 
"col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir) _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): if split: path = {split: jsonl_path} else: split = "train" path = {"train": jsonl_path, "test": jsonl_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_json(path, cache_dir=cache_dir) _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( 
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir) _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path): if split: path = {split: parquet_path} else: split = "train" path = {"train": parquet_path, "test": parquet_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir) _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_text_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ], ) def test_datasetdict_from_text_features(features, text_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"text": "string"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir) _check_text_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_text_split(split, text_path, tmp_path): if split: path = {split: text_path} else: split = "train" path = {"train": text_path, "test": text_path} cache_dir = tmp_path / "cache" expected_features = {"text": "string"} dataset = DatasetDict.from_text(path, cache_dir=cache_dir) _check_text_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys())
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_download_manager.py
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_file_utils.py
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # input absolute path -> output absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert os.path.samefile(cached_path(text_file_abs), text_file_abs)
    # input relative path -> output absolute path
    text_file = __file__
    text_file_abs = str(Path(text_file).resolve())
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert os.path.samefile(cached_path(text_file_rel), text_file_abs)


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/README.md
## Add Dummy data test

**Important** In order to pass the `load_dataset_<dataset_name>` test, dummy data is required for all possible config names.

First we distinguish between dataset scripts that

- A) have no config class and
- B) have a config class

For A) the dummy data folder structure will always look as follows:

- ``dummy/<version>/dummy_data.zip``, *e.g.* ``cosmos_qa/dummy/0.1.0/dummy_data.zip``.

For B) the dummy data folder structure will always look as follows:

- ``dummy/<config_name>/<version>/dummy_data.zip``, *e.g.* ``squad/dummy/plain-text/1.0.0/dummy_data.zip``.

Now the difficult part is to create the correct `dummy_data.zip` file.

**Important** When checking the dummy folder structure of already added datasets, always unzip ``dummy_data.zip``. If a folder ``dummy_data`` is found next to ``dummy_data.zip``, it is probably an old version and should be deleted. The tests only take the ``dummy_data.zip`` file into account.

Here we have to pay close attention to the ``_split_generators(self, dl_manager)`` function of the dataset script in question. There are three general possibilities:

1) ``dl_manager.download_and_extract()`` is given a **single path variable** of type `str` as its argument. In this case the file `dummy_data.zip` should unzip to the following structure: ``os.path.join("dummy_data", <additional-paths-as-defined-in-split-generations>)``. *E.g.* for ``sentiment140``, the unzipped ``dummy_data.zip`` has the following dir structure: ``dummy_data/testdata.manual.2009.06.14.csv`` and ``dummy_data/training.1600000.processed.noemoticon.csv``. **Note** if there are no ``<additional-paths-as-defined-in-split-generations>``, then ``dummy_data`` should be the name of the single file. An example for this is the ``crime-and-punishment`` dataset script.

2) ``dl_manager.download_and_extract()`` is given a **dictionary of paths** of type `str` as its argument. In this case the file `dummy_data.zip` should unzip to the following structure: ``os.path.join("dummy_data", <value_of_dict>.split('/')[-1], <additional-paths-as-defined-in-split-generations>)``. *E.g.* for ``squad``, the unzipped ``dummy_data.zip`` has the dir structure ``dummy_data/dev-v1.1.json``, etc. **Note** if ``<value_of_dict>`` is a zipped file, then the dummy data folder structure should contain the exact name of the zipped file and the corresponding extracted folder structure. The file `dummy_data.zip` should **never** itself contain a zipped file, since the dummy data is not unzipped by the ``MockDownloadManager`` during testing. *E.g.* check the dummy folder structure of ``hansards``, where the folders have to be named ``*.tar``, or the structure of ``wiki_split``, where the folders have to be named ``*.zip``.

3) ``dl_manager.download_and_extract()`` is given a **dictionary of lists of paths** of type `str` as its argument. This is a very special case and has been seen only for the dataset ``ensli``. In this case the values are simply flattened and the dummy folder structure is the same as in 2).
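For case 1) above, the ``dummy_data.zip`` archive can be assembled with a few lines of Python. The snippet below is only a minimal sketch: the dataset name ``my_dataset``, the version ``1.0.0``, and the file names ``train.csv``/``test.csv`` are placeholders for whatever the dataset script's ``_split_generators`` actually expects, not values taken from a real dataset script.

```python
import os
import zipfile

# Hypothetical example: a config-less dataset script whose _split_generators
# expects <downloaded_dir>/train.csv and <downloaded_dir>/test.csv.
dummy_rows = "col_1,col_2\nfoo,1\nbar,2\n"

# Case A (no config class): dummy/<version>/dummy_data.zip
dummy_dir = os.path.join("datasets", "my_dataset", "dummy", "1.0.0")
os.makedirs(dummy_dir, exist_ok=True)

with zipfile.ZipFile(os.path.join(dummy_dir, "dummy_data.zip"), "w") as zf:
    # Everything is placed under a top-level "dummy_data/" folder, as described in case 1).
    zf.writestr("dummy_data/train.csv", dummy_rows)
    zf.writestr("dummy_data/test.csv", dummy_rows)
```

A couple of rows per split is usually enough: the dummy data only has to exercise the parsing logic of ``_generate_examples``, not be representative of the real dataset.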
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_load.py
import importlib import os import pickle import shutil import tempfile import time from hashlib import sha256 from multiprocessing import Pool from pathlib import Path from unittest import TestCase from unittest.mock import patch import dill import pyarrow as pa import pytest import requests import datasets from datasets import config, load_dataset, load_from_disk from datasets.arrow_dataset import Dataset from datasets.arrow_writer import ArrowWriter from datasets.builder import DatasetBuilder from datasets.config import METADATA_CONFIGS_FIELD from datasets.data_files import DataFilesDict from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.download.download_config import DownloadConfig from datasets.exceptions import DatasetNotFoundError from datasets.features import Features, Value from datasets.iterable_dataset import IterableDataset from datasets.load import ( CachedDatasetModuleFactory, CachedMetricModuleFactory, GithubMetricModuleFactory, HubDatasetModuleFactoryWithoutScript, HubDatasetModuleFactoryWithScript, LocalDatasetModuleFactoryWithoutScript, LocalDatasetModuleFactoryWithScript, LocalMetricModuleFactory, PackagedDatasetModuleFactory, infer_module_for_data_files_list, infer_module_for_data_files_list_in_archives, load_dataset_builder, resolve_trust_remote_code, ) from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig from datasets.utils.logging import INFO, get_logger from .utils import ( OfflineSimulationMode, assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, offline, require_pil, require_sndfile, set_current_working_directory_to_temp_dir, ) DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import os import datasets from datasets import DatasetInfo, Features, Split, SplitGenerator, Value class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self) -> DatasetInfo: return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}), SplitGenerator(Split.TEST, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "test.txt")}), ] def _generate_examples(self, filepath, **kwargs): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, {"text": line.strip()} """ SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file outside of the train/test directories SAMPLE_DATASET_IDENTIFIER5 = "hf-internal-testing/imagefolder_with_metadata_no_splits" # imagefolder with a metadata file and no default split names in data files SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy" SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy" SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata" SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" 
SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT = ( "hf-internal-testing/audiofolder_two_configs_in_metadata_with_default" ) METRIC_LOADING_SCRIPT_NAME = "__dummy_metric1__" METRIC_LOADING_SCRIPT_CODE = """ import datasets from datasets import MetricInfo, Features, Value class __DummyMetric1__(datasets.Metric): def _info(self): return MetricInfo(features=Features({"predictions": Value("int"), "references": Value("int")})) def _compute(self, predictions, references): return {"__dummy_metric1__": sum(int(p == r) for p, r in zip(predictions, references))} """ @pytest.fixture def data_dir(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() with open(data_dir / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir) @pytest.fixture def data_dir_with_arrow(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() output_train = os.path.join(data_dir, "train.arrow") with ArrowWriter(path=output_train) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 output_test = os.path.join(data_dir, "test.arrow") with ArrowWriter(path=output_test) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["bar"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 return str(data_dir) @pytest.fixture def data_dir_with_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_metadata" data_dir.mkdir() with open(data_dir / "train.jpg", "wb") as f: f.write(b"train_image_bytes") with open(data_dir / "test.jpg", "wb") as f: f.write(b"test_image_bytes") with open(data_dir / "metadata.jsonl", "w") as f: f.write( """\ {"file_name": "train.jpg", "caption": "Cool tran image"} {"file_name": "test.jpg", "caption": "Cool test image"} """ ) return str(data_dir) @pytest.fixture def data_dir_with_single_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_one_default_config_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom drop_labels: true --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_two_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: "v1" drop_labels: true default: true - config_name: "v2" drop_labels: false --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_data_dir_configs_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with 
open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") @pytest.fixture def sub_data_dirs(tmp_path): data_dir2 = tmp_path / "data_dir2" relative_subdir1 = "subdir1" sub_data_dir1 = data_dir2 / relative_subdir1 sub_data_dir1.mkdir(parents=True) with open(sub_data_dir1 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir1 / "test.txt", "w") as f: f.write("bar\n" * 10) relative_subdir2 = "subdir2" sub_data_dir2 = tmp_path / data_dir2 / relative_subdir2 sub_data_dir2.mkdir(parents=True) with open(sub_data_dir2 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir2 / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir2), relative_subdir1 @pytest.fixture def complex_data_dir(tmp_path): data_dir = tmp_path / "complex_data_dir" data_dir.mkdir() (data_dir / "data").mkdir() with open(data_dir / "data" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / "test.txt", "w") as f: f.write("bar\n" * 10) with open(data_dir / "README.md", "w") as f: f.write("This is a readme") with open(data_dir / ".dummy", "w") as f: f.write("this is a dummy file that is not a data file") return str(data_dir) @pytest.fixture def dataset_loading_script_dir(tmp_path): script_name = DATASET_LOADING_SCRIPT_NAME script_dir = tmp_path / script_name script_dir.mkdir() script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(DATASET_LOADING_SCRIPT_CODE) return str(script_dir) @pytest.fixture def dataset_loading_script_dir_readonly(tmp_path): script_name = DATASET_LOADING_SCRIPT_NAME script_dir = tmp_path / "readonly" / script_name script_dir.mkdir(parents=True) script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(DATASET_LOADING_SCRIPT_CODE) dataset_loading_script_dir = str(script_dir) # Make this directory readonly os.chmod(dataset_loading_script_dir, 0o555) os.chmod(os.path.join(dataset_loading_script_dir, f"{script_name}.py"), 0o555) return dataset_loading_script_dir @pytest.fixture def metric_loading_script_dir(tmp_path): script_name = METRIC_LOADING_SCRIPT_NAME script_dir = tmp_path / script_name script_dir.mkdir() script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(METRIC_LOADING_SCRIPT_CODE) return str(script_dir) @pytest.mark.parametrize( "data_files, expected_module, expected_builder_kwargs", [ (["train.csv"], "csv", {}), (["train.tsv"], "csv", {"sep": "\t"}), (["train.json"], "json", {}), (["train.jsonl"], "json", {}), (["train.parquet"], "parquet", {}), (["train.arrow"], "arrow", {}), (["train.txt"], "text", {}), (["uppercase.TXT"], "text", {}), (["unsupported.ext"], None, {}), ([""], None, {}), ], ) def test_infer_module_for_data_files(data_files, expected_module, expected_builder_kwargs): module, builder_kwargs = infer_module_for_data_files_list(data_files) assert module == expected_module assert builder_kwargs == expected_builder_kwargs @pytest.mark.parametrize( "data_file, expected_module", [ ("zip_csv_path", "csv"), ("zip_csv_with_dir_path", "csv"), ("zip_uppercase_csv_path", "csv"), ("zip_unsupported_ext_path", None), ], ) def test_infer_module_for_data_files_in_archives( data_file, expected_module, zip_csv_path, zip_csv_with_dir_path, zip_uppercase_csv_path, zip_unsupported_ext_path ): data_file_paths = { "zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "zip_uppercase_csv_path": zip_uppercase_csv_path, "zip_unsupported_ext_path": zip_unsupported_ext_path, } data_files = 
[str(data_file_paths[data_file])] inferred_module, _ = infer_module_for_data_files_list_in_archives(data_files) assert inferred_module == expected_module class ModuleFactoryTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures( self, jsonl_path, data_dir, data_dir_with_metadata, data_dir_with_single_config_in_metadata, data_dir_with_two_config_in_metadata, sub_data_dirs, dataset_loading_script_dir, metric_loading_script_dir, ): self._jsonl_path = jsonl_path self._data_dir = data_dir self._data_dir_with_metadata = data_dir_with_metadata self._data_dir_with_single_config_in_metadata = data_dir_with_single_config_in_metadata self._data_dir_with_two_config_in_metadata = data_dir_with_two_config_in_metadata self._data_dir2 = sub_data_dirs[0] self._sub_data_dir = sub_data_dirs[1] self._dataset_loading_script_dir = dataset_loading_script_dir self._metric_loading_script_dir = metric_loading_script_dir def setUp(self): self.hf_modules_cache = tempfile.mkdtemp() self.cache_dir = tempfile.mkdtemp() self.download_config = DownloadConfig(cache_dir=self.cache_dir) self.dynamic_modules_path = datasets.load.init_dynamic_modules( name="test_datasets_modules_" + os.path.basename(self.hf_modules_cache), hf_modules_cache=self.hf_modules_cache, ) def test_HubDatasetModuleFactoryWithScript_dont_trust_remote_code(self): # "squad" has a dataset script factory = HubDatasetModuleFactoryWithScript( "squad", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None): # this will be the default soon self.assertRaises(ValueError, factory.get_module) factory = HubDatasetModuleFactoryWithScript( "squad", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=False, ) self.assertRaises(ValueError, factory.get_module) def test_HubDatasetModuleFactoryWithScript_with_github_dataset(self): # "wmt_t2t" has additional imports (internal) factory = HubDatasetModuleFactoryWithScript( "wmt_t2t", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) def test_GithubMetricModuleFactory_with_internal_import(self): # "squad_v2" requires additional imports (internal) factory = GithubMetricModuleFactory( "squad_v2", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None @pytest.mark.filterwarnings("ignore:GithubMetricModuleFactory is deprecated:FutureWarning") def test_GithubMetricModuleFactory_with_external_import(self): # "bleu" requires additional imports (external from github) factory = GithubMetricModuleFactory( "bleu", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_LocalMetricModuleFactory(self): path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py") factory = LocalMetricModuleFactory( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is 
not None def test_LocalDatasetModuleFactoryWithScript(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactoryWithScript_dont_trust_remote_code(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None): # this will be the default soon self.assertRaises(ValueError, factory.get_module) factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=False, ) self.assertRaises(ValueError, factory.get_module) def test_LocalDatasetModuleFactoryWithoutScript(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactoryWithoutScript_with_data_dir(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir2, data_dir=self._sub_data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]["train"]) == 1 and len(module_factory_result.builder_kwargs["data_files"]["test"]) == 1 ) assert all( self._sub_data_dir in Path(data_file).parts for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + module_factory_result.builder_kwargs["data_files"]["test"] ) def test_LocalDatasetModuleFactoryWithoutScript_with_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir_with_metadata) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 ) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["train"] ) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["test"] ) def test_LocalDatasetModuleFactoryWithoutScript_with_single_config_in_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript( self._data_dir_with_single_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert 
next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], ImageFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert len(module_builder_configs[0].data_files) == 1 # one train split assert len(module_builder_configs[0].data_files["train"]) == 2 # two files assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert module_factory_result.builder_configs_parameters.default_config_name is None # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs def test_LocalDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript( self._data_dir_with_two_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 2 assert list(module_metadata_configs) == ["v1", "v2"] assert "drop_labels" in module_metadata_configs["v1"] assert module_metadata_configs["v1"]["drop_labels"] is True assert "drop_labels" in module_metadata_configs["v2"] assert module_metadata_configs["v2"]["drop_labels"] is False module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 2 module_builder_config_v1, module_builder_config_v2 = module_builder_configs assert module_builder_config_v1.name == "v1" assert module_builder_config_v2.name == "v2" assert isinstance(module_builder_config_v1, ImageFolderConfig) assert isinstance(module_builder_config_v2, ImageFolderConfig) assert isinstance(module_builder_config_v1.data_files, DataFilesDict) assert isinstance(module_builder_config_v2.data_files, DataFilesDict) assert sorted(module_builder_config_v1.data_files) == ["train"] assert len(module_builder_config_v1.data_files["train"]) == 2 assert sorted(module_builder_config_v2.data_files) == ["train"] assert len(module_builder_config_v2.data_files["train"]) == 2 assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata assert ( module_factory_result.builder_configs_parameters.default_config_name == "v1" ) # it's marked as a default one in yaml # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs def test_PackagedDatasetModuleFactory(self): factory = PackagedDatasetModuleFactory( "json", data_files=self._jsonl_path, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_PackagedDatasetModuleFactory_with_data_dir(self): factory = PackagedDatasetModuleFactory("json", data_dir=self._data_dir, 
download_config=self.download_config) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 ) assert Path(module_factory_result.builder_kwargs["data_files"]["train"][0]).parent.samefile(self._data_dir) assert Path(module_factory_result.builder_kwargs["data_files"]["test"][0]).parent.samefile(self._data_dir) def test_PackagedDatasetModuleFactory_with_data_dir_and_metadata(self): factory = PackagedDatasetModuleFactory( "imagefolder", data_dir=self._data_dir_with_metadata, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 ) assert Path(module_factory_result.builder_kwargs["data_files"]["train"][0]).parent.samefile( self._data_dir_with_metadata ) assert Path(module_factory_result.builder_kwargs["data_files"]["test"][0]).parent.samefile( self._data_dir_with_metadata ) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["train"] ) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["test"] ) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript(self): factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER2, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self): data_dir = "data2" factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]["train"]) == 1 and len(module_factory_result.builder_kwargs["data_files"]["test"]) == 1 ) assert all( data_dir in Path(data_file).parts for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + module_factory_result.builder_kwargs["data_files"]["test"] ) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_metadata(self): factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER4, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 
) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["train"] ) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["test"] ) factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER5, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( module_factory_result.builder_kwargs["data_files"] is not None and len(module_factory_result.builder_kwargs["data_files"]) == 1 and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 ) assert any( Path(data_file).name == "metadata.jsonl" for data_file in module_factory_result.builder_kwargs["data_files"]["train"] ) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_one_default_config_in_metadata(self): factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, download_config=self.download_config, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], AudioFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert sorted(module_builder_configs[0].data_files) == ["test", "train"] assert len(module_builder_configs[0].data_files["train"]) == 3 assert len(module_builder_configs[0].data_files["test"]) == 3 assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert module_factory_result.builder_configs_parameters.default_config_name is None # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self): datasets_names = [SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT] for dataset_name in datasets_names: factory = HubDatasetModuleFactoryWithoutScript(dataset_name, download_config=self.download_config) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 2 assert list(module_metadata_configs) == ["v1", "v2"] assert "drop_labels" in module_metadata_configs["v1"] 
assert module_metadata_configs["v1"]["drop_labels"] is True assert "drop_labels" in module_metadata_configs["v2"] assert module_metadata_configs["v2"]["drop_labels"] is False module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 2 module_builder_config_v1, module_builder_config_v2 = module_builder_configs assert module_builder_config_v1.name == "v1" assert module_builder_config_v2.name == "v2" assert isinstance(module_builder_config_v1, AudioFolderConfig) assert isinstance(module_builder_config_v2, AudioFolderConfig) assert isinstance(module_builder_config_v1.data_files, DataFilesDict) assert isinstance(module_builder_config_v2.data_files, DataFilesDict) assert sorted(module_builder_config_v1.data_files) == ["test", "train"] assert len(module_builder_config_v1.data_files["train"]) == 3 assert len(module_builder_config_v1.data_files["test"]) == 3 assert sorted(module_builder_config_v2.data_files) == ["test", "train"] assert len(module_builder_config_v2.data_files["train"]) == 2 assert len(module_builder_config_v2.data_files["test"]) == 1 assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs if dataset_name == SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT: assert module_factory_result.builder_configs_parameters.default_config_name == "v1" else: assert module_factory_result.builder_configs_parameters.default_config_name is None @pytest.mark.integration def test_HubDatasetModuleFactoryWithScript(self): factory = HubDatasetModuleFactoryWithScript( SAMPLE_DATASET_IDENTIFIER, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) def test_CachedDatasetModuleFactory(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() for offline_mode in OfflineSimulationMode: with offline(offline_mode): factory = CachedDatasetModuleFactory( DATASET_LOADING_SCRIPT_NAME, dynamic_modules_path=self.dynamic_modules_path, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None @pytest.mark.filterwarnings("ignore:LocalMetricModuleFactory is deprecated:FutureWarning") @pytest.mark.filterwarnings("ignore:CachedMetricModuleFactory is deprecated:FutureWarning") def test_CachedMetricModuleFactory(self): path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py") factory = LocalMetricModuleFactory( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() for offline_mode in OfflineSimulationMode: with offline(offline_mode): factory = CachedMetricModuleFactory( METRIC_LOADING_SCRIPT_NAME, dynamic_modules_path=self.dynamic_modules_path, ) module_factory_result = factory.get_module() 
assert importlib.import_module(module_factory_result.module_path) is not None @pytest.mark.parametrize( "factory_class", [ CachedDatasetModuleFactory, CachedMetricModuleFactory, GithubMetricModuleFactory, HubDatasetModuleFactoryWithoutScript, HubDatasetModuleFactoryWithScript, LocalDatasetModuleFactoryWithoutScript, LocalDatasetModuleFactoryWithScript, LocalMetricModuleFactory, PackagedDatasetModuleFactory, ], ) def test_module_factories(factory_class): name = "dummy_name" factory = factory_class(name) assert factory.name == name @pytest.mark.integration class LoadTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures(self, caplog): self._caplog = caplog def setUp(self): self.hf_modules_cache = tempfile.mkdtemp() self.dynamic_modules_path = datasets.load.init_dynamic_modules( name="test_datasets_modules2", hf_modules_cache=self.hf_modules_cache ) def tearDown(self): shutil.rmtree(self.hf_modules_cache) def _dummy_module_dir(self, modules_dir, dummy_module_name, dummy_code): assert dummy_module_name.startswith("__") module_dir = os.path.join(modules_dir, dummy_module_name) os.makedirs(module_dir, exist_ok=True) module_path = os.path.join(module_dir, dummy_module_name + ".py") with open(module_path, "w") as f: f.write(dummy_code) return module_dir def test_dataset_module_factory(self): with tempfile.TemporaryDirectory() as tmp_dir: # prepare module from directory path dummy_code = "MY_DUMMY_VARIABLE = 'hello there'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code) dataset_module = datasets.load.dataset_module_factory( module_dir, dynamic_modules_path=self.dynamic_modules_path ) dummy_module = importlib.import_module(dataset_module.module_path) self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "hello there") self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest()) # prepare module from file path + check resolved_file_path dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code) module_path = os.path.join(module_dir, "__dummy_module_name1__.py") dataset_module = datasets.load.dataset_module_factory( module_path, dynamic_modules_path=self.dynamic_modules_path ) dummy_module = importlib.import_module(dataset_module.module_path) self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "general kenobi") self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest()) # missing module for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises( (DatasetNotFoundError, ConnectionError, requests.exceptions.ConnectionError) ): datasets.load.dataset_module_factory( "__missing_dummy_module_name__", dynamic_modules_path=self.dynamic_modules_path ) def test_offline_dataset_module_factory(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_code = "MY_DUMMY_VARIABLE = 'hello there'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code) dataset_module_1 = datasets.load.dataset_module_factory( module_dir, dynamic_modules_path=self.dynamic_modules_path ) time.sleep(0.1) # make sure there's a difference in the OS update time of the python file dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code) dataset_module_2 = datasets.load.dataset_module_factory( module_dir, dynamic_modules_path=self.dynamic_modules_path ) for offline_simulation_mode in list(OfflineSimulationMode): 
with offline(offline_simulation_mode): self._caplog.clear() # allow provide the module name without an explicit path to remote or local actual file dataset_module_3 = datasets.load.dataset_module_factory( "__dummy_module_name2__", dynamic_modules_path=self.dynamic_modules_path ) # it loads the most recent version of the module self.assertEqual(dataset_module_2.module_path, dataset_module_3.module_path) self.assertNotEqual(dataset_module_1.module_path, dataset_module_3.module_path) self.assertIn("Using the latest cached version of the module", self._caplog.text) def test_load_dataset_from_hub(self): with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("_dummy") self.assertIn( "Dataset '_dummy' doesn't exist on the Hub", str(context.exception), ) with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("_dummy", revision="0.0.0") self.assertIn( "Dataset '_dummy' doesn't exist on the Hub", str(context.exception), ) self.assertIn( "at revision '0.0.0'", str(context.exception), ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises(ConnectionError) as context: datasets.load_dataset("_dummy") if offline_simulation_mode != OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: self.assertIn( "Couldn't reach '_dummy' on the Hub", str(context.exception), ) def test_load_dataset_namespace(self): with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("hf-internal-testing/_dummy") self.assertIn( "hf-internal-testing/_dummy", str(context.exception), ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises(ConnectionError) as context: datasets.load_dataset("hf-internal-testing/_dummy") self.assertIn("hf-internal-testing/_dummy", str(context.exception), msg=offline_simulation_mode) @pytest.mark.integration def test_load_dataset_builder_with_metadata(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) assert isinstance(builder, ImageFolder) assert builder.config.name == "default" assert builder.config.data_files is not None assert builder.config.drop_metadata is None builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, "non-existing-config") assert isinstance(builder, ImageFolder) assert builder.config.name == "non-existing-config" @pytest.mark.integration def test_load_dataset_builder_config_kwargs_passed_as_arguments(): builder_default = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) builder_custom = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) assert builder_custom.config.drop_metadata != builder_default.config.drop_metadata assert builder_custom.config.drop_metadata is True @pytest.mark.integration def test_load_dataset_builder_with_two_configs_in_metadata(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") assert isinstance(builder, AudioFolder) assert builder.config.name == "v1" assert builder.config.data_files is not None with pytest.raises(ValueError): datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) with pytest.raises(ValueError): datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config") @pytest.mark.parametrize("serializer", [pickle, dill]) def test_load_dataset_builder_with_metadata_configs_pickable(serializer): builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) builder_unpickled = 
serializer.loads(serializer.dumps(builder)) assert builder.BUILDER_CONFIGS == builder_unpickled.BUILDER_CONFIGS assert list(builder_unpickled.builder_configs) == ["custom"] assert isinstance(builder_unpickled.builder_configs["custom"], AudioFolderConfig) builder2 = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") builder2_unpickled = serializer.loads(serializer.dumps(builder2)) assert builder2.BUILDER_CONFIGS == builder2_unpickled.BUILDER_CONFIGS != builder_unpickled.BUILDER_CONFIGS assert list(builder2_unpickled.builder_configs) == ["v1", "v2"] assert isinstance(builder2_unpickled.builder_configs["v1"], AudioFolderConfig) assert isinstance(builder2_unpickled.builder_configs["v2"], AudioFolderConfig) def test_load_dataset_builder_for_absolute_script_dir(dataset_loading_script_dir, data_dir): builder = datasets.load_dataset_builder(dataset_loading_script_dir, data_dir=data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == DATASET_LOADING_SCRIPT_NAME assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME assert builder.info.features == Features({"text": Value("string")}) def test_load_dataset_builder_for_relative_script_dir(dataset_loading_script_dir, data_dir): with set_current_working_directory_to_temp_dir(): relative_script_dir = DATASET_LOADING_SCRIPT_NAME shutil.copytree(dataset_loading_script_dir, relative_script_dir) builder = datasets.load_dataset_builder(relative_script_dir, data_dir=data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == DATASET_LOADING_SCRIPT_NAME assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME assert builder.info.features == Features({"text": Value("string")}) def test_load_dataset_builder_for_script_path(dataset_loading_script_dir, data_dir): builder = datasets.load_dataset_builder( os.path.join(dataset_loading_script_dir, DATASET_LOADING_SCRIPT_NAME + ".py"), data_dir=data_dir ) assert isinstance(builder, DatasetBuilder) assert builder.name == DATASET_LOADING_SCRIPT_NAME assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME assert builder.info.features == Features({"text": Value("string")}) def test_load_dataset_builder_for_absolute_data_dir(complex_data_dir): builder = datasets.load_dataset_builder(complex_data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == Path(complex_data_dir).name assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 def test_load_dataset_builder_for_relative_data_dir(complex_data_dir): with set_current_working_directory_to_temp_dir(): relative_data_dir = "relative_data_dir" shutil.copytree(complex_data_dir, relative_data_dir) builder = datasets.load_dataset_builder(relative_data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == relative_data_dir assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 @pytest.mark.integration def test_load_dataset_builder_for_community_dataset_with_script(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER) assert isinstance(builder, DatasetBuilder) assert builder.name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] assert 
builder.config.name == "default" assert builder.info.features == Features({"text": Value("string")}) namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")] assert builder._relative_data_dir().startswith(namespace) assert SAMPLE_DATASET_IDENTIFIER.replace("/", "--") in builder.__module__ @pytest.mark.integration def test_load_dataset_builder_for_community_dataset_without_script(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER2) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER2.split("/")[-1] assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 def test_load_dataset_builder_fail(): with pytest.raises(DatasetNotFoundError): datasets.load_dataset_builder("blabla") @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_load_dataset_local(dataset_loading_script_dir, data_dir, keep_in_memory, caplog): with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=keep_in_memory) assert isinstance(dataset, DatasetDict) assert all(isinstance(d, Dataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): caplog.clear() # Load dataset from cache dataset = datasets.load_dataset(DATASET_LOADING_SCRIPT_NAME, data_dir=data_dir) assert len(dataset) == 2 assert "Using the latest cached version of the module" in caplog.text with pytest.raises(DatasetNotFoundError) as exc_info: datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value) def test_load_dataset_streaming(dataset_loading_script_dir, data_dir): dataset = load_dataset(dataset_loading_script_dir, streaming=True, data_dir=data_dir) assert isinstance(dataset, IterableDatasetDict) assert all(isinstance(d, IterableDataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) def test_load_dataset_streaming_gz_json(jsonl_gz_path): data_files = jsonl_gz_path ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.integration @pytest.mark.parametrize( "path", ["sample.jsonl", "sample.jsonl.gz", "sample.tar", "sample.jsonl.xz", "sample.zip", "sample.jsonl.zst"] ) def test_load_dataset_streaming_compressed_files(path): repo_id = "hf-internal-testing/compressed_files" data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path}" if data_files[-3:] in ("zip", "tar"): # we need to glob "*" inside archives data_files = data_files[-3:] + "://*::" + data_files return # TODO(QL, albert): support re-add support for ZIP and TAR archives streaming ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item == { "tokens": ["Ministeri", "de", "Justícia", "d'Espanya"], "ner_tags": [1, 2, 2, 2], "langs": ["ca", "ca", "ca", "ca"], "spans": ["PER: Ministeri de Justícia d'Espanya"], } 
@pytest.mark.parametrize("path_extension", ["csv", "csv.bz2"]) @pytest.mark.parametrize("streaming", [False, True]) def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv_path): paths = {"csv": csv_path, "csv.bz2": bz2_csv_path} data_files = str(paths[path_extension]) features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) assert isinstance(ds, IterableDataset if streaming else Dataset) ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"]) def test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path): data_file_paths = { "zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "csv_path": csv_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_jsonl_path", "zip_jsonl_with_dir_path", "jsonl_path"]) def test_load_dataset_zip_jsonl(data_file, streaming, zip_jsonl_path, zip_jsonl_with_dir_path, jsonl_path): data_file_paths = { "zip_jsonl_path": zip_jsonl_path, "zip_jsonl_with_dir_path": zip_jsonl_with_dir_path, "jsonl_path": jsonl_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_text_path", "zip_text_with_dir_path", "text_path"]) def test_load_dataset_zip_text(data_file, streaming, zip_text_path, zip_text_with_dir_path, text_path): data_file_paths = { "zip_text_path": zip_text_path, "zip_text_with_dir_path": zip_text_with_dir_path, "text_path": text_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 ds = load_dataset("text", split="train", data_files=data_files, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"text": "0"} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"text": "0"} @pytest.mark.parametrize("streaming", [False, 
True]) def test_load_dataset_arrow(streaming, data_dir_with_arrow): ds = load_dataset("arrow", split="train", data_dir=data_dir_with_arrow, streaming=streaming) expected_size = 10 if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "foo"} ds_item_counter += 1 assert ds_item_counter == 10 else: assert ds.num_rows == 10 assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "foo"} def test_load_dataset_text_with_unicode_new_lines(text_path_with_unicode_new_lines): data_files = str(text_path_with_unicode_new_lines) ds = load_dataset("text", split="train", data_files=data_files) assert ds.num_rows == 3 def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_extension): data_files = str(text_dir_with_unsupported_extension) ds = load_dataset("text", split="train", data_files=data_files) assert ds.num_rows == 4 @pytest.mark.integration def test_loading_from_the_datasets_hub(): with tempfile.TemporaryDirectory() as tmp_dir: dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir) assert len(dataset["train"]) == 2 assert len(dataset["validation"]) == 3 del dataset @pytest.mark.integration def test_loading_from_the_datasets_hub_with_token(): true_request = requests.Session().request def assert_auth(method, url, *args, headers, **kwargs): assert headers["authorization"] == "Bearer foo" return true_request(method, url, *args, headers=headers, **kwargs) with patch("requests.Session.request") as mock_request: mock_request.side_effect = assert_auth with tempfile.TemporaryDirectory() as tmp_dir: with offline(): with pytest.raises((ConnectionError, requests.exceptions.ConnectionError)): load_dataset(SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER, cache_dir=tmp_dir, token="foo") mock_request.assert_called() @pytest.mark.integration def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data): ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token) assert next(iter(ds)) is not None @pytest.mark.integration def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data): builder = load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token) assert isinstance(builder, DatasetBuilder) @pytest.mark.integration def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data): ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token) assert next(iter(ds)) is not None @pytest.mark.integration def test_load_dataset_config_kwargs_passed_as_arguments(): ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4) ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) assert list(ds_default["train"].features) == ["image", "caption"] assert list(ds_custom["train"].features) == ["image"] @require_sndfile @pytest.mark.integration def test_load_hub_dataset_without_script_with_single_config_in_metadata(): # load the same dataset but with no configurations (=with default parameters) ds = load_dataset(SAMPLE_DATASET_NO_CONFIGS_IN_METADATA) assert list(ds["train"].features) == ["audio", "label"] # assert label feature is here as expected by default assert len(ds["train"]) == 5 and len(ds["test"]) == 4 ds2 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) # single config -> no need to specify it assert list(ds2["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds2["train"]) == 3 and 
len(ds2["test"]) == 3 ds3 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "custom") assert list(ds3["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds3["train"]) == 3 and len(ds3["test"]) == 3 with pytest.raises(ValueError): # no config named "default" _ = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "default") @require_sndfile @pytest.mark.integration def test_load_hub_dataset_without_script_with_two_config_in_metadata(): ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") assert list(ds["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds2 = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2") assert list(ds2["train"].features) == [ "audio", "label", ] # assert param `drop_labels=False` from metadata is passed assert len(ds2["train"]) == 2 and len(ds2["test"]) == 1 with pytest.raises(ValueError): # config is required but not specified _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) with pytest.raises(ValueError): # no config named "default" _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "default") ds_with_default = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT) # it's a dataset with the same data but "v1" config is marked as a default one assert list(ds_with_default["train"].features) == list(ds["train"].features) assert len(ds_with_default["train"]) == len(ds["train"]) and len(ds_with_default["test"]) == len(ds["test"]) @require_sndfile @pytest.mark.integration def test_load_hub_dataset_without_script_with_metadata_config_in_parallel(): # assert it doesn't fail (pickling of dynamically created class works) ds = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, num_proc=2) assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1", num_proc=2) assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2", num_proc=2) assert "label" in ds["train"].features assert len(ds["train"]) == 2 and len(ds["test"]) == 1 @require_pil @pytest.mark.integration @pytest.mark.parametrize("streaming", [True]) def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming): ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token) assert isinstance(ds, IterableDataset if streaming else Dataset) ds_items = list(ds) assert len(ds_items) == 2 def test_load_dataset_then_move_then_reload(dataset_loading_script_dir, data_dir, tmp_path, caplog): cache_dir1 = tmp_path / "cache1" cache_dir2 = tmp_path / "cache2" dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1) fingerprint1 = dataset._fingerprint del dataset os.rename(cache_dir1, cache_dir2) caplog.clear() with caplog.at_level(INFO, logger=get_logger().name): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir2) assert "Found cached dataset" in caplog.text assert dataset._fingerprint == fingerprint1, "for the caching mechanism to work, fingerprint should stay the same" dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, 
split="test", cache_dir=cache_dir2) assert dataset._fingerprint != fingerprint1 def test_load_dataset_readonly(dataset_loading_script_dir, dataset_loading_script_dir_readonly, data_dir, tmp_path): cache_dir1 = tmp_path / "cache1" cache_dir2 = tmp_path / "cache2" dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1) fingerprint1 = dataset._fingerprint del dataset # Load readonly dataset and check that the fingerprint is the same. dataset = load_dataset(dataset_loading_script_dir_readonly, data_dir=data_dir, split="train", cache_dir=cache_dir2) assert dataset._fingerprint == fingerprint1, "Cannot load a dataset in a readonly folder." @pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 50, 500]) def test_load_dataset_local_with_default_in_memory( max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, monkeypatch ): current_dataset_size = 148 if max_in_memory_dataset_size == "default": max_in_memory_dataset_size = 0 # default else: monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) if max_in_memory_dataset_size: expected_in_memory = current_dataset_size < max_in_memory_dataset_size else: expected_in_memory = False with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir) assert (dataset["train"].dataset_size < max_in_memory_dataset_size) is expected_in_memory @pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 100, 1000]) def test_load_from_disk_with_default_in_memory( max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, tmp_path, monkeypatch ): current_dataset_size = 512 # arrow file size = 512, in-memory dataset size = 148 if max_in_memory_dataset_size == "default": max_in_memory_dataset_size = 0 # default else: monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) if max_in_memory_dataset_size: expected_in_memory = current_dataset_size < max_in_memory_dataset_size else: expected_in_memory = False dset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=True) dataset_path = os.path.join(tmp_path, "saved_dataset") dset.save_to_disk(dataset_path) with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): _ = load_from_disk(dataset_path) @pytest.mark.integration def test_remote_data_files(): repo_id = "hf-internal-testing/raw_jsonl" filename = "wikiann-bn-validation.jsonl" data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{filename}" ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item.keys() == {"langs", "ner_tags", "spans", "tokens"} @pytest.mark.parametrize("deleted", [False, True]) def test_load_dataset_deletes_extracted_files(deleted, jsonl_gz_path, tmp_path): data_files = jsonl_gz_path cache_dir = tmp_path / "cache" if deleted: download_config = DownloadConfig(delete_extracted=True, cache_dir=cache_dir / "downloads") ds = load_dataset( "json", split="train", data_files=data_files, cache_dir=cache_dir, download_config=download_config ) else: # default ds = load_dataset("json", split="train", data_files=data_files, cache_dir=cache_dir) assert ds[0] == {"col_1": "0", "col_2": 0, "col_3": 0.0} assert ( [path for path in (cache_dir / "downloads" / "extracted").iterdir() if path.suffix != ".lock"] == [] ) is 
deleted


def distributed_load_dataset(args):
    data_name, tmp_dir, datafiles = args
    dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles)
    return dataset


def test_load_dataset_distributed(tmp_path, csv_path):
    num_workers = 5
    args = "csv", str(tmp_path), csv_path
    with Pool(processes=num_workers) as pool:  # start num_workers processes
        datasets = pool.map(distributed_load_dataset, [args] * num_workers)
        assert len(datasets) == num_workers
        assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets)
        assert len(datasets[0].cache_files) > 0
        assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets)


def test_load_dataset_with_storage_options(mockfs):
    with mockfs.open("data.txt", "w") as f:
        f.write("Hello there\n")
        f.write("General Kenobi !")
    data_files = {"train": ["mock://data.txt"]}
    ds = load_dataset("text", data_files=data_files, storage_options=mockfs.storage_options)
    assert list(ds["train"]) == [{"text": "Hello there"}, {"text": "General Kenobi !"}]


@require_pil
def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file):
    import PIL.Image

    filename = os.path.basename(image_file)
    with mockfs.open(filename, "wb") as fout:
        with open(image_file, "rb") as fin:
            fout.write(fin.read())
    data_files = {"train": ["mock://" + filename]}
    ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options)
    assert len(ds["train"]) == 1
    assert isinstance(ds["train"][0]["image"], PIL.Image.Image)


def test_load_dataset_without_script_with_zip(zip_csv_path):
    path = str(zip_csv_path.parent)
    ds = load_dataset(path)
    assert list(ds.keys()) == ["train"]
    assert ds["train"].column_names == ["col_1", "col_2", "col_3"]
    assert ds["train"].num_rows == 8
    assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0}


@pytest.mark.parametrize("trust_remote_code, expected", [(False, False), (True, True), (None, True)])
def test_resolve_trust_remote_code(trust_remote_code, expected):
    assert resolve_trust_remote_code(trust_remote_code, repo_id="dummy") is expected


@pytest.mark.parametrize("trust_remote_code, expected", [(False, False), (True, True), (None, ValueError)])
def test_resolve_trust_remote_code_future(trust_remote_code, expected):
    with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None):  # this will be the default soon
        if isinstance(expected, bool):
            assert resolve_trust_remote_code(trust_remote_code, repo_id="dummy") is expected
        else:
            with pytest.raises(expected):
                resolve_trust_remote_code(trust_remote_code, repo_id="dummy")
0
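As a companion to the loading tests above, here is a minimal, self-contained sketch of the `load_dataset` streaming path they exercise. The temporary CSV file and its column names are invented for illustration and are not fixtures from the test suite.

# Minimal sketch (not part of the test suite): stream a small local CSV with load_dataset.
import csv
import tempfile

from datasets import IterableDataset, load_dataset

with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["col_1", "col_2"])  # hypothetical columns, mirroring the fixtures above
    writer.writerows([["foo", 1], ["bar", 2]])
    csv_path = f.name

ds = load_dataset("csv", split="train", data_files=csv_path, streaming=True)
assert isinstance(ds, IterableDataset)
print(next(iter(ds)))  # e.g. {'col_1': 'foo', 'col_2': 1}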
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_fingerprint.py
import json import os import pickle import subprocess from functools import partial from pathlib import Path from tempfile import gettempdir from textwrap import dedent from types import FunctionType from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from multiprocess import Pool import datasets from datasets import config from datasets.fingerprint import Hasher, fingerprint_transform from datasets.table import InMemoryTable from .utils import ( require_not_windows, require_regex, require_spacy, require_spacy_model, require_tiktoken, require_torch, require_transformers, ) class Foo: def __init__(self, foo): self.foo = foo def __call__(self): return self.foo class DatasetChild(datasets.Dataset): @fingerprint_transform(inplace=False) def func1(self, new_fingerprint, *args, **kwargs): return DatasetChild(self.data, fingerprint=new_fingerprint) @fingerprint_transform(inplace=False) def func2(self, new_fingerprint, *args, **kwargs): return DatasetChild(self.data, fingerprint=new_fingerprint) class UnpicklableCallable: def __init__(self, callable): self.callable = callable def __call__(self, *args, **kwargs): if self.callable is not None: return self.callable(*args, **kwargs) def __getstate__(self): raise pickle.PicklingError() if config.TORCH_AVAILABLE: import torch import torch.nn as nn import torch.nn.functional as F class TorchModule(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5) self.conv2 = nn.Conv2d(20, 20, 5) def forward(self, x): x = F.relu(self.conv1(x)) return F.relu(self.conv2(x)) else: TorchModule = None class TokenizersHashTest(TestCase): @require_transformers @pytest.mark.integration def test_hash_tokenizer(self): from transformers import AutoTokenizer def encode(x): return tokenizer(x) # TODO: add hash consistency tests across sessions tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") hash1 = Hasher.hash(tokenizer) hash1_lambda = Hasher.hash(lambda x: tokenizer(x)) hash1_encode = Hasher.hash(encode) tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") hash2 = Hasher.hash(tokenizer) hash2_lambda = Hasher.hash(lambda x: tokenizer(x)) hash2_encode = Hasher.hash(encode) tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") hash3 = Hasher.hash(tokenizer) hash3_lambda = Hasher.hash(lambda x: tokenizer(x)) hash3_encode = Hasher.hash(encode) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) self.assertEqual(hash1_lambda, hash3_lambda) self.assertNotEqual(hash1_lambda, hash2_lambda) self.assertEqual(hash1_encode, hash3_encode) self.assertNotEqual(hash1_encode, hash2_encode) @require_transformers @pytest.mark.integration def test_hash_tokenizer_with_cache(self): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("gpt2") hash1 = Hasher.hash(tokenizer) tokenizer("Hello world !") # call once to change the tokenizer's cache hash2 = Hasher.hash(tokenizer) self.assertEqual(hash1, hash2) @require_regex def test_hash_regex(self): import regex pat = regex.Regex("foo") hash1 = Hasher.hash(pat) pat = regex.Regex("bar") hash2 = Hasher.hash(pat) pat = regex.Regex("foo") hash3 = Hasher.hash(pat) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) class RecurseHashTest(TestCase): def test_recurse_hash_for_function(self): def func(): return foo foo = [0] hash1 = Hasher.hash(func) foo = [1] hash2 = Hasher.hash(func) foo = [0] hash3 = Hasher.hash(func) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) def 
test_hash_ignores_line_definition_of_function(self): def func(): pass hash1 = Hasher.hash(func) def func(): pass hash2 = Hasher.hash(func) self.assertEqual(hash1, hash2) def test_recurse_hash_for_class(self): hash1 = Hasher.hash(Foo([0])) hash2 = Hasher.hash(Foo([1])) hash3 = Hasher.hash(Foo([0])) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) def test_recurse_hash_for_method(self): hash1 = Hasher.hash(Foo([0]).__call__) hash2 = Hasher.hash(Foo([1]).__call__) hash3 = Hasher.hash(Foo([0]).__call__) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) def test_hash_ipython_function(self): def create_ipython_func(co_filename, returned_obj): def func(): return returned_obj code = func.__code__ # Use _create_code from dill in order to make it work for different python versions code = code.replace(co_filename=co_filename) return FunctionType(code, func.__globals__, func.__name__, func.__defaults__, func.__closure__) co_filename, returned_obj = "<ipython-input-2-e0383a102aae>", [0] hash1 = Hasher.hash(create_ipython_func(co_filename, returned_obj)) co_filename, returned_obj = "<ipython-input-2-e0383a102aae>", [1] hash2 = Hasher.hash(create_ipython_func(co_filename, returned_obj)) co_filename, returned_obj = "<ipython-input-5-713f6613acf3>", [0] hash3 = Hasher.hash(create_ipython_func(co_filename, returned_obj)) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) co_filename, returned_obj = os.path.join(gettempdir(), "ipykernel_12345", "321456789.py"), [0] hash4 = Hasher.hash(create_ipython_func(co_filename, returned_obj)) co_filename, returned_obj = os.path.join(gettempdir(), "ipykernel_12345", "321456789.py"), [1] hash5 = Hasher.hash(create_ipython_func(co_filename, returned_obj)) co_filename, returned_obj = os.path.join(gettempdir(), "ipykernel_12345", "654123987.py"), [0] hash6 = Hasher.hash(create_ipython_func(co_filename, returned_obj)) self.assertEqual(hash4, hash6) self.assertNotEqual(hash4, hash5) def test_recurse_hash_for_function_with_shuffled_globals(self): foo, bar = [0], [1] def func(): return foo, bar func.__module__ = "__main__" def globalvars_mock1_side_effect(func, *args, **kwargs): return {"foo": foo, "bar": bar} def globalvars_mock2_side_effect(func, *args, **kwargs): return {"bar": bar, "foo": foo} with patch("dill.detect.globalvars", side_effect=globalvars_mock1_side_effect) as globalvars_mock1: hash1 = Hasher.hash(func) self.assertGreater(globalvars_mock1.call_count, 0) with patch("dill.detect.globalvars", side_effect=globalvars_mock2_side_effect) as globalvars_mock2: hash2 = Hasher.hash(func) self.assertGreater(globalvars_mock2.call_count, 0) self.assertEqual(hash1, hash2) class HashingTest(TestCase): def test_hash_simple(self): hash1 = Hasher.hash("hello") hash2 = Hasher.hash("hello") hash3 = Hasher.hash("there") self.assertEqual(hash1, hash2) self.assertNotEqual(hash1, hash3) def test_hash_class_instance(self): hash1 = Hasher.hash(Foo("hello")) hash2 = Hasher.hash(Foo("hello")) hash3 = Hasher.hash(Foo("there")) self.assertEqual(hash1, hash2) self.assertNotEqual(hash1, hash3) def test_hash_update(self): hasher = Hasher() for x in ["hello", Foo("hello")]: hasher.update(x) hash1 = hasher.hexdigest() hasher = Hasher() for x in ["hello", Foo("hello")]: hasher.update(x) hash2 = hasher.hexdigest() hasher = Hasher() for x in ["there", Foo("there")]: hasher.update(x) hash3 = hasher.hexdigest() self.assertEqual(hash1, hash2) self.assertNotEqual(hash1, hash3) def test_hash_unpicklable(self): with self.assertRaises(pickle.PicklingError): 
Hasher.hash(UnpicklableCallable(Foo("hello"))) def test_hash_same_strings(self): string = "abc" obj1 = [string, string] # two strings have the same ids obj2 = [string, string] obj3 = json.loads(f'["{string}", "{string}"]') # two strings have different ids self.assertIs(obj1[0], string) self.assertIs(obj1[0], obj1[1]) self.assertIs(obj2[0], string) self.assertIs(obj2[0], obj2[1]) self.assertIsNot(obj3[0], string) self.assertIsNot(obj3[0], obj3[1]) hash1 = Hasher.hash(obj1) hash2 = Hasher.hash(obj2) hash3 = Hasher.hash(obj3) self.assertEqual(hash1, hash2) self.assertEqual(hash1, hash3) def test_set_stable(self): rng = np.random.default_rng(42) set_ = {rng.random() for _ in range(10_000)} expected_hash = Hasher.hash(set_) assert expected_hash == Pool(1).apply_async(partial(Hasher.hash, set(set_))).get() def test_set_doesnt_depend_on_order(self): set_ = set("abc") hash1 = Hasher.hash(set_) set_ = set("def") hash2 = Hasher.hash(set_) set_ = set("cba") hash3 = Hasher.hash(set_) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) @require_tiktoken def test_hash_tiktoken_encoding(self): import tiktoken enc = tiktoken.get_encoding("gpt2") hash1 = Hasher.hash(enc) enc = tiktoken.get_encoding("r50k_base") hash2 = Hasher.hash(enc) enc = tiktoken.get_encoding("gpt2") hash3 = Hasher.hash(enc) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) @require_torch def test_hash_torch_tensor(self): import torch t = torch.tensor([1.0]) hash1 = Hasher.hash(t) t = torch.tensor([2.0]) hash2 = Hasher.hash(t) t = torch.tensor([1.0]) hash3 = Hasher.hash(t) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) @require_spacy @require_spacy_model("en_core_web_sm") @require_spacy_model("fr_core_news_sm") @pytest.mark.integration def test_hash_spacy_model(self): import spacy nlp = spacy.load("en_core_web_sm") hash1 = Hasher.hash(nlp) nlp = spacy.load("fr_core_news_sm") hash2 = Hasher.hash(nlp) nlp = spacy.load("en_core_web_sm") hash3 = Hasher.hash(nlp) self.assertEqual(hash1, hash3) self.assertNotEqual(hash1, hash2) @require_not_windows @require_torch def test_hash_torch_compiled_function(self): import torch def f(x): return torch.sin(x) + torch.cos(x) hash1 = Hasher.hash(f) f = torch.compile(f) hash2 = Hasher.hash(f) self.assertEqual(hash1, hash2) @require_not_windows @require_torch def test_hash_torch_compiled_module(self): m = TorchModule() next(iter(m.parameters())).data.fill_(1.0) hash1 = Hasher.hash(m) m = torch.compile(m) hash2 = Hasher.hash(m) m = TorchModule() next(iter(m.parameters())).data.fill_(2.0) m = torch.compile(m) hash3 = Hasher.hash(m) self.assertEqual(hash1, hash2) self.assertNotEqual(hash1, hash3) self.assertNotEqual(hash2, hash3) @pytest.mark.integration def test_move_script_doesnt_change_hash(tmp_path: Path): dir1 = tmp_path / "dir1" dir2 = tmp_path / "dir2" dir1.mkdir() dir2.mkdir() script_filename = "script.py" code = dedent( """ from datasets.fingerprint import Hasher def foo(): pass print(Hasher.hash(foo)) """ ) script_path1 = dir1 / script_filename script_path2 = dir2 / script_filename with script_path1.open("w") as f: f.write(code) with script_path2.open("w") as f: f.write(code) fingerprint1 = subprocess.check_output(["python", str(script_path1)]) fingerprint2 = subprocess.check_output(["python", str(script_path2)]) assert fingerprint1 == fingerprint2 def test_fingerprint_in_multiprocessing(): data = {"a": [0, 1, 2]} dataset = DatasetChild(InMemoryTable.from_pydict(data)) expected_fingerprint = dataset.func1()._fingerprint assert expected_fingerprint == 
dataset.func1()._fingerprint assert expected_fingerprint != dataset.func2()._fingerprint with Pool(2) as p: assert expected_fingerprint == p.apply_async(dataset.func1).get()._fingerprint assert expected_fingerprint != p.apply_async(dataset.func2).get()._fingerprint def test_fingerprint_when_transform_version_changes(): data = {"a": [0, 1, 2]} class DummyDatasetChild(datasets.Dataset): @fingerprint_transform(inplace=False) def func(self, new_fingerprint): return DummyDatasetChild(self.data, fingerprint=new_fingerprint) fingeprint_no_version = DummyDatasetChild(InMemoryTable.from_pydict(data)).func() class DummyDatasetChild(datasets.Dataset): @fingerprint_transform(inplace=False, version="1.0.0") def func(self, new_fingerprint): return DummyDatasetChild(self.data, fingerprint=new_fingerprint) fingeprint_1 = DummyDatasetChild(InMemoryTable.from_pydict(data)).func() class DummyDatasetChild(datasets.Dataset): @fingerprint_transform(inplace=False, version="2.0.0") def func(self, new_fingerprint): return DummyDatasetChild(self.data, fingerprint=new_fingerprint) fingeprint_2 = DummyDatasetChild(InMemoryTable.from_pydict(data)).func() assert len({fingeprint_no_version, fingeprint_1, fingeprint_2}) == 3 def test_dependency_on_dill(): # AttributeError: module 'dill._dill' has no attribute 'stack' hasher = Hasher() hasher.update(lambda x: x)
0
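The hashing tests above all rest on one property: `Hasher.hash` is deterministic for equal inputs and changes when the hashed object changes. A minimal sketch of that property follows; the `tokenize` helper is a hypothetical example, not something taken from the test file.

# Minimal sketch: Hasher.hash is stable for the same input and differs for different inputs.
from datasets.fingerprint import Hasher


def tokenize(text):  # hypothetical helper, used only for illustration
    return text.split()


assert Hasher.hash("hello") == Hasher.hash("hello")
assert Hasher.hash("hello") != Hasher.hash("there")
assert Hasher.hash(tokenize) == Hasher.hash(tokenize)  # functions can be hashed too
print(Hasher.hash(tokenize))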
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_extract.py
import os import zipfile import pytest from datasets.utils.extract import ( Bzip2Extractor, Extractor, GzipExtractor, Lz4Extractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lz4, require_py7zr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive", [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ], ) def test_base_extractors( compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ): input_paths_and_base_extractors = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bz2_file, Bzip2Extractor), "gzip": (gz_file, GzipExtractor), "lz4": (lz4_file, Lz4Extractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } input_path, base_extractor = input_paths_and_base_extractors[compression_format] if input_path is None: reason = f"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_py7zr.kwargs["reason"] elif compression_format == "lz4": reason += require_lz4.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) assert base_extractor.is_extractable(input_path) output_path = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(input_path, output_path) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name extracted_file_content = file_path.read_text(encoding="utf-8") else: extracted_file_content = output_path.read_text(encoding="utf-8") expected_file_content = text_file.read_text(encoding="utf-8") assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive", [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ], ) def test_extractor( compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ): input_paths = { "7z": seven_zip_file, "bz2": bz2_file, "gzip": gz_file, "lz4": lz4_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } input_path = input_paths[compression_format] if input_path is None: reason = f"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_py7zr.kwargs["reason"] elif compression_format == "lz4": reason += require_lz4.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) extractor_format = Extractor.infer_extractor_format(input_path) assert extractor_format is not None output_path = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(input_path, output_path, extractor_format) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name extracted_file_content = file_path.read_text(encoding="utf-8") else: extracted_file_content = output_path.read_text(encoding="utf-8") expected_file_content = text_file.read_text(encoding="utf-8") assert extracted_file_content == expected_file_content @pytest.fixture def tar_file_with_dot_dot(tmp_path, text_file): import tarfile directory = tmp_path / "data_dot_dot" directory.mkdir() path = directory / 
"tar_file_with_dot_dot.tar" with tarfile.TarFile(path, "w") as f: f.add(text_file, arcname=os.path.join("..", text_file.name)) return path @pytest.fixture def tar_file_with_sym_link(tmp_path): import tarfile directory = tmp_path / "data_sym_link" directory.mkdir() path = directory / "tar_file_with_sym_link.tar" os.symlink("..", directory / "subdir", target_is_directory=True) with tarfile.TarFile(path, "w") as f: f.add(str(directory / "subdir"), arcname="subdir") # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], ) def test_tar_extract_insecure_files( insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog ): insecure_tar_files = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } input_path = insecure_tar_files[insecure_tar_file] output_path = tmp_path / "extracted" TarExtractor.extract(input_path, output_path) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def test_is_zipfile_false_positive(tmpdir): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number not_a_zip_file = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 data = ( b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb") as f: f.write(data) assert zipfile.is_zipfile(str(not_a_zip_file)) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(not_a_zip_file) # but we're right
0
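For readers of the extraction tests above, a minimal usage sketch of the `Extractor` facade they exercise. The gzip archive is created on the fly and is an assumption of this example, not a repository fixture.

# Minimal sketch: infer the compression format of a file and extract it.
import gzip
import tempfile
from pathlib import Path

from datasets.utils.extract import Extractor

tmp_dir = Path(tempfile.mkdtemp())
archive_path = tmp_dir / "sample.txt.gz"  # hypothetical archive built for this example
with gzip.open(archive_path, "wt", encoding="utf-8") as f:
    f.write("hello from a gzip archive")

extractor_format = Extractor.infer_extractor_format(str(archive_path))
output_path = tmp_dir / "extracted.txt"
Extractor.extract(str(archive_path), str(output_path), extractor_format)
print(output_path.read_text(encoding="utf-8"))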
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_arrow_writer.py
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import Array2D, ClassLabel, Features, Image, Value from datasets.features.features import Array2DExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class TypedSequenceTest(TestCase): def test_no_type(self): arr = pa.array(TypedSequence([1, 2, 3])) self.assertEqual(arr.type, pa.int64()) def test_array_type_forbidden(self): with self.assertRaises(ValueError): _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64()) def test_try_type_and_type_forbidden(self): with self.assertRaises(ValueError): _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64"))) def test_compatible_type(self): arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32"))) self.assertEqual(arr.type, pa.int32()) def test_incompatible_type(self): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64"))) def test_try_compatible_type(self): arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32"))) self.assertEqual(arr.type, pa.int32()) def test_try_incompatible_type(self): arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64"))) self.assertEqual(arr.type, pa.string()) def test_compatible_extension_type(self): arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64")) def test_incompatible_extension_type(self): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64"))) def test_try_compatible_extension_type(self): arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64")) def test_try_incompatible_extension_type(self): arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, pa.string()) @require_pil def test_exhaustive_cast(self): import PIL.Image pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5)) with patch( "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects ) as mock_cast_to_python_objects: _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image())) args, kwargs = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting", kwargs) self.assertFalse(kwargs["optimize_list_casting"]) def _check_output(output, expected_num_chunks: int): stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output) f = pa.ipc.open_stream(stream) pa_table: pa.Table = f.read_all() assert len(pa_table.to_batches()) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) 
as writer: writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) def test_write_with_features(): output = pa.BufferOutputStream() features = Features({"labels": ClassLabel(names=["neg", "pos"])}) with ArrowWriter(stream=output, features=features) as writer: writer.write({"labels": 0}) writer.write({"labels": 1}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata stream = pa.BufferReader(output.getvalue()) f = pa.ipc.open_stream(stream) pa_table: pa.Table = f.read_all() schema = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(schema) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) def test_key_datatype(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: with pytest.raises(InvalidKeyError): writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2]) num_examples, num_bytes = writer.finalize() @pytest.mark.parametrize("writer_batch_size", [None, 2, 10]) def test_duplicate_keys(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: with pytest.raises(DuplicatedKeysError): writer.write({"col_1": "foo", "col_2": 1}, key=10) writer.write({"col_1": "bar", "col_2": 2}, key=10) num_examples, num_bytes = writer.finalize() @pytest.mark.parametrize("writer_batch_size", [None, 2, 10]) def test_write_with_keys(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: writer.write({"col_1": "foo", "col_2": 1}, key=1) writer.write({"col_1": "bar", "col_2": 2}, key=2) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_batch(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]}) writer.write_batch({"col_1": [], "col_2": []}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) 
@pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_table(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]})) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_row(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]})) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]})) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) def test_write_file(): with tempfile.TemporaryDirectory() as tmp_dir: fields = {"col_1": pa.string(), "col_2": pa.int64()} output = os.path.join(tmp_dir, "test.arrow") with ArrowWriter(path=output, schema=pa.schema(fields)) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output, 1) def get_base_dtype(arr_type): if pa.types.is_list(arr_type): return get_base_dtype(arr_type.value_type) else: return arr_type def change_first_primitive_element_in_list(lst, value): if isinstance(lst[0], list): change_first_primitive_element_in_list(lst[0], value) else: lst[0] = value @pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())]) @pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]]) def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype): arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type)) assert get_base_dtype(arr.type) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype", [ ("attention_mask", pa.int8()), ("special_tokens_mask", pa.int8()), ("token_type_ids", pa.int8()), ("input_ids", pa.int32()), ("other", pa.int64()), ], ) @pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]]) def test_optimized_typed_sequence(sequence, col, expected_dtype): # in range arr = pa.array(OptimizedTypedSequence(sequence, col=col)) assert get_base_dtype(arr.type) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications sequence = copy.deepcopy(sequence) value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1 
change_first_primitive_element_in_list(sequence, value) arr = pa.array(OptimizedTypedSequence(sequence, col=col)) assert get_base_dtype(arr.type) == pa.int64() @pytest.mark.parametrize("raise_exception", [False, True]) def test_arrow_writer_closes_stream(raise_exception, tmp_path): path = str(tmp_path / "dataset-train.arrow") try: with ArrowWriter(path=path) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def test_arrow_writer_with_filesystem(mockfs): path = "mock://dataset-train.arrow" with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer: assert isinstance(writer._fs, type(mockfs)) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(path) def test_parquet_writer_write(): output = pa.BufferOutputStream() with ParquetWriter(stream=output) as writer: writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 stream = pa.BufferReader(output.getvalue()) pa_table: pa.Table = pq.read_table(stream) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files", [False, True]) def test_writer_embed_local_files(tmp_path, embed_local_files): import PIL.Image image_path = str(tmp_path / "test_image_rgb.jpg") PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png") output = pa.BufferOutputStream() with ParquetWriter( stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files ) as writer: writer.write({"image": image_path}) writer.finalize() stream = pa.BufferReader(output.getvalue()) pa_table: pa.Table = pq.read_table(stream) out = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"], str) with open(image_path, "rb") as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def test_always_nullable(): non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)]) output = pa.BufferOutputStream() with ArrowWriter(stream=output) as writer: writer._build_writer(inferred_schema=non_nullable_schema) assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
0
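A minimal round-trip sketch of the `ArrowWriter` API the tests above rely on: write two examples to an in-memory Arrow stream, finalize, and read the table back with pyarrow. The column names are illustrative.

# Minimal sketch: ArrowWriter round trip through an in-memory buffer.
import pyarrow as pa

from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
print(num_examples, num_bytes > 0, table.to_pydict())
# 2 True {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}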
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_version.py
import pytest

from datasets.utils.version import Version


@pytest.mark.parametrize(
    "other, expected_equality",
    [
        (Version("1.0.0"), True),
        ("1.0.0", True),
        (Version("2.0.0"), False),
        ("2.0.0", False),
        ("1", False),
        ("a", False),
        (1, False),
        (None, False),
    ],
)
def test_version_equality_and_hash(other, expected_equality):
    version = Version("1.0.0")
    assert (version == other) is expected_equality
    assert (version != other) is not expected_equality
    assert (hash(version) == hash(other)) is expected_equality
0
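A tiny illustration of the `Version` equality semantics checked above, assuming only what the test itself asserts.

# Minimal sketch: a Version compares equal to an equivalent Version or version string.
from datasets.utils.version import Version

v = Version("1.0.0")
assert v == Version("1.0.0") and v == "1.0.0"
assert v != Version("2.0.0") and v != "2.0.0"
assert hash(v) == hash(Version("1.0.0"))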
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_tqdm.py
import unittest from unittest.mock import patch import pytest from pytest import CaptureFixture from datasets.utils import ( are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm, ) class TestTqdmUtils(unittest.TestCase): @pytest.fixture(autouse=True) def capsys(self, capsys: CaptureFixture) -> None: """Workaround to make capsys work in unittest framework. Capsys is a convenient pytest fixture to capture stdout. See https://waylonwalker.com/pytest-capsys/. Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790. """ self.capsys = capsys def setUp(self) -> None: """Get verbosity to set it back after the tests.""" self._previous_are_progress_bars_disabled = are_progress_bars_disabled() return super().setUp() def tearDown(self) -> None: """Set back progress bars verbosity as before testing.""" if self._previous_are_progress_bars_disabled: disable_progress_bars() else: enable_progress_bars() @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_helpers(self) -> None: """Test helpers to enable/disable progress bars.""" disable_progress_bars() self.assertTrue(are_progress_bars_disabled()) enable_progress_bars() self.assertFalse(are_progress_bars_disabled()) @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True) def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None: """ Test helpers cannot enable/disable progress bars when `HF_DATASETS_DISABLE_PROGRESS_BARS` is set. """ disable_progress_bars() self.assertTrue(are_progress_bars_disabled()) with self.assertWarns(UserWarning): enable_progress_bars() self.assertTrue(are_progress_bars_disabled()) # Still disabled ! @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False) def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None: """ Test helpers cannot enable/disable progress bars when `HF_DATASETS_DISABLE_PROGRESS_BARS` is set. """ enable_progress_bars() self.assertFalse(are_progress_bars_disabled()) with self.assertWarns(UserWarning): disable_progress_bars() self.assertFalse(are_progress_bars_disabled()) # Still enabled ! @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_disabled(self) -> None: """Test TQDM not outputting anything when globally disabled.""" disable_progress_bars() for _ in tqdm(range(10)): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertEqual(captured.err, "") @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_disabled_cannot_be_forced(self) -> None: """Test TQDM cannot be forced when globally disabled.""" disable_progress_bars() for _ in tqdm(range(10), disable=False): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertEqual(captured.err, "") @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None: """Test TQDM can still be locally disabled even when globally enabled.""" enable_progress_bars() for _ in tqdm(range(10), disable=True): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertEqual(captured.err, "") @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_enabled(self) -> None: """Test TQDM work normally when globally enabled.""" enable_progress_bars() for _ in tqdm(range(10)): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertIn("10/10", captured.err) # tqdm log
0
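For context on the progress-bar tests above, a minimal sketch of the global switches they exercise; the behaviour shown assumes `HF_DATASETS_DISABLE_PROGRESS_BARS` is unset, as the tests patch it to None.

# Minimal sketch: globally disable, then re-enable, datasets progress bars.
from datasets.utils import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm

disable_progress_bars()
assert are_progress_bars_disabled()
for _ in tqdm(range(3)):  # renders nothing while bars are disabled
    pass

enable_progress_bars()
assert not are_progress_bars_disabled()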
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_dataset_list.py
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
0
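A short usage sketch of `Dataset.from_list`, the constructor the suite above targets; the records are illustrative.

# Minimal sketch: build an in-memory Dataset from a list of row dicts.
from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
dset = Dataset.from_list(records)
print(dset.column_names)  # ['col_1', 'col_2']
print(dset[0])            # {'col_1': 3, 'col_2': 'a'}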
hf_public_repos/datasets
hf_public_repos/datasets/tests/conftest.py
import pytest

import datasets
import datasets.config


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)


@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
    datasets.config.TIME_OUT_REMOTE_CODE = 0
0
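As a sketch of how the autouse cache fixture above takes effect, here is a hypothetical test module (file name, test name, and assertion are illustrative, not part of the repository):

# Minimal sketch (hypothetical test): the autouse fixture repoints the cache config,
# so any test that reads datasets.config sees the temporary locations.
import os

import datasets.config


def test_cache_points_at_temporary_dir():
    assert str(datasets.config.HF_DATASETS_CACHE).endswith(os.path.join("cache", "datasets"))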
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_builder.py
import importlib import os import tempfile import types from contextlib import nullcontext as does_not_raise from multiprocessing import Process from pathlib import Path from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from multiprocess.pool import Pool from datasets.arrow_dataset import Dataset from datasets.arrow_reader import DatasetNotOnHfGcsError from datasets.arrow_writer import ArrowWriter from datasets.builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.download.download_manager import DownloadMode from datasets.features import Features, Value from datasets.info import DatasetInfo, PostProcessedInfo from datasets.iterable_dataset import IterableDataset from datasets.splits import Split, SplitDict, SplitGenerator, SplitInfo from datasets.streaming import xjoin from datasets.utils.file_utils import is_local_path from datasets.utils.info_utils import VerificationMode from datasets.utils.logging import INFO, get_logger from .utils import ( assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_beam, require_faiss, set_current_working_directory_to_temp_dir, ) class DummyBuilder(DatasetBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _prepare_split(self, split_generator, **kwargs): fname = f"{self.dataset_name}-{split_generator.name}.arrow" with ArrowWriter(features=self.info.features, path=os.path.join(self._output_dir, fname)) as writer: writer.write_batch({"text": ["foo"] * 100}) num_examples, num_bytes = writer.finalize() split_generator.split_info.num_examples = num_examples split_generator.split_info.num_bytes = num_bytes class DummyGeneratorBasedBuilder(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_examples(self): for i in range(100): yield i, {"text": "foo"} class DummyArrowBasedBuilder(ArrowBasedBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_tables(self): for i in range(10): yield i, pa.table({"text": ["foo"] * 10}) class DummyBeamBasedBuilder(BeamBasedBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _build_pcollection(self, pipeline): import apache_beam as beam def _process(item): for i in range(10): yield f"{i}_{item}", {"text": "foo"} return pipeline | "Initialize" >> beam.Create(range(10)) | "Extract content" >> beam.FlatMap(_process) class DummyGeneratorBasedBuilderWithIntegers(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_examples(self): for i in range(100): yield i, {"id": i} class DummyGeneratorBasedBuilderConfig(BuilderConfig): def __init__(self, content="foo", times=2, *args, **kwargs): super().__init__(*args, **kwargs) self.content = content self.times = times class 
DummyGeneratorBasedBuilderWithConfig(GeneratorBasedBuilder): BUILDER_CONFIG_CLASS = DummyGeneratorBasedBuilderConfig def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_examples(self): for i in range(100): yield i, {"text": self.config.content * self.config.times} class DummyBuilderWithMultipleConfigs(DummyBuilder): BUILDER_CONFIGS = [ DummyGeneratorBasedBuilderConfig(name="a"), DummyGeneratorBasedBuilderConfig(name="b"), ] class DummyBuilderWithDefaultConfig(DummyBuilderWithMultipleConfigs): DEFAULT_CONFIG_NAME = "a" class DummyBuilderWithDownload(DummyBuilder): def __init__(self, *args, rel_path=None, abs_path=None, **kwargs): super().__init__(*args, **kwargs) self._rel_path = rel_path self._abs_path = abs_path def _split_generators(self, dl_manager): if self._rel_path is not None: assert os.path.exists(dl_manager.download(self._rel_path)), "dl_manager must support relative paths" if self._abs_path is not None: assert os.path.exists(dl_manager.download(self._abs_path)), "dl_manager must support absolute paths" return [SplitGenerator(name=Split.TRAIN)] class DummyBuilderWithManualDownload(DummyBuilderWithMultipleConfigs): @property def manual_download_instructions(self): return "To use the dataset you have to download some stuff manually and pass the data path to data_dir" def _split_generators(self, dl_manager): if not os.path.exists(self.config.data_dir): raise FileNotFoundError(f"data_dir {self.config.data_dir} doesn't exist.") return [SplitGenerator(name=Split.TRAIN)] class DummyArrowBasedBuilderWithShards(ArrowBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})] def _generate_tables(self, filepaths): idx = 0 for filepath in filepaths: for i in range(10): yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10}) idx += 1 class DummyGeneratorBasedBuilderWithShards(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})] def _generate_examples(self, filepaths): idx = 0 for filepath in filepaths: for i in range(100): yield idx, {"id": i, "filepath": filepath} idx += 1 class DummyArrowBasedBuilderWithAmbiguousShards(ArrowBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator( name=Split.TRAIN, gen_kwargs={ "filepaths": [f"data{i}.txt" for i in range(4)], "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)], }, ) ] def _generate_tables(self, filepaths, dummy_kwarg_with_different_length): idx = 0 for filepath in filepaths: for i in range(10): yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10}) idx += 1 class DummyGeneratorBasedBuilderWithAmbiguousShards(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator( name=Split.TRAIN, gen_kwargs={ "filepaths": [f"data{i}.txt" for i in range(4)], 
"dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)], }, ) ] def _generate_examples(self, filepaths, dummy_kwarg_with_different_length): idx = 0 for filepath in filepaths: for i in range(100): yield idx, {"id": i, "filepath": filepath} idx += 1 def _run_concurrent_download_and_prepare(tmp_dir): builder = DummyBuilder(cache_dir=tmp_dir) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS) return builder def check_streaming(builder): builders_module = importlib.import_module(builder.__module__) assert builders_module._patched_for_streaming assert builders_module.os.path.join is xjoin class BuilderTest(TestCase): def test_download_and_prepare(self): with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def test_download_and_prepare_checksum_computation(self): with tempfile.TemporaryDirectory() as tmp_dir: builder_no_verification = DummyBuilder(cache_dir=tmp_dir) builder_no_verification.download_and_prepare( try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD ) self.assertTrue( all(v["checksum"] is not None for _, v in builder_no_verification.info.download_checksums.items()) ) builder_with_verification = DummyBuilder(cache_dir=tmp_dir) builder_with_verification.download_and_prepare( try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.ALL_CHECKS, ) self.assertTrue( all(v["checksum"] is None for _, v in builder_with_verification.info.download_checksums.items()) ) def test_concurrent_download_and_prepare(self): with tempfile.TemporaryDirectory() as tmp_dir: processes = 2 with Pool(processes=processes) as pool: jobs = [ pool.apply_async(_run_concurrent_download_and_prepare, kwds={"tmp_dir": tmp_dir}) for _ in range(processes) ] builders = [job.get() for job in jobs] for builder in builders: self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists( os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json") ) ) def test_download_and_prepare_with_base_path(self): with tempfile.TemporaryDirectory() as tmp_dir: rel_path = "dummy1.data" abs_path = os.path.join(tmp_dir, "dummy2.data") # test relative path is missing builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path) with self.assertRaises(FileNotFoundError): builder.download_and_prepare( try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir ) # test absolute path is missing builder = DummyBuilderWithDownload(cache_dir=tmp_dir, abs_path=abs_path) with self.assertRaises(FileNotFoundError): builder.download_and_prepare( try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir ) # test that they are both properly loaded 
when they exist open(os.path.join(tmp_dir, rel_path), "w") open(abs_path, "w") builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path, abs_path=abs_path) builder.download_and_prepare( try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir ) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) ) def test_as_dataset_with_post_process(self): def _post_process(self, dataset, resources_paths): def char_tokenize(example): return {"tokens": list(example["text"])} return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"]) def _post_processing_resources(self, split): return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder.info.post_processed = PostProcessedInfo( features=Features({"text": Value("string"), "tokens": [Value("string")]}) ) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with ArrowWriter( path=os.path.join(builder.cache_dir, f"tokenized_dataset-{split}.arrow"), features=Features({"text": Value("string"), "tokens": [Value("string")]}), ) as writer: writer.write_batch({"text": ["foo"] * 10, "tokens": [list("foo")] * 10}) writer.finalize() dsets = builder.as_dataset() self.assertIsInstance(dsets, DatasetDict) self.assertListEqual(list(dsets.keys()), ["train", "test"]) self.assertEqual(len(dsets["train"]), 10) self.assertEqual(len(dsets["test"]), 10) self.assertDictEqual( dsets["train"].features, Features({"text": Value("string"), "tokens": [Value("string")]}) ) self.assertDictEqual( dsets["test"].features, Features({"text": Value("string"), "tokens": [Value("string")]}) ) self.assertListEqual(dsets["train"].column_names, ["text", "tokens"]) self.assertListEqual(dsets["test"].column_names, ["text", "tokens"]) del dsets dset = builder.as_dataset("train") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train") self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]})) self.assertListEqual(dset.column_names, ["text", "tokens"]) self.assertGreater(builder.info.post_processing_size, 0) self.assertGreater( builder.info.post_processed.resources_checksums["train"]["tokenized_dataset"]["num_bytes"], 0 ) del dset dset = builder.as_dataset("train+test[:30%]") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test[:30%]") self.assertEqual(len(dset), 13) self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]})) self.assertListEqual(dset.column_names, ["text", "tokens"]) del dset dset = builder.as_dataset("all") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test") self.assertEqual(len(dset), 20) self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]})) 
self.assertListEqual(dset.column_names, ["text", "tokens"]) del dset def _post_process(self, dataset, resources_paths): return dataset.select([0, 1], keep_in_memory=True) with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with ArrowWriter( path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 2}) writer.finalize() dsets = builder.as_dataset() self.assertIsInstance(dsets, DatasetDict) self.assertListEqual(list(dsets.keys()), ["train", "test"]) self.assertEqual(len(dsets["train"]), 2) self.assertEqual(len(dsets["test"]), 2) self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")})) self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")})) self.assertListEqual(dsets["train"].column_names, ["text"]) self.assertListEqual(dsets["test"].column_names, ["text"]) del dsets dset = builder.as_dataset("train") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train") self.assertEqual(len(dset), 2) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) del dset dset = builder.as_dataset("train+test[:30%]") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test[:30%]") self.assertEqual(len(dset), 2) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) del dset @require_faiss def test_as_dataset_with_post_process_with_index(self): def _post_process(self, dataset, resources_paths): if os.path.exists(resources_paths["index"]): dataset.load_faiss_index("my_index", resources_paths["index"]) return dataset else: dataset.add_faiss_index_from_external_arrays( external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index" ) dataset.save_faiss_index("my_index", resources_paths["index"]) return dataset def _post_processing_resources(self, split): return {"index": f"Flat-{split}.faiss"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with ArrowWriter( path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 2}) writer.finalize() dsets = builder.as_dataset() self.assertIsInstance(dsets, DatasetDict) 
self.assertListEqual(list(dsets.keys()), ["train", "test"]) self.assertEqual(len(dsets["train"]), 10) self.assertEqual(len(dsets["test"]), 10) self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")})) self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")})) self.assertListEqual(dsets["train"].column_names, ["text"]) self.assertListEqual(dsets["test"].column_names, ["text"]) self.assertListEqual(dsets["train"].list_indexes(), ["my_index"]) self.assertListEqual(dsets["test"].list_indexes(), ["my_index"]) self.assertGreater(builder.info.post_processing_size, 0) self.assertGreater(builder.info.post_processed.resources_checksums["train"]["index"]["num_bytes"], 0) del dsets dset = builder.as_dataset("train") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train") self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) self.assertListEqual(dset.list_indexes(), ["my_index"]) del dset dset = builder.as_dataset("train+test[:30%]") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test[:30%]") self.assertEqual(len(dset), 13) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) self.assertListEqual(dset.list_indexes(), ["my_index"]) del dset def test_download_and_prepare_with_post_process(self): def _post_process(self, dataset, resources_paths): def char_tokenize(example): return {"tokens": list(example["text"])} return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"]) def _post_processing_resources(self, split): return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder.info.post_processed = PostProcessedInfo( features=Features({"text": Value("string"), "tokens": [Value("string")]}) ) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertDictEqual( builder.info.post_processed.features, Features({"text": Value("string"), "tokens": [Value("string")]}), ) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def _post_process(self, dataset, resources_paths): return dataset.select([0, 1], keep_in_memory=True) with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertIsNone(builder.info.post_processed) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, 
"default", "0.0.0", "dataset_info.json")) ) def _post_process(self, dataset, resources_paths): if os.path.exists(resources_paths["index"]): dataset.load_faiss_index("my_index", resources_paths["index"]) return dataset else: dataset = dataset.add_faiss_index_from_external_arrays( external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index" ) dataset.save_faiss_index("my_index", resources_paths["index"]) return dataset def _post_processing_resources(self, split): return {"index": f"Flat-{split}.faiss"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertIsNone(builder.info.post_processed) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def test_error_download_and_prepare(self): def _prepare_split(self, split_generator, **kwargs): raise ValueError() with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._prepare_split = types.MethodType(_prepare_split, builder) self.assertRaises( ValueError, builder.download_and_prepare, try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, ) self.assertRaises(FileNotFoundError, builder.as_dataset) def test_generator_based_download_and_prepare(self): with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) # Test that duplicated keys are ignored if verification_mode is "no_checks" with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir) with patch("datasets.builder.ArrowWriter", side_effect=ArrowWriter) as mock_arrow_writer: builder.download_and_prepare( download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS ) mock_arrow_writer.assert_called_once() args, kwargs = mock_arrow_writer.call_args_list[0] self.assertFalse(kwargs["check_duplicates"]) mock_arrow_writer.reset_mock() builder.download_and_prepare( download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.BASIC_CHECKS ) mock_arrow_writer.assert_called_once() args, kwargs = mock_arrow_writer.call_args_list[0] self.assertTrue(kwargs["check_duplicates"]) def test_cache_dir_no_args(self): with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_dir=None, data_files=None) relative_cache_dir_parts = Path(builder._relative_data_dir()).parts self.assertTupleEqual(relative_cache_dir_parts, 
(builder.dataset_name, "default", "0.0.0")) def test_cache_dir_for_data_files(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_data1 = os.path.join(tmp_dir, "dummy_data1.txt") with open(dummy_data1, "w", encoding="utf-8") as f: f.writelines("foo bar") dummy_data2 = os.path.join(tmp_dir, "dummy_data2.txt") with open(dummy_data2, "w", encoding="utf-8") as f: f.writelines("foo bar\n") builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1]) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": dummy_data1}) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={Split.TRAIN: dummy_data1}) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": [dummy_data1]}) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"test": dummy_data1}) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2]) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2]) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2]) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2]) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2, dummy_data1]) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2} ) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2} ) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2} ) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2} ) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": [dummy_data1, dummy_data2], "test": dummy_data2}, ) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_cache_dir_for_features(self): with tempfile.TemporaryDirectory() as tmp_dir: f1 = Features({"id": Value("int8")}) f2 = Features({"id": Value("int32")}) builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1) self.assertEqual(builder.cache_dir, 
other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_cache_dir_for_config_kwargs(self): with tempfile.TemporaryDirectory() as tmp_dir: # create config on the fly builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo", times=2) other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, times=2, content="foo") self.assertEqual(builder.cache_dir, other_builder.cache_dir) self.assertIn("content=foo", builder.cache_dir) self.assertIn("times=2", builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="bar", times=2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo") self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) with tempfile.TemporaryDirectory() as tmp_dir: # overwrite an existing config builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo", times=2) other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", times=2, content="foo") self.assertEqual(builder.cache_dir, other_builder.cache_dir) self.assertIn("content=foo", builder.cache_dir) self.assertIn("times=2", builder.cache_dir) other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="bar", times=2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo") self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_config_names(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as error_context: DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, data_files=None, data_dir=None) self.assertIn("Please pick one among the available configs", str(error_context.exception)) builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a") self.assertEqual(builder.config.name, "a") builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="b") self.assertEqual(builder.config.name, "b") with self.assertRaises(ValueError): DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir) builder = DummyBuilderWithDefaultConfig(cache_dir=tmp_dir) self.assertEqual(builder.config.name, "a") def test_cache_dir_for_data_dir(self): with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir: builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir) other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=tmp_dir) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_arrow_based_download_and_prepare(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare() assert os.path.exists( os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) assert builder.info.features, Features({"text": Value("string")}) assert builder.info.splits["train"].num_examples == 100 assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) @require_beam def 
test_beam_based_download_and_prepare(tmp_path): builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner") builder.download_and_prepare() assert os.path.exists( os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) assert builder.info.features, Features({"text": Value("string")}) assert builder.info.splits["train"].num_examples == 100 assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) @require_beam def test_beam_based_as_dataset(tmp_path): builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner") builder.download_and_prepare() dataset = builder.as_dataset() assert dataset assert isinstance(dataset["train"], Dataset) assert len(dataset["train"]) > 0 @pytest.mark.parametrize( "split, expected_dataset_class, expected_dataset_length", [ (None, DatasetDict, 10), ("train", Dataset, 10), ("train+test[:30%]", Dataset, 13), ], ) @pytest.mark.parametrize("in_memory", [False, True]) def test_builder_as_dataset(split, expected_dataset_class, expected_dataset_length, in_memory, tmp_path): cache_dir = str(tmp_path) builder = DummyBuilder(cache_dir=cache_dir) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for info_split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{info_split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): dataset = builder.as_dataset(split=split, in_memory=in_memory) assert isinstance(dataset, expected_dataset_class) if isinstance(dataset, DatasetDict): assert list(dataset.keys()) == ["train", "test"] datasets = dataset.values() expected_splits = ["train", "test"] elif isinstance(dataset, Dataset): datasets = [dataset] expected_splits = [split] for dataset, expected_split in zip(datasets, expected_splits): assert dataset.split == expected_split assert len(dataset) == expected_dataset_length assert dataset.features == Features({"text": Value("string")}) dataset.column_names == ["text"] @pytest.mark.parametrize("in_memory", [False, True]) def test_generator_based_builder_as_dataset(in_memory, tmp_path): cache_dir = tmp_path / "data" cache_dir.mkdir() cache_dir = str(cache_dir) builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir) builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD) with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): dataset = builder.as_dataset("train", in_memory=in_memory) assert dataset.data.to_pydict() == {"text": ["foo"] * 100} @pytest.mark.parametrize( "writer_batch_size, default_writer_batch_size, expected_chunks", [(None, None, 1), (None, 5, 20), (10, None, 10)] ) def test_custom_writer_batch_size(tmp_path, writer_batch_size, default_writer_batch_size, expected_chunks): cache_dir = str(tmp_path) if default_writer_batch_size: DummyGeneratorBasedBuilder.DEFAULT_WRITER_BATCH_SIZE = default_writer_batch_size builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir, writer_batch_size=writer_batch_size) assert builder._writer_batch_size == (writer_batch_size or default_writer_batch_size) builder.download_and_prepare(try_from_hf_gcs=False, 
download_mode=DownloadMode.FORCE_REDOWNLOAD) dataset = builder.as_dataset("train") assert len(dataset.data[0].chunks) == expected_chunks def test_builder_as_streaming_dataset(tmp_path): dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path)) check_streaming(dummy_builder) dsets = dummy_builder.as_streaming_dataset() assert isinstance(dsets, IterableDatasetDict) assert isinstance(dsets["train"], IterableDataset) assert len(list(dsets["train"])) == 100 dset = dummy_builder.as_streaming_dataset(split="train") assert isinstance(dset, IterableDataset) assert len(list(dset)) == 100 @require_beam def test_beam_based_builder_as_streaming_dataset(tmp_path): builder = DummyBeamBasedBuilder(cache_dir=tmp_path) check_streaming(builder) with pytest.raises(DatasetNotOnHfGcsError): builder.as_streaming_dataset() def _run_test_builder_streaming_works_in_subprocesses(builder): check_streaming(builder) dset = builder.as_streaming_dataset(split="train") assert isinstance(dset, IterableDataset) assert len(list(dset)) == 100 def test_builder_streaming_works_in_subprocess(tmp_path): dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path)) p = Process(target=_run_test_builder_streaming_works_in_subprocesses, args=(dummy_builder,)) p.start() p.join() class DummyBuilderWithVersion(GeneratorBasedBuilder): VERSION = "2.0.0" def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): pass def _generate_examples(self): pass class DummyBuilderWithBuilderConfigs(GeneratorBasedBuilder): BUILDER_CONFIGS = [BuilderConfig(name="custom", version="2.0.0")] def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): pass def _generate_examples(self): pass class CustomBuilderConfig(BuilderConfig): def __init__(self, date=None, language=None, version="2.0.0", **kwargs): name = f"{date}.{language}" super().__init__(name=name, version=version, **kwargs) self.date = date self.language = language class DummyBuilderWithCustomBuilderConfigs(GeneratorBasedBuilder): BUILDER_CONFIGS = [CustomBuilderConfig(date="20220501", language="en")] BUILDER_CONFIG_CLASS = CustomBuilderConfig def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): pass def _generate_examples(self): pass @pytest.mark.parametrize( "builder_class, kwargs", [ (DummyBuilderWithVersion, {}), (DummyBuilderWithBuilderConfigs, {"config_name": "custom"}), (DummyBuilderWithCustomBuilderConfigs, {"config_name": "20220501.en"}), (DummyBuilderWithCustomBuilderConfigs, {"date": "20220501", "language": "ca"}), ], ) def test_builder_config_version(builder_class, kwargs, tmp_path): cache_dir = str(tmp_path) builder = builder_class(cache_dir=cache_dir, **kwargs) assert builder.config.version == "2.0.0" def test_builder_download_and_prepare_with_absolute_output_dir(tmp_path): builder = DummyGeneratorBasedBuilder() output_dir = str(tmp_path) builder.download_and_prepare(output_dir) assert builder._output_dir.startswith(tmp_path.resolve().as_posix()) assert os.path.exists(os.path.join(output_dir, "dataset_info.json")) assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow")) assert not os.path.exists(os.path.join(output_dir + ".incomplete")) def test_builder_download_and_prepare_with_relative_output_dir(): with set_current_working_directory_to_temp_dir(): builder = DummyGeneratorBasedBuilder() output_dir = "test-out" 
builder.download_and_prepare(output_dir) assert Path(builder._output_dir).resolve().as_posix().startswith(Path(output_dir).resolve().as_posix()) assert os.path.exists(os.path.join(output_dir, "dataset_info.json")) assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow")) assert not os.path.exists(os.path.join(output_dir + ".incomplete")) def test_builder_with_filesystem_download_and_prepare(tmp_path, mockfs): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options) assert builder._output_dir.startswith("mock://my_dataset") assert is_local_path(builder._cache_downloaded_dir) assert isinstance(builder._fs, type(mockfs)) assert builder._fs.storage_options == mockfs.storage_options assert mockfs.exists("my_dataset/dataset_info.json") assert mockfs.exists(f"my_dataset/{builder.dataset_name}-train.arrow") assert not mockfs.exists("my_dataset.incomplete") def test_builder_with_filesystem_download_and_prepare_reload(tmp_path, mockfs, caplog): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) mockfs.makedirs("my_dataset") DatasetInfo().write_to_directory("mock://my_dataset", storage_options=mockfs.storage_options) mockfs.touch(f"my_dataset/{builder.dataset_name}-train.arrow") caplog.clear() with caplog.at_level(INFO, logger=get_logger().name): builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options) assert "Found cached dataset" in caplog.text def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" ) assert os.path.exists(parquet_path) assert pq.ParquetFile(parquet_path) is not None def test_generator_based_builder_download_and_prepare_sharded(tmp_path): writer_batch_size = 25 builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size) with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard builder.download_and_prepare(file_format="parquet") expected_num_shards = 100 // writer_batch_size assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_generator_based_builder_download_and_prepare_with_max_shard_size(tmp_path): writer_batch_size = 25 builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size) builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one batch per shard expected_num_shards = 100 // writer_batch_size assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( 
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_generator_based_builder_download_and_prepare_with_num_proc(tmp_path): builder = DummyGeneratorBasedBuilderWithShards(cache_dir=tmp_path) builder.download_and_prepare(num_proc=2) expected_num_shards = 2 assert builder.info.splits["train"].num_examples == 400 assert builder.info.splits["train"].shard_lengths == [200, 200] arrow_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow", ) assert os.path.exists(arrow_path) ds = builder.as_dataset("train") assert len(ds) == 400 assert ds.to_dict() == { "id": [i for _ in range(4) for i in range(100)], "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)], } @pytest.mark.parametrize( "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))] ) def test_generator_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path): builder = DummyGeneratorBasedBuilderWithAmbiguousShards(cache_dir=tmp_path) with expectation: builder.download_and_prepare(num_proc=num_proc) def test_arrow_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" ) assert os.path.exists(parquet_path) assert pq.ParquetFile(parquet_path) is not None def test_arrow_based_builder_download_and_prepare_sharded(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard builder.download_and_prepare(file_format="parquet") expected_num_shards = 10 assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_arrow_based_builder_download_and_prepare_with_max_shard_size(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one table per shard expected_num_shards = 10 assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_arrow_based_builder_download_and_prepare_with_num_proc(tmp_path): builder = DummyArrowBasedBuilderWithShards(cache_dir=tmp_path) builder.download_and_prepare(num_proc=2) 
expected_num_shards = 2 assert builder.info.splits["train"].num_examples == 400 assert builder.info.splits["train"].shard_lengths == [200, 200] arrow_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow", ) assert os.path.exists(arrow_path) ds = builder.as_dataset("train") assert len(ds) == 400 assert ds.to_dict() == { "id": [i for _ in range(4) for i in range(100)], "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)], } @pytest.mark.parametrize( "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))] ) def test_arrow_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path): builder = DummyArrowBasedBuilderWithAmbiguousShards(cache_dir=tmp_path) with expectation: builder.download_and_prepare(num_proc=num_proc) @require_beam def test_beam_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner") builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" ) assert os.path.exists(parquet_path) assert pq.ParquetFile(parquet_path) is not None
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_inspect.py
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "default", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
    info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
    assert list(info.splits.keys()) == ["train"]


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("acronym_identification", ["default"]),
        ("squad", ["plain_text"]),
        ("hf-internal-testing/dataset_with_script", ["default"]),
        ("dalle-mini/wit", ["default"]),
        ("hf-internal-testing/librispeech_asr_dummy", ["clean", "other"]),
        ("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]),
        ("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]),
        ("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert config_names == expected


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["default"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "default", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/utils.py
import asyncio import importlib.metadata import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False) _run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True) _run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio require_sndfile = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam require_beam = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility require_dill_gt_0_3_2 = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows require_not_windows = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def require_faiss(test_case): """ Decorator marking a test that requires Faiss. These tests are skipped when Faiss isn't installed. """ try: import faiss # noqa except ImportError: test_case = unittest.skip("test requires faiss")(test_case) return test_case def require_regex(test_case): """ Decorator marking a test that requires regex. These tests are skipped when Regex isn't installed. """ try: import regex # noqa except ImportError: test_case = unittest.skip("test requires regex")(test_case) return test_case def require_elasticsearch(test_case): """ Decorator marking a test that requires ElasticSearch. These tests are skipped when ElasticSearch isn't installed. """ try: import elasticsearch # noqa except ImportError: test_case = unittest.skip("test requires elasticsearch")(test_case) return test_case def require_sqlalchemy(test_case): """ Decorator marking a test that requires SQLAlchemy. These tests are skipped when SQLAlchemy isn't installed. """ try: import sqlalchemy # noqa except ImportError: test_case = unittest.skip("test requires sqlalchemy")(test_case) return test_case def require_torch(test_case): """ Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. 
""" if not config.TORCH_AVAILABLE: test_case = unittest.skip("test requires PyTorch")(test_case) return test_case def require_tf(test_case): """ Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed. """ if not config.TF_AVAILABLE: test_case = unittest.skip("test requires TensorFlow")(test_case) return test_case def require_jax(test_case): """ Decorator marking a test that requires JAX. These tests are skipped when JAX isn't installed. """ if not config.JAX_AVAILABLE: test_case = unittest.skip("test requires JAX")(test_case) return test_case def require_pil(test_case): """ Decorator marking a test that requires Pillow. These tests are skipped when Pillow isn't installed. """ if not config.PIL_AVAILABLE: test_case = unittest.skip("test requires Pillow")(test_case) return test_case def require_transformers(test_case): """ Decorator marking a test that requires transformers. These tests are skipped when transformers isn't installed. """ try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers")(test_case) else: return test_case def require_tiktoken(test_case): """ Decorator marking a test that requires tiktoken. These tests are skipped when transformers isn't installed. """ try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(test_case) else: return test_case def require_spacy(test_case): """ Decorator marking a test that requires spacy. These tests are skipped when they aren't installed. """ try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(test_case) else: return test_case def require_spacy_model(model): """ Decorator marking a test that requires a spacy model. These tests are skipped when they aren't installed. """ def _require_spacy_model(test_case): try: import spacy # noqa F401 spacy.load(model) except ImportError: return unittest.skip("test requires spacy")(test_case) except OSError: return unittest.skip("test requires spacy model '{}'".format(model))(test_case) else: return test_case return _require_spacy_model def require_pyspark(test_case): """ Decorator marking a test that requires pyspark. These tests are skipped when pyspark isn't installed. """ try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(test_case) else: return test_case def require_joblibspark(test_case): """ Decorator marking a test that requires joblibspark. These tests are skipped when pyspark isn't installed. """ try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(test_case) else: return test_case def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if not _run_slow_tests or _run_slow_tests == 0: test_case = unittest.skip("test is slow")(test_case) return test_case def local(test_case): """ Decorator marking a test as local Local tests are run by default. Set the RUN_LOCAL environment variable to a falsy value to not run them. """ if not _run_local_tests or _run_local_tests == 0: test_case = unittest.skip("test is local")(test_case) return test_case def packaged(test_case): """ Decorator marking a test as packaged Packaged tests are run by default. Set the RUN_PACKAGED environment variable to a falsy value to not run them. 
""" if not _run_packaged_tests or _run_packaged_tests == 0: test_case = unittest.skip("test is packaged")(test_case) return test_case def remote(test_case): """ Decorator marking a test as one that relies on GitHub or the Hugging Face Hub. Remote tests are skipped by default. Set the RUN_REMOTE environment variable to a falsy value to not run them. """ if not _run_remote_tests or _run_remote_tests == 0: test_case = unittest.skip("test requires remote")(test_case) return test_case def for_all_test_methods(*decorators): def decorate(cls): for name, fn in cls.__dict__.items(): if callable(fn) and name.startswith("test"): for decorator in decorators: fn = decorator(fn) setattr(cls, name, fn) return cls return decorate class RequestWouldHangIndefinitelyError(Exception): pass class OfflineSimulationMode(Enum): CONNECTION_FAILS = 0 CONNECTION_TIMES_OUT = 1 HF_DATASETS_OFFLINE_SET_TO_1 = 2 @contextmanager def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16): """ Simulate offline mode. There are three offline simulatiom modes: CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call. Connection errors are created by mocking socket.socket CONNECTION_TIMES_OUT: the connection hangs until it times out. The default timeout value is low (1e-16) to speed up the tests. Timeout errors are created by mocking requests.request HF_DATASETS_OFFLINE_SET_TO_1: the HF_DATASETS_OFFLINE environment variable is set to 1. This makes the http/ftp calls of the library instantly fail and raise an OfflineModeEmabled error. """ online_request = requests.Session().request def timeout_request(session, method, url, **kwargs): # Change the url to an invalid url so that the connection hangs invalid_url = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) kwargs["timeout"] = timeout try: return online_request(method, invalid_url, **kwargs) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier e.request.url = url max_retry_error = e.args[0] max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),) e.args = (max_retry_error,) raise def raise_connection_error(session, prepared_request, **kwargs): raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send", raise_connection_error): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request", timeout_request): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE", True): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def set_current_working_directory_to_temp_dir(*args, **kwargs): original_working_dir = str(Path().resolve()) with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir: try: os.chdir(tmp_dir) yield finally: os.chdir(original_working_dir) @contextmanager def assert_arrow_memory_increases(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." 
@contextmanager def assert_arrow_memory_doesnt_increase(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def is_rng_equal(rng1, rng2): return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist() def xfail_if_500_502_http_error(func): import decorator from requests.exceptions import HTTPError def _wrapper(func, *args, **kwargs): try: return func(*args, **kwargs) except HTTPError as err: if str(err).startswith("500") or str(err).startswith("502"): pytest.xfail(str(err)) raise err return decorator.decorator(_wrapper, func) # --- distributed testing functions --- # # copied from transformers # originally adapted from https://stackoverflow.com/a/59041913/9201239 class _RunOutput: def __init__(self, returncode, stdout, stderr): self.returncode = returncode self.stdout = stdout self.stderr = stderr async def _read_stream(stream, callback): while True: line = await stream.readline() if line: callback(line) else: break async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: if echo: print("\nRunning: ", " ".join(cmd)) p = await asyncio.create_subprocess_exec( cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) out = [] err = [] def tee(line, sink, pipe, label=""): line = line.decode("utf-8").rstrip() sink.append(line) if not quiet: print(label, line, file=pipe) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")), _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")), ], timeout=timeout, ) return _RunOutput(await p.wait(), out, err) def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: loop = asyncio.get_event_loop() result = loop.run_until_complete( _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) ) cmd_str = " ".join(cmd) if result.returncode > 0: stderr = "\n".join(result.stderr) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output.") return result def pytest_xdist_worker_id(): """ Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0 if `-n 1` or `pytest-xdist` isn't being used. """ worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") worker = re.sub(r"^gw", "", worker, 0, re.M) return int(worker) def get_torch_dist_unique_port(): """ Returns a port number that can be fed to `torchrun`'s `--master_port` argument. 
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same port at once. """ port = 29500 uniq_delta = pytest_xdist_worker_id() return port + uniq_delta
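# Illustrative usage sketch (not one of the helpers above): how `get_torch_dist_unique_port`
# and `execute_subprocess_async` could be combined to launch a distributed script without
# port clashes under pytest-xdist. The script path below is hypothetical.
def _example_run_distributed_script():
    master_port = get_torch_dist_unique_port()  # unique per pytest-xdist worker
    cmd = [
        "torchrun",
        "--nproc_per_node=2",
        f"--master_port={master_port}",
        "tests/distributed_scripts/run_dummy.py",  # hypothetical script path
    ]
    # execute_subprocess_async raises RuntimeError if the command fails or produces no output
    return execute_subprocess_async(cmd, env=os.environ.copy())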
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_beam.py
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy nested beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows, expected_num_examples) self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"])) self.assertTrue( os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")) ) del dset @require_beam def test_no_beam_options(self): with tempfile.TemporaryDirectory() as tmp_cache_dir: builder = DummyBeamDataset(cache_dir=tmp_cache_dir) self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare) @require_beam def test_nested_features(self): expected_num_examples = len(get_test_nested_examples()) with tempfile.TemporaryDirectory() as tmp_cache_dir: builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner") builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow") ) ) self.assertDictEqual( builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}) ) dset = builder.as_dataset() self.assertEqual(dset["train"].num_rows, expected_num_examples) self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples) self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1]) self.assertDictEqual( dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")) ) del dset
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_filesystem.py
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem from .utils import require_lz4, require_zstandard def test_mockfs(mockfs): assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def test_non_mockfs(): assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def test_extract_path_from_uri(): mock_bucket = "mock-s3-bucket" dataset_path = f"s3://{mock_bucket}" dataset_path = extract_path_from_uri(dataset_path) assert dataset_path.startswith("s3://") is False dataset_path = "./local/path" new_dataset_path = extract_path_from_uri(dataset_path) assert dataset_path == new_dataset_path def test_is_remote_filesystem(mockfs): is_remote = is_remote_filesystem(mockfs) assert is_remote is True fs = fsspec.filesystem("file") is_remote = is_remote_filesystem(fs) assert is_remote is False @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = input_paths[compression_fs_class.protocol] if input_path is None: reason = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lz4.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path) assert isinstance(fs, compression_fs_class) expected_filename = os.path.basename(input_path) expected_filename = expected_filename[: expected_filename.rindex(".")] assert fs.glob("*") == [expected_filename] with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol", ["zip", "gzip"]) def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path): compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} compressed_file_path = compressed_file_paths[protocol] member_file_path = "dataset.jsonl" path = f"{protocol}://{member_file_path}::{compressed_file_path}" fs, *_ = fsspec.get_fs_token_paths(path) assert fs.isfile(member_file_path) assert not fs.isfile("non_existing_" + member_file_path) def test_fs_overwrites(): protocol = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(protocol, None, clobber=True) with pytest.warns(UserWarning) as warning_info: importlib.reload(datasets.filesystems) assert len(warning_info) == 1 assert ( str(warning_info[0].message) == f"A filesystem protocol was already set for {protocol} and will be overwritten." )
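# Illustrative usage sketch (not an original test): reading a gzip-compressed text file
# through the "gzip" compression filesystem that `datasets.filesystems` registers with
# fsspec, as exercised by test_compression_filesystems above. The path is hypothetical.
def _example_read_gzip_member(gz_path="data.txt.gz"):
    fs = fsspec.filesystem("gzip", fo=gz_path)
    [member] = fs.glob("*")  # a single member named after the archive, extension stripped
    with fs.open(member, "r", encoding="utf-8") as f:
        return f.read()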
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_table.py
import copy import pickle import warnings from typing import List, Union import numpy as np import pyarrow as pa import pytest import datasets from datasets import Sequence, Value from datasets.features.features import Array2D, Array2DExtensionType, ClassLabel, Features, Image from datasets.table import ( ConcatenationTable, InMemoryTable, MemoryMappedTable, Table, TableBlock, _in_memory_arrow_table_from_buffer, _in_memory_arrow_table_from_file, _interpolation_search, _is_extension_type, _memory_mapped_arrow_table_from_file, array_concat, cast_array_to_feature, concat_tables, embed_array_storage, embed_table_storage, inject_arrow_table_documentation, table_cast, table_iter, ) from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow @pytest.fixture(scope="session") def in_memory_pa_table(arrow_file) -> pa.Table: return pa.ipc.open_stream(arrow_file).read_all() def _to_testing_blocks(table: TableBlock) -> List[List[TableBlock]]: assert len(table) > 2 blocks = [ [table.slice(0, 2)], [table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])], ] return blocks @pytest.fixture(scope="session") def in_memory_blocks(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) return _to_testing_blocks(table) @pytest.fixture(scope="session") def memory_mapped_blocks(arrow_file): table = MemoryMappedTable.from_file(arrow_file) return _to_testing_blocks(table) @pytest.fixture(scope="session") def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks): return in_memory_blocks[:1] + memory_mapped_blocks[1:] def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_doesnt_increase(): copied_table = copy.deepcopy(table) assert isinstance(copied_table, MemoryMappedTable) assert copied_table.table == table.table def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_increases(): copied_table = copy.deepcopy(table) assert isinstance(copied_table, MemoryMappedTable) assert copied_table.table == table.table def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_doesnt_increase(): pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert isinstance(unpickled_table, MemoryMappedTable) assert unpickled_table.table == table.table def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_increases(): pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert isinstance(unpickled_table, MemoryMappedTable) assert unpickled_table.table == table.table def assert_index_attributes_equal(table: Table, other: Table): assert table._batches == other._batches np.testing.assert_array_equal(table._offsets, other._offsets) assert table._schema == other._schema def add_suffix_to_column_names(table, suffix): return table.rename_columns([f"{name}{suffix}" for name in table.column_names]) def test_inject_arrow_table_documentation(in_memory_pa_table): method = pa.Table.slice def function_to_wrap(*args): return method(*args) args = (0, 1) wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap) assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args) assert "pyarrow.Table" not in wrapped_method.__doc__ assert "Table" in wrapped_method.__doc__ def test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table): with 
assert_arrow_memory_increases(): pa_table = _in_memory_arrow_table_from_file(arrow_file) assert in_memory_pa_table == pa_table def test_in_memory_arrow_table_from_buffer(in_memory_pa_table): with assert_arrow_memory_increases(): buf_writer = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) writer.write_table(in_memory_pa_table) writer.close() buf_writer.close() pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue()) assert in_memory_pa_table == pa_table def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_doesnt_increase(): pa_table = _memory_mapped_arrow_table_from_file(arrow_file) assert in_memory_pa_table == pa_table def test_table_init(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.table == in_memory_pa_table def test_table_validate(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.validate() == in_memory_pa_table.validate() def test_table_equals(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.equals(in_memory_pa_table) def test_table_to_batches(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_batches() == in_memory_pa_table.to_batches() def test_table_to_pydict(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_pydict() == in_memory_pa_table.to_pydict() def test_table_to_string(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_string() == in_memory_pa_table.to_string() def test_table_field(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names table = Table(in_memory_pa_table) assert table.field("tokens") == in_memory_pa_table.field("tokens") def test_table_column(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names table = Table(in_memory_pa_table) assert table.column("tokens") == in_memory_pa_table.column("tokens") def test_table_itercolumns(in_memory_pa_table): table = Table(in_memory_pa_table) assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns())) assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns()) def test_table_getitem(in_memory_pa_table): table = Table(in_memory_pa_table) assert table[0] == in_memory_pa_table[0] def test_table_len(in_memory_pa_table): table = Table(in_memory_pa_table) assert len(table) == len(in_memory_pa_table) def test_table_str(in_memory_pa_table): table = Table(in_memory_pa_table) assert str(table) == str(in_memory_pa_table).replace("pyarrow.Table", "Table") assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table") @pytest.mark.parametrize( "attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"] ) def test_table_attributes(in_memory_pa_table, attribute): table = Table(in_memory_pa_table) assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute) def test_in_memory_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_increases(): table = InMemoryTable.from_file(arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_buffer(in_memory_pa_table): with assert_arrow_memory_increases(): buf_writer = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) writer.write_table(in_memory_pa_table) writer.close() buf_writer.close() table = InMemoryTable.from_buffer(buf_writer.getvalue()) assert table.table == in_memory_pa_table assert 
isinstance(table, InMemoryTable) def test_in_memory_table_from_pandas(in_memory_pa_table): df = in_memory_pa_table.to_pandas() with assert_arrow_memory_increases(): # with no schema it might infer another order of the fields in the schema table = InMemoryTable.from_pandas(df) assert isinstance(table, InMemoryTable) # by specifying schema we get the same order of features, and so the exact same table table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_arrays(in_memory_pa_table): arrays = list(in_memory_pa_table.columns) names = list(in_memory_pa_table.column_names) table = InMemoryTable.from_arrays(arrays, names=names) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_pydict(in_memory_pa_table): pydict = in_memory_pa_table.to_pydict() with assert_arrow_memory_increases(): table = InMemoryTable.from_pydict(pydict) assert isinstance(table, InMemoryTable) assert table.table == pa.Table.from_pydict(pydict) def test_in_memory_table_from_pylist(in_memory_pa_table): pylist = InMemoryTable(in_memory_pa_table).to_pylist() table = InMemoryTable.from_pylist(pylist) assert isinstance(table, InMemoryTable) assert pylist == table.to_pylist() def test_in_memory_table_from_batches(in_memory_pa_table): batches = list(in_memory_pa_table.to_batches()) table = InMemoryTable.from_batches(batches) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_deepcopy(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) def test_in_memory_table_pickle(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert_index_attributes_equal(table, unpickled_table) @slow def test_in_memory_table_pickle_big_table(): big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)}) length = len(big_table_4GB) big_table_4GB = pickle.dumps(big_table_4GB) big_table_4GB = pickle.loads(big_table_4GB) assert len(big_table_4GB) == length def test_in_memory_table_slice(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, InMemoryTable) def test_in_memory_table_filter(in_memory_pa_table): mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = InMemoryTable(in_memory_pa_table).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, InMemoryTable) def test_in_memory_table_flatten(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, InMemoryTable) def test_in_memory_table_combine_chunks(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, InMemoryTable) def test_in_memory_table_cast(in_memory_pa_table): assert pa.list_(pa.int64()) in 
in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = InMemoryTable(in_memory_pa_table).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, InMemoryTable) def test_in_memory_table_cast_reorder_struct(): table = InMemoryTable( pa.Table.from_pydict( { "top": [ { "foo": "a", "bar": "b", } ] } ) ) schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})}) assert table.cast(schema).schema == schema def test_in_memory_table_cast_with_hf_features(): table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]})) features = Features({"labels": ClassLabel(names=["neg", "pos"])}) schema = features.arrow_schema assert table.cast(schema).schema == schema assert Features.from_arrow_schema(table.cast(schema).schema) == features def test_in_memory_table_replace_schema_metadata(in_memory_pa_table): metadata = {"huggingface": "{}"} table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, InMemoryTable) def test_in_memory_table_add_column(in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column) assert table.table == in_memory_pa_table.add_column(i, field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_append_column(in_memory_pa_table): field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).append_column(field_, column) assert table.table == in_memory_pa_table.append_column(field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_remove_column(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, InMemoryTable) def test_in_memory_table_set_column(in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column) assert table.table == in_memory_pa_table.set_column(i, field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_rename_columns(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = InMemoryTable(in_memory_pa_table).rename_columns(names) assert table.table == in_memory_pa_table.rename_columns(names) assert isinstance(table, InMemoryTable) def test_in_memory_table_drop(in_memory_pa_table): names = [in_memory_pa_table.column_names[0]] table = InMemoryTable(in_memory_pa_table).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, InMemoryTable) def test_memory_mapped_table_init(arrow_file, in_memory_pa_table): table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, MemoryMappedTable) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_from_file(arrow_file, 
in_memory_pa_table): with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, MemoryMappedTable) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table): replays = [("slice", (0, 1), {}), ("flatten", (), {})] with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file, replays=replays) assert len(table) == 1 for method, args, kwargs in replays: in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs) assert table.table == in_memory_pa_table assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_deepcopy(arrow_file): table = MemoryMappedTable.from_file(arrow_file) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert table.path == copied_table.path assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) def test_memory_mapped_table_pickle(arrow_file): table = MemoryMappedTable.from_file(arrow_file) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert unpickled_table.path == table.path assert_index_attributes_equal(table, unpickled_table) def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file): with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_pickle_applies_replay(arrow_file): replays = [("slice", (0, 1), {}), ("flatten", (), {})] with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file, replays=replays) assert isinstance(table, MemoryMappedTable) assert table.replays == replays assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, MemoryMappedTable) assert table.replays == [("slice", (1, 2), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table): mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = MemoryMappedTable.from_file(arrow_file).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, MemoryMappedTable) assert table.replays == [("filter", (mask,), {})] assert_deepcopy_without_bringing_data_in_memory(table) # filter DOES increase memory # assert_pickle_without_bringing_data_in_memory(table) assert_pickle_does_bring_data_in_memory(table) def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, MemoryMappedTable) assert table.replays == [("flatten", (), {})] assert_deepcopy_without_bringing_data_in_memory(table) 
assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, MemoryMappedTable) assert table.replays == [("combine_chunks", (), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table): assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = MemoryMappedTable.from_file(arrow_file).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, MemoryMappedTable) assert table.replays == [("cast", (schema,), {})] assert_deepcopy_without_bringing_data_in_memory(table) # cast DOES increase memory when converting integers precision for example # assert_pickle_without_bringing_data_in_memory(table) assert_pickle_does_bring_data_in_memory(table) def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table): metadata = {"huggingface": "{}"} table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, MemoryMappedTable) assert table.replays == [("replace_schema_metadata", (metadata,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column) assert table.table == in_memory_pa_table.add_column(i, field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("add_column", (i, field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table): field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column) assert table.table == in_memory_pa_table.append_column(field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("append_column", (field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, MemoryMappedTable) assert table.replays == [("remove_column", (0,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column) assert table.table == in_memory_pa_table.set_column(i, field_, column) 
assert isinstance(table, MemoryMappedTable) assert table.replays == [("set_column", (i, field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = MemoryMappedTable.from_file(arrow_file).rename_columns(names) assert table.table == in_memory_pa_table.rename_columns(names) assert isinstance(table, MemoryMappedTable) assert table.replays == [("rename_columns", (names,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table): names = [in_memory_pa_table.column_names[0]] table = MemoryMappedTable.from_file(arrow_file).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, MemoryMappedTable) assert table.replays == [("drop", (names,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_init( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = ( in_memory_blocks if blocks_type == "in_memory" else memory_mapped_blocks if blocks_type == "memory_mapped" else mixed_in_memory_and_memory_mapped_blocks ) table = ConcatenationTable(in_memory_pa_table, blocks) assert table.table == in_memory_pa_table assert table.blocks == blocks def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks): assert len(in_memory_pa_table) > 2 in_memory_table = InMemoryTable(in_memory_pa_table) t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2) table = ConcatenationTable.from_blocks(in_memory_table) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks([t1, t2]) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks([[t1], [t2]]) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks(in_memory_blocks) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_from_blocks_doesnt_increase_memory( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] with assert_arrow_memory_doesnt_increase(): table = ConcatenationTable.from_blocks(blocks) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table if blocks_type == "in_memory": assert table.blocks == [[InMemoryTable(in_memory_pa_table)]] else: assert table.blocks == blocks @pytest.mark.parametrize("axis", [0, 1]) def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file): in_memory_table = 
InMemoryTable(in_memory_pa_table) concatenation_table = ConcatenationTable.from_blocks(in_memory_table) memory_mapped_table = MemoryMappedTable.from_file(arrow_file) tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table] if axis == 0: expected_table = pa.concat_tables([in_memory_pa_table] * len(tables)) else: # avoids error due to duplicate column names tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)] expected_table = in_memory_pa_table for table in tables[1:]: for name, col in zip(table.column_names, table.columns): expected_table = expected_table.append_column(name, col) with assert_arrow_memory_doesnt_increase(): table = ConcatenationTable.from_tables(tables, axis=axis) assert isinstance(table, ConcatenationTable) assert table.table == expected_table # because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable assert len(table.blocks) == 1 if axis == 1 else 2 assert len(table.blocks[0]) == 1 if axis == 0 else 2 assert axis == 1 or len(table.blocks[1]) == 1 assert isinstance(table.blocks[0][0], InMemoryTable) assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable) def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file): table = MemoryMappedTable.from_file(arrow_file) t1 = table.slice(0, 2) t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names]) concatenated = ConcatenationTable.from_tables( [ ConcatenationTable.from_blocks([[t1], [t1], [t1]]), ConcatenationTable.from_blocks([[t2], [t2]]), ], axis=1, ) assert len(concatenated) == 6 assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] concatenated = ConcatenationTable.from_tables( [ ConcatenationTable.from_blocks([[t2], [t2]]), ConcatenationTable.from_blocks([[t1], [t1], [t1]]), ], axis=1, ) assert len(concatenated) == 6 assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_deepcopy( blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert table.blocks == copied_table.blocks assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_pickle( blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert unpickled_table.blocks == table.blocks assert_index_attributes_equal(table, unpickled_table) def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table): input_features = 
Features.from_arrow_schema(in_memory_pa_table.schema) input_features["id"] = Value("int64", id="my_id") intput_schema = input_features.arrow_schema t0 = in_memory_pa_table.replace_schema_metadata(intput_schema.metadata) t1 = MemoryMappedTable.from_file(arrow_file) tables = [t0, t1] concatenated_table = concat_tables(tables, axis=0) output_schema = concatenated_table.schema output_features = Features.from_arrow_schema(output_schema) assert output_schema == intput_schema assert output_schema.metadata == intput_schema.metadata assert output_features == input_features assert output_features["id"].id == "my_id" @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_slice( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_filter( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = ConcatenationTable.from_blocks(blocks).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_flatten( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_combine_chunks( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_cast( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types assert pa.int64() in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = 
ConcatenationTable.from_blocks(blocks).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, ConcatenationTable) schema = pa.schema( { k: v if v != pa.int64() else pa.int32() for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = ConcatenationTable.from_blocks(blocks).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concat_tables_cast_with_features_metadata( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] input_features = Features.from_arrow_schema(in_memory_pa_table.schema) input_features["id"] = Value("int64", id="my_id") intput_schema = input_features.arrow_schema concatenated_table = ConcatenationTable.from_blocks(blocks).cast(intput_schema) output_schema = concatenated_table.schema output_features = Features.from_arrow_schema(output_schema) assert output_schema == intput_schema assert output_schema.metadata == intput_schema.metadata assert output_features == input_features assert output_features["id"].id == "my_id" @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_replace_schema_metadata( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] metadata = {"huggingface": "{}"} table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_add_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).add_column(i, field_, column) # assert table.table == in_memory_pa_table.add_column(i, field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_append_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).append_column(field_, column) # assert table.table == in_memory_pa_table.append_column(field_, column) # unpickled_table = 
pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.append_column(field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_remove_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_set_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).set_column(i, field_, column) # assert table.table == in_memory_pa_table.set_column(i, field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_rename_columns( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = ConcatenationTable.from_blocks(blocks).rename_columns(names) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table.rename_columns(names) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_drop( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] names = [in_memory_pa_table.column_names[0]] table = ConcatenationTable.from_blocks(blocks).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, ConcatenationTable) def test_concat_tables(arrow_file, in_memory_pa_table): t0 = in_memory_pa_table t1 = InMemoryTable(t0) t2 = MemoryMappedTable.from_file(arrow_file) t3 = ConcatenationTable.from_blocks(t1) tables = [t0, t1, t2, t3] concatenated_table = concat_tables(tables, axis=0) assert concatenated_table.table == pa.concat_tables([t0] * 4) assert concatenated_table.table.shape == (40, 4) assert isinstance(concatenated_table, ConcatenationTable) assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable) assert isinstance(concatenated_table.blocks[2][0], InMemoryTable) # add suffix to avoid 
error due to duplicate column names concatenated_table = concat_tables( [add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1 ) assert concatenated_table.table.shape == (10, 16) assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable) assert isinstance(concatenated_table.blocks[0][2], InMemoryTable) def _interpolation_search_ground_truth(arr: List[int], x: int) -> Union[int, IndexError]: for i in range(len(arr) - 1): if arr[i] <= x < arr[i + 1]: return i return IndexError class _ListWithGetitemCounter(list): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.unique_getitem_calls = set() def __getitem__(self, i): out = super().__getitem__(i) self.unique_getitem_calls.add(i) return out @property def getitem_unique_count(self): return len(self.unique_getitem_calls) @pytest.mark.parametrize( "arr, x", [(np.arange(0, 14, 3), x) for x in range(-1, 22)] + [(list(np.arange(-5, 5)), x) for x in range(-6, 6)] + [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]] + [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 1_0001]], ) def test_interpolation_search(arr, x): ground_truth = _interpolation_search_ground_truth(arr, x) if isinstance(ground_truth, int): arr = _ListWithGetitemCounter(arr) output = _interpolation_search(arr, x) assert ground_truth == output # 4 maximum unique getitem calls is expected for the cases of this test # but it can be bigger for large and messy arrays. assert arr.getitem_unique_count <= 4 else: with pytest.raises(ground_truth): _interpolation_search(arr, x) def test_indexed_table_mixin(): n_rows_per_chunk = 10 n_chunks = 4 pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk}) pa_table = pa.concat_tables([pa_table] * n_chunks) table = Table(pa_table) assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks)) assert table.fast_slice(5) == pa_table.slice(5) assert table.fast_slice(2, 13) == pa_table.slice(2, 13) @pytest.mark.parametrize( "arrays", [ [pa.array([[1, 2, 3, 4]]), pa.array([[10, 2]])], [ pa.array([[[1, 2], [3]]], pa.list_(pa.list_(pa.int32()), 2)), pa.array([[[10, 2, 3], [2]]], pa.list_(pa.list_(pa.int32()), 2)), ], [pa.array([[[1, 2, 3]], [[2, 3], [20, 21]], [[4]]]).slice(1), pa.array([[[1, 2, 3]]])], ], ) def test_concat_arrays(arrays): assert array_concat(arrays) == pa.concat_arrays(arrays) def test_concat_arrays_nested_with_nulls(): arrays = [pa.array([{"a": 21, "b": [[1, 2], [3]]}]), pa.array([{"a": 100, "b": [[1], None]}])] concatenated_arrays = array_concat(arrays) assert concatenated_arrays == pa.array([{"a": 21, "b": [[1, 2], [3]]}, {"a": 100, "b": [[1], None]}]) def test_concat_extension_arrays(): arrays = [pa.array([[[1, 2], [3, 4]]]), pa.array([[[10, 2], [3, 4]]])] extension_type = Array2DExtensionType((2, 2), "int64") assert array_concat([extension_type.wrap_array(array) for array in arrays]) == extension_type.wrap_array( pa.concat_arrays(arrays) ) def test_cast_array_to_features(): arr = pa.array([[0, 1]]) assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string()) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("string")), allow_number_to_str=False) def test_cast_array_to_features_nested(): arr = pa.array([[{"foo": [0]}]]) assert cast_array_to_feature(arr, [{"foo": 
Sequence(Value("string"))}]).type == pa.list_( pa.struct({"foo": pa.list_(pa.string())}) ) def test_cast_array_to_features_to_nested_with_no_fields(): arr = pa.array([{}]) assert cast_array_to_feature(arr, {}).type == pa.struct({}) assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist() def test_cast_array_to_features_nested_with_null_values(): # same type arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})) casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}) assert casted_array.to_pylist() == arr.to_pylist() # different type arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})) if datasets.config.PYARROW_VERSION.major < 10: with pytest.warns(UserWarning, match="None values are converted to empty lists.+"): casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))}) assert casted_array.to_pylist() == [ {"foo": [[], [0]]} ] # empty list because of https://github.com/huggingface/datasets/issues/3676 else: with warnings.catch_warnings(): warnings.simplefilter("error") casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))}) assert casted_array.to_pylist() == [{"foo": [None, [0]]}] def test_cast_array_to_features_to_null_type(): # same type arr = pa.array([[None, None]]) assert cast_array_to_feature(arr, Sequence(Value("null"))).type == pa.list_(pa.null()) # different type arr = pa.array([[None, 1]]) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("null"))) def test_cast_array_to_features_array_xd(): # same storage type arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2)) casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32")) assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32") # different storage type casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32")) assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32") def test_cast_array_to_features_sequence_classlabel(): arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) # Test empty arrays arr = pa.array([[], []], pa.list_(pa.int64())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) arr = pa.array([[], []], pa.list_(pa.string())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) # Test invalid class labels arr = pa.array([[2]], pa.list_(pa.int64())) with pytest.raises(ValueError): assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))) arr = pa.array([["baz"]], pa.list_(pa.string())) with pytest.raises(ValueError): assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))) def test_cast_fixed_size_array_to_features_sequence(): arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3)) # Fixed size list casted_array = cast_array_to_feature(arr, Sequence(Value("int64"), 
length=3)) assert casted_array.type == pa.list_(pa.int64(), 3) assert casted_array.to_pylist() == arr.to_pylist() # Variable size list casted_array = cast_array_to_feature(arr, Sequence(Value("int64"))) assert casted_array.type == pa.list_(pa.int64()) assert casted_array.to_pylist() == arr.to_pylist() def test_cast_sliced_fixed_size_array_to_features(): arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3)) casted_array = cast_array_to_feature(arr[1:], Sequence(Value("int64"), length=3)) assert casted_array.type == pa.list_(pa.int64(), 3) assert casted_array.to_pylist() == arr[1:].to_pylist() def test_embed_array_storage(image_file): array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type) embedded_images_array = embed_array_storage(array, Image()) assert isinstance(embedded_images_array.to_pylist()[0]["path"], str) assert embedded_images_array.to_pylist()[0]["path"] == "test_image_rgb.jpg" assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes) def test_embed_array_storage_nested(image_file): array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type)) embedded_images_array = embed_array_storage(array, [Image()]) assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes) array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type})) embedded_images_array = embed_array_storage(array, {"foo": Image()}) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes) def test_embed_table_storage(image_file): features = Features({"image": Image()}) table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema) embedded_images_table = embed_table_storage(table) assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str) assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes) @pytest.mark.parametrize( "table", [ InMemoryTable(pa.table({"foo": range(10)})), InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])), InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])), ], ) @pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_table_iter(table, batch_size, drop_last_batch): num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size subtables = list(table_iter(table, batch_size=batch_size, drop_last_batch=drop_last_batch)) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for subtable in subtables) else: assert all(len(subtable) == batch_size for subtable in subtables[:-1]) assert len(subtables[-1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables(subtables) assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() @pytest.mark.parametrize( "pa_type, expected", [ (pa.int8(), False), (pa.struct({"col1": pa.int8(), "col2": pa.int64()}), False), (pa.struct({"col1": pa.list_(pa.int8()), "col2": Array2DExtensionType((1, 3), "int64")}), True), (pa.list_(pa.int8()), False), (pa.list_(Array2DExtensionType((1, 3), "int64"), 4), True), ], ) def test_is_extension_type(pa_type, expected): assert _is_extension_type(pa_type) == 
expected
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_metadata_util.py
import re import sys import tempfile import unittest from pathlib import Path import pytest import yaml from huggingface_hub import DatasetCard, DatasetCardData from datasets.config import METADATA_CONFIGS_FIELD from datasets.utils.metadata import MetadataConfigs def _dedent(string: str) -> str: indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines()) return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)]) README_YAML = """\ --- language: - zh - en task_ids: - sentiment-classification --- # Begin of markdown Some cool dataset card """ README_EMPTY_YAML = """\ --- --- # Begin of markdown Some cool dataset card """ README_NO_YAML = """\ # Begin of markdown Some cool dataset card """ README_METADATA_CONFIG_INCORRECT_FORMAT = f"""\ --- {METADATA_CONFIGS_FIELD}: data_dir: v1 drop_labels: true --- """ README_METADATA_SINGLE_CONFIG = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom data_dir: v1 drop_labels: true --- """ README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: v1 data_dir: v1 drop_labels: true - config_name: v2 data_dir: v2 drop_labels: false default: true --- """ README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom data_dir: custom drop_labels: true - config_name: default data_dir: data drop_labels: false --- """ EXPECTED_METADATA_SINGLE_CONFIG = {"custom": {"data_dir": "v1", "drop_labels": True}} EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG = { "v1": {"data_dir": "v1", "drop_labels": True}, "v2": {"data_dir": "v2", "drop_labels": False, "default": True}, } EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME = { "custom": {"data_dir": "custom", "drop_labels": True}, "default": {"data_dir": "data", "drop_labels": False}, } @pytest.fixture def data_dir_with_two_subdirs(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") return str(data_dir) class TestMetadataUtils(unittest.TestCase): def test_metadata_dict_from_readme(self): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(README_YAML) dataset_card_data = DatasetCard.load(path).data self.assertDictEqual( dataset_card_data.to_dict(), {"language": ["zh", "en"], "task_ids": ["sentiment-classification"]} ) with open(path, "w+") as readme_file: readme_file.write(README_EMPTY_YAML) if ( sys.platform != "win32" ): # there is a bug on windows, see https://github.com/huggingface/huggingface_hub/issues/1546 dataset_card_data = DatasetCard.load(path).data self.assertDictEqual(dataset_card_data.to_dict(), {}) with open(path, "w+") as readme_file: readme_file.write(README_NO_YAML) dataset_card_data = DatasetCard.load(path).data self.assertEqual(dataset_card_data.to_dict(), {}) def test_from_yaml_string(self): valid_yaml_string = _dedent( """\ annotations_creators: - found language_creators: - found language: - en license: - unknown multilinguality: - monolingual pretty_name: Test Dataset size_categories: - 10K<n<100K source_datasets: - extended|other-yahoo-webscope-l6 task_categories: - question-answering task_ids: - open-domain-qa """ ) assert 
DatasetCardData(**yaml.safe_load(valid_yaml_string)).to_dict() valid_yaml_with_optional_keys = _dedent( """\ annotations_creators: - found language_creators: - found language: - en license: - unknown multilinguality: - monolingual pretty_name: Test Dataset size_categories: - 10K<n<100K source_datasets: - extended|other-yahoo-webscope-l6 task_categories: - text-classification task_ids: - multi-class-classification paperswithcode_id: - squad configs: - en train-eval-index: - config: en task: text-classification task_id: multi_class_classification splits: train_split: train eval_split: test col_mapping: text: text label: target metrics: - type: accuracy name: Accuracy extra_gated_prompt: | By clicking on “Access repository” below, you also agree to ImageNet Terms of Access: [RESEARCHER_FULLNAME] (the "Researcher") has requested permission to use the ImageNet database (the "Database") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions: 1. Researcher shall use the Database only for non-commercial research and educational purposes. extra_gated_fields: Company: text Country: text I agree to use this model for non-commerical use ONLY: checkbox """ ) assert DatasetCardData(**yaml.safe_load(valid_yaml_with_optional_keys)).to_dict() @pytest.mark.parametrize( "readme_content, expected_metadata_configs_dict, expected_default_config_name", [ (README_METADATA_SINGLE_CONFIG, EXPECTED_METADATA_SINGLE_CONFIG, None), (README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG, EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG, "v2"), (README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME, EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME, "default"), ], ) def test_metadata_configs_dataset_card_data( readme_content, expected_metadata_configs_dict, expected_default_config_name ): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(readme_content) dataset_card_data = DatasetCard.load(path).data metadata_configs_dict = MetadataConfigs.from_dataset_card_data(dataset_card_data) assert metadata_configs_dict == expected_metadata_configs_dict assert metadata_configs_dict.get_default_config_name() == expected_default_config_name def test_metadata_configs_incorrect_yaml(): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(README_METADATA_CONFIG_INCORRECT_FORMAT) dataset_card_data = DatasetCard.load(path).data with pytest.raises(ValueError): _ = MetadataConfigs.from_dataset_card_data(dataset_card_data)
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_search.py
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss pytestmark = pytest.mark.integration @require_faiss class IndexableDatasetTest(TestCase): def _create_dummy_dataset(self): dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}) return dset def test_add_faiss_index(self): import faiss dset: Dataset = self._create_dummy_dataset() dset = dset.map( lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True ) dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT) scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32)) self.assertEqual(examples["filename"][0], "my_name-train_29") dset.drop_index("vecs") def test_add_faiss_index_from_external_arrays(self): import faiss dset: Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, ) scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32)) self.assertEqual(examples["filename"][0], "my_name-train_29") def test_serialization(self): import faiss dset: Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT, ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=False) as tmp_file: dset.save_faiss_index("vecs", tmp_file.name) dset.load_faiss_index("vecs2", tmp_file.name) os.unlink(tmp_file.name) scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32)) self.assertEqual(examples["filename"][0], "my_name-train_29") def test_drop_index(self): dset: Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs" ) dset.drop_index("vecs") self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32))) def test_add_elasticsearch_index(self): from elasticsearch import Elasticsearch dset: Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk: mocked_index_create.return_value = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30) mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} es_client = Elasticsearch() dset.add_elasticsearch_index("filename", es_client=es_client) scores, examples = dset.get_nearest_examples("filename", "my_name-train_29") self.assertEqual(examples["filename"][0], "my_name-train_29") @require_faiss class FaissIndexTest(TestCase): def test_flat_ip(self): import faiss index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal, 5) index.add_vectors(np.zeros((5, 5), dtype=np.float32)) self.assertEqual(index.faiss_index.ntotal, 10) # single query query = np.zeros(5, dtype=np.float32) query[1] = 1 scores, indices = index.search(query) self.assertRaises(ValueError, index.search, query.reshape(-1, 1)) self.assertGreater(scores[0], 0) self.assertEqual(indices[0], 1) # batched queries queries = np.eye(5, dtype=np.float32)[::-1] total_scores, total_indices = index.search_batch(queries) self.assertRaises(ValueError, index.search_batch, queries[0]) best_scores = [scores[0] for scores in total_scores] best_indices = [indices[0] for indices in total_indices] self.assertGreater(np.min(best_scores), 0) self.assertListEqual([4, 3, 2, 1, 0], best_indices) def test_factory(self): import faiss index = FaissIndex(string_factory="Flat") index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsInstance(index.faiss_index, faiss.IndexFlat) index = FaissIndex(string_factory="LSH") index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsInstance(index.faiss_index, faiss.IndexLSH) with self.assertRaises(ValueError): _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5)) def test_custom(self): import faiss custom_index = faiss.IndexFlat(5) index = FaissIndex(custom_index=custom_index) index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsInstance(index.faiss_index, faiss.IndexFlat) def test_serialization(self): import faiss index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5, dtype=np.float32)) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. 
This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=False) as tmp_file: index.save(tmp_file.name) index = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) query = np.zeros(5, dtype=np.float32) query[1] = 1 scores, indices = index.search(query) self.assertGreater(scores[0], 0) self.assertEqual(indices[0], 1) @require_faiss def test_serialization_fs(mockfs): import faiss index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5, dtype=np.float32)) index_name = "index.faiss" path = f"mock://{index_name}" index.save(path, storage_options=mockfs.storage_options) index = FaissIndex.load(path, storage_options=mockfs.storage_options) query = np.zeros(5, dtype=np.float32) query[1] = 1 scores, indices = index.search(query) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class ElasticSearchIndexTest(TestCase): def test_elasticsearch(self): from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk: es_client = Elasticsearch() mocked_index_create.return_value = {"acknowledged": True} index = ElasticSearchIndex(es_client=es_client) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(["foo", "bar", "foobar"]) # single query query = "foo" mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} scores, indices = index.search(query) self.assertEqual(scores[0], 1) self.assertEqual(indices[0], 0) # single query with timeout query = "foo" mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} scores, indices = index.search(query, request_timeout=30) self.assertEqual(scores[0], 1) self.assertEqual(indices[0], 0) # batched queries queries = ["foo", "bar", "foobar"] mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} total_scores, total_indices = index.search_batch(queries) best_scores = [scores[0] for scores in total_scores] best_indices = [indices[0] for indices in total_indices] self.assertGreater(np.min(best_scores), 0) self.assertListEqual([1, 1, 1], best_indices) # batched queries with timeout queries = ["foo", "bar", "foobar"] mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} total_scores, total_indices = index.search_batch(queries, request_timeout=30) best_scores = [scores[0] for scores in total_scores] best_indices = [indices[0] for indices in total_indices] self.assertGreater(np.min(best_scores), 0) self.assertListEqual([1, 1, 1], best_indices)
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_info.py
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def test_from_dir(files, tmp_path_factory): dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir") if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w") as f: f.write("---\ndataset_info:\n dataset_size: 42\n---") if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w") as f: f.write("") # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w") as f: f.write('{"default": {"dataset_size": 42}}') dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo): tmp_path = str(tmp_path) dataset_info.write_to_directory(tmp_path) reloaded = DatasetInfo.from_directory(tmp_path) assert dataset_info == reloaded assert os.path.exists(os.path.join(tmp_path, "dataset_info.json")) def test_dataset_info_to_yaml_dict(): dataset_info = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, ) dataset_info_yaml_dict = dataset_info._to_yaml_dict() assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str)) dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict) reloaded = yaml.safe_load(dataset_info_yaml) assert dataset_info_yaml_dict == reloaded def test_dataset_info_to_yaml_dict_empty(): dataset_info = DatasetInfo() dataset_info_yaml_dict = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()}), DatasetInfosDict({"my_config_name": DatasetInfo()}), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42), "v2": DatasetInfo(dataset_size=1337), } ), ], ) def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict): tmp_path = str(tmp_path) dataset_infos_dict.write_to_directory(tmp_path) reloaded = DatasetInfosDict.from_directory(tmp_path) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): dataset_info.config_name = config_name # the yaml representation doesn't include 
fields like description or citation # so we just test that we can recover what we can from the yaml dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict()) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(tmp_path, "README.md"))
0
hf_public_repos/datasets
hf_public_repos/datasets/tests/test_streaming_download_manager.py
import json import os import re from pathlib import Path import pytest from fsspec.registry import _registry as _fsspec_registry from fsspec.spec import AbstractBufferedFile, AbstractFileSystem from datasets.download.download_config import DownloadConfig from datasets.download.streaming_download_manager import ( StreamingDownloadManager, _get_extraction_protocol, xbasename, xexists, xgetsize, xglob, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xPath, xrelpath, xsplit, xsplitext, xwalk, ) from datasets.filesystems import COMPRESSION_FILESYSTEMS from datasets.utils.hub import hf_hub_url from .utils import require_lz4, require_zstandard, slow TEST_URL = "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/raw/main/some_text.txt" TEST_URL_CONTENT = "foo\nbar\nfoobar" TEST_GG_DRIVE_FILENAME = "train.tsv" TEST_GG_DRIVE_URL = "https://drive.google.com/uc?export=download&id=17bOgBDc3hRCoPZ89EYtKDzK-yXAWat94" TEST_GG_DRIVE_GZIPPED_URL = "https://drive.google.com/uc?export=download&id=1Bt4Garpf0QLiwkJhHJzXaVa0I0H5Qhwz" TEST_GG_DRIVE_ZIPPED_URL = "https://drive.google.com/uc?export=download&id=1k92sUfpHxKq8PXWRr7Y5aNHXwOCNUmqh" TEST_GG_DRIVE_CONTENT = """\ pokemon_name, type Charmander, fire Squirtle, water Bulbasaur, grass""" class DummyTestFS(AbstractFileSystem): protocol = "mock" _file_class = AbstractBufferedFile _fs_contents = ( {"name": "top_level", "type": "directory"}, {"name": "top_level/second_level", "type": "directory"}, {"name": "top_level/second_level/date=2019-10-01", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-01/a.parquet", "type": "file", "size": 100, }, { "name": "top_level/second_level/date=2019-10-01/b.parquet", "type": "file", "size": 100, }, {"name": "top_level/second_level/date=2019-10-02", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-02/a.parquet", "type": "file", "size": 100, }, {"name": "top_level/second_level/date=2019-10-04", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-04/a.parquet", "type": "file", "size": 100, }, {"name": "misc", "type": "directory"}, {"name": "misc/foo.txt", "type": "file", "size": 100}, {"name": "glob_test", "type": "directory", "size": 0}, {"name": "glob_test/hat", "type": "directory", "size": 0}, {"name": "glob_test/hat/^foo.txt", "type": "file", "size": 100}, {"name": "glob_test/dollar", "type": "directory", "size": 0}, {"name": "glob_test/dollar/$foo.txt", "type": "file", "size": 100}, {"name": "glob_test/lbrace", "type": "directory", "size": 0}, {"name": "glob_test/lbrace/{foo.txt", "type": "file", "size": 100}, {"name": "glob_test/rbrace", "type": "directory", "size": 0}, {"name": "glob_test/rbrace/}foo.txt", "type": "file", "size": 100}, ) def __getitem__(self, name): for item in self._fs_contents: if item["name"] == name: return item raise IndexError(f"{name} not found!") def ls(self, path, detail=True, refresh=True, **kwargs): if kwargs.pop("strip_proto", True): path = self._strip_protocol(path) files = not refresh and self._ls_from_cache(path) if not files: files = [file for file in self._fs_contents if path == self._parent(file["name"])] files.sort(key=lambda file: file["name"]) self.dircache[path.rstrip("/")] = files if detail: return files return [file["name"] for file in files] def _open( self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ): return self._file_class( self, path, mode, block_size, autocommit, cache_options=cache_options, **kwargs, ) @pytest.fixture def mock_fsspec(): 
_fsspec_registry["mock"] = DummyTestFS yield del _fsspec_registry["mock"] def _readd_double_slash_removed_by_path(path_as_posix: str) -> str: """Path(...) on an url path like zip://file.txt::http://host.com/data.zip converts the :// to :/ This function readds the :// It handles cases like: - https://host.com/data.zip - C://data.zip - zip://file.txt::https://host.com/data.zip - zip://file.txt::/Users/username/data.zip - zip://file.txt::C://data.zip Args: path_as_posix (str): output of Path(...).as_posix() Returns: str: the url path with :// instead of :/ """ return re.sub("([A-z]:/)([A-z:])", r"\g<1>/\g<2>", path_as_posix) @pytest.mark.parametrize( "input_path, paths_to_join, expected_path", [ ( "https://host.com/archive.zip", ("file.txt",), "https://host.com/archive.zip/file.txt", ), ( "zip://::https://host.com/archive.zip", ("file.txt",), "zip://file.txt::https://host.com/archive.zip", ), ( "zip://folder::https://host.com/archive.zip", ("file.txt",), "zip://folder/file.txt::https://host.com/archive.zip", ), ( ".", ("file.txt",), os.path.join(".", "file.txt"), ), ( str(Path().resolve()), ("file.txt",), str((Path().resolve() / "file.txt")), ), ], ) def test_xjoin(input_path, paths_to_join, expected_path): output_path = xjoin(input_path, *paths_to_join) assert output_path == expected_path output_path = xPath(input_path).joinpath(*paths_to_join) assert output_path == xPath(expected_path) @pytest.mark.parametrize( "input_path, expected_path", [ (str(Path(__file__).resolve()), str(Path(__file__).resolve().parent)), ("https://host.com/archive.zip", "https://host.com"), ( "zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", ), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://folder::https://host.com/archive.zip", ), ], ) def test_xdirname(input_path, expected_path): from datasets.download.streaming_download_manager import xdirname output_path = xdirname(input_path) output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) @pytest.mark.parametrize( "input_path, exists", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), ], ) def test_xexists(input_path, exists, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xexists(input_path) is exists @pytest.mark.integration def test_xexists_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xexists(root_url + "data/text_data.txt", download_config=download_config) assert not xexists(root_url + "file_that_doesnt_exist.txt", download_config=download_config) @pytest.mark.parametrize( "input_path, expected_head_and_tail", [ ( str(Path(__file__).resolve()), (str(Path(__file__).resolve().parent), str(Path(__file__).resolve().name)), ), ("https://host.com/archive.zip", ("https://host.com", "archive.zip")), ("zip://file.txt::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "file.txt")), ("zip://folder::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "folder")), ("zip://::https://host.com/archive.zip", 
("zip://::https://host.com/archive.zip", "")), ], ) def test_xsplit(input_path, expected_head_and_tail): output_path, tail = xsplit(input_path) expected_path, expected_tail = expected_head_and_tail output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) assert output_path == expected_path assert tail == expected_tail @pytest.mark.parametrize( "input_path, expected_path_and_ext", [ ( str(Path(__file__).resolve()), (str(Path(__file__).resolve().with_suffix("")), str(Path(__file__).resolve().suffix)), ), ("https://host.com/archive.zip", ("https://host.com/archive", ".zip")), ("zip://file.txt::https://host.com/archive.zip", ("zip://file::https://host.com/archive.zip", ".txt")), ("zip://folder::https://host.com/archive.zip", ("zip://folder::https://host.com/archive.zip", "")), ("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")), ], ) def test_xsplitext(input_path, expected_path_and_ext): output_path, ext = xsplitext(input_path) expected_path, expected_ext = expected_path_and_ext output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) assert output_path == expected_path assert ext == expected_ext def test_xopen_local(text_path): with xopen(text_path, "r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert list(f) == list(expected_file) with xPath(text_path).open("r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert list(f) == list(expected_file) @pytest.mark.integration def test_xopen_remote(): with xopen(TEST_URL, "r", encoding="utf-8") as f: assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) with xPath(TEST_URL).open("r", encoding="utf-8") as f: assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) @pytest.mark.parametrize( "input_path, expected_paths", [ ("tmp_path", ["file1.txt", "file2.txt"]), ("mock://", ["glob_test", "misc", "top_level"]), ("mock://top_level", ["second_level"]), ("mock://top_level/second_level/date=2019-10-01", ["a.parquet", "b.parquet"]), ], ) def test_xlistdir(input_path, expected_paths, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) for file in ["file1.txt", "file2.txt"]: (tmp_path / file).touch() output_paths = sorted(xlistdir(input_path)) assert output_paths == expected_paths @pytest.mark.integration def test_xlistdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(xlistdir("zip://::" + root_url, download_config=download_config)) == 1 assert len(xlistdir("zip://main_dir::" + root_url, download_config=download_config)) == 2 with pytest.raises(FileNotFoundError): xlistdir("zip://qwertyuiop::" + root_url, download_config=download_config) with pytest.raises(FileNotFoundError): xlistdir(root_url, download_config=download_config) @pytest.mark.parametrize( "input_path, isdir", [ ("tmp_path", True), ("tmp_path/file.txt", False), ("mock://", True), ("mock://top_level", True), ("mock://dir_that_doesnt_exist", False), ], ) def test_xisdir(input_path, isdir, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / 
"file.txt").touch() assert xisdir(input_path) == isdir @pytest.mark.integration def test_xisdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert xisdir("zip://::" + root_url, download_config=download_config) is True assert xisdir("zip://main_dir::" + root_url, download_config=download_config) is True assert xisdir("zip://qwertyuiop::" + root_url, download_config=download_config) is False assert xisdir(root_url, download_config=download_config) is False @pytest.mark.parametrize( "input_path, isfile", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ], ) def test_xisfile(input_path, isfile, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xisfile(input_path) == isfile @pytest.mark.integration def test_xisfile_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xisfile(root_url + "data/text_data.txt", download_config=download_config) is True assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False @pytest.mark.parametrize( "input_path, size", [ ("tmp_path/file.txt", 100), ("mock://", 0), ("mock://top_level/second_level/date=2019-10-01/a.parquet", 100), ], ) def test_xgetsize(input_path, size, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() (tmp_path / "file.txt").write_bytes(b"x" * 100) assert xgetsize(input_path) == size @pytest.mark.integration def test_xgetsize_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xgetsize(root_url + "data/text_data.txt", download_config=download_config) == 39 with pytest.raises(FileNotFoundError): xgetsize(root_url + "qwertyuiop", download_config=download_config) @pytest.mark.parametrize( "input_path, expected_paths", [ ("tmp_path/*.txt", ["file1.txt", "file2.txt"]), ("mock://*", ["mock://glob_test", "mock://misc", "mock://top_level"]), ("mock://top_*", ["mock://top_level"]), ( "mock://top_level/second_level/date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level/second_level/date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xglob(input_path, expected_paths, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) expected_paths = [str(tmp_path / file) for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() output_paths = sorted(xglob(input_path)) assert output_paths == expected_paths @pytest.mark.integration def test_xglob_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = 
hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(xglob("zip://**::" + root_url, download_config=download_config)) == 3 assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0 @pytest.mark.parametrize( "input_path, expected_outputs", [ ("tmp_path", [("", [], ["file1.txt", "file2.txt", "README.md"])]), ( "mock://top_level/second_level", [ ("mock://top_level/second_level", ["date=2019-10-01", "date=2019-10-02", "date=2019-10-04"], []), ("mock://top_level/second_level/date=2019-10-01", [], ["a.parquet", "b.parquet"]), ("mock://top_level/second_level/date=2019-10-02", [], ["a.parquet"]), ("mock://top_level/second_level/date=2019-10-04", [], ["a.parquet"]), ], ), ], ) def test_xwalk(input_path, expected_outputs, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) expected_outputs = sorted( [ (str(tmp_path / dirpath).rstrip("/"), sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in expected_outputs ] ) for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() outputs = sorted(xwalk(input_path)) outputs = [(dirpath, sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in outputs] assert outputs == expected_outputs @pytest.mark.integration def test_xwalk_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(list(xwalk("zip://::" + root_url, download_config=download_config))) == 2 assert len(list(xwalk("zip://main_dir::" + root_url, download_config=download_config))) == 1 assert len(list(xwalk("zip://qwertyuiop::" + root_url, download_config=download_config))) == 0 @pytest.mark.parametrize( "input_path, start_path, expected_path", [ ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1", "dir2/file.txt".replace("/", os.path.sep)), ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1/dir2".replace("/", os.path.sep), "file.txt"), ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "file.txt"), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "folder/file.txt", ), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://folder::https://host.com/archive.zip", "file.txt", ), ], ) def test_xrelpath(input_path, start_path, expected_path): output_path = xrelpath(input_path, start=start_path) assert output_path == expected_path class TestxPath: @pytest.mark.parametrize( "input_path", [ "https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip", "file.txt", str(Path().resolve() / "file.txt"), ], ) def test_xpath_str(self, input_path): assert str(xPath(input_path)) == input_path @pytest.mark.parametrize( "input_path, expected_path", [ ("https://host.com/archive.zip", "https://host.com/archive.zip"), ("zip://file.txt::https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip"), ("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip"), ("file.txt", "file.txt"), (str(Path().resolve() / "file.txt"), (Path().resolve() / "file.txt").as_posix()), ], ) def test_xpath_as_posix(self, input_path, expected_path): assert xPath(input_path).as_posix() == expected_path @pytest.mark.parametrize( 
"input_path, exists", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), ], ) def test_xpath_exists(self, input_path, exists, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xexists(input_path) is exists @pytest.mark.parametrize( "input_path, pattern, expected_paths", [ ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), ("mock://", "*", ["mock://glob_test", "mock://misc", "mock://top_level"]), ("mock://", "top_*", ["mock://top_level"]), ( "mock://top_level/second_level", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level/second_level", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xpath_glob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec): if input_path == "tmp_path": input_path = tmp_path expected_paths = [tmp_path / file for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() else: expected_paths = [Path(file) for file in expected_paths] output_paths = sorted(xPath(input_path).glob(pattern)) assert output_paths == expected_paths @pytest.mark.parametrize( "input_path, pattern, expected_paths", [ ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), ( "mock://", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ( "mock://top_level", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xpath_rglob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec): if input_path == "tmp_path": input_path = tmp_path dir_path = tmp_path / "dir" dir_path.mkdir() expected_paths = [dir_path / file for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (dir_path / file).touch() else: expected_paths = [Path(file) for file in expected_paths] output_paths = sorted(xPath(input_path).rglob(pattern)) assert output_paths == expected_paths @pytest.mark.parametrize( "input_path, expected_path", [ ("https://host.com/archive.zip", "https://host.com"), ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip"), ("zip://dir/file.txt::https://host.com/archive.zip", 
"zip://dir::https://host.com/archive.zip"), ("file.txt", ""), (str(Path().resolve() / "file.txt"), str(Path().resolve())), ], ) def test_xpath_parent(self, input_path, expected_path): assert xPath(input_path).parent == xPath(expected_path) @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", "archive.zip"), ("zip://file.txt::https://host.com/archive.zip", "file.txt"), ("zip://dir/file.txt::https://host.com/archive.zip", "file.txt"), ("file.txt", "file.txt"), (str(Path().resolve() / "file.txt"), "file.txt"), ], ) def test_xpath_name(self, input_path, expected): assert xPath(input_path).name == expected @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", "archive"), ("zip://file.txt::https://host.com/archive.zip", "file"), ("zip://dir/file.txt::https://host.com/archive.zip", "file"), ("file.txt", "file"), (str(Path().resolve() / "file.txt"), "file"), ], ) def test_xpath_stem(self, input_path, expected): assert xPath(input_path).stem == expected @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", ".zip"), ("zip://file.txt::https://host.com/archive.zip", ".txt"), ("zip://dir/file.txt::https://host.com/archive.zip", ".txt"), ("file.txt", ".txt"), (str(Path().resolve() / "file.txt"), ".txt"), ], ) def test_xpath_suffix(self, input_path, expected): assert xPath(input_path).suffix == expected @pytest.mark.parametrize( "input_path, suffix, expected", [ ("https://host.com/archive.zip", ".ann", "https://host.com/archive.ann"), ("zip://file.txt::https://host.com/archive.zip", ".ann", "zip://file.ann::https://host.com/archive.zip"), ( "zip://dir/file.txt::https://host.com/archive.zip", ".ann", "zip://dir/file.ann::https://host.com/archive.zip", ), ("file.txt", ".ann", "file.ann"), (str(Path().resolve() / "file.txt"), ".ann", str(Path().resolve() / "file.ann")), ], ) def test_xpath_with_suffix(self, input_path, suffix, expected): assert xPath(input_path).with_suffix(suffix) == xPath(expected) @pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"]) def test_streaming_dl_manager_download_dummy_path(urlpath): dl_manager = StreamingDownloadManager() assert dl_manager.download(urlpath) == urlpath def test_streaming_dl_manager_download(text_path): dl_manager = StreamingDownloadManager() out = dl_manager.download(text_path) assert out == text_path with xopen(out, encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"]) def test_streaming_dl_manager_download_and_extract_no_extraction(urlpath): dl_manager = StreamingDownloadManager() assert dl_manager.download_and_extract(urlpath) == urlpath def test_streaming_dl_manager_extract(text_gz_path, text_path): dl_manager = StreamingDownloadManager() output_path = dl_manager.extract(text_gz_path) path = os.path.basename(text_gz_path) path = path[: path.rindex(".")] assert output_path == f"gzip://{path}::{text_gz_path}" fsspec_open_file = xopen(output_path, encoding="utf-8") with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() def test_streaming_dl_manager_download_and_extract_with_extraction(text_gz_path, text_path): dl_manager = StreamingDownloadManager() output_path = dl_manager.download_and_extract(text_gz_path) path = os.path.basename(text_gz_path) path = path[: path.rindex(".")] assert output_path == 
f"gzip://{path}::{text_gz_path}" fsspec_open_file = xopen(output_path, encoding="utf-8") with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize( "input_path, filename, expected_path", [("https://domain.org/archive.zip", "filename.jsonl", "zip://filename.jsonl::https://domain.org/archive.zip")], ) def test_streaming_dl_manager_download_and_extract_with_join(input_path, filename, expected_path): dl_manager = StreamingDownloadManager() extracted_path = dl_manager.download_and_extract(input_path) output_path = xjoin(extracted_path, filename) assert output_path == expected_path @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_streaming_dl_manager_extract_all_supported_single_file_compression_types( compression_fs_class, gz_file, xz_file, zstd_file, bz2_file, lz4_file, text_file ): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = input_paths[compression_fs_class.protocol] if input_path is None: reason = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lz4.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) dl_manager = StreamingDownloadManager() output_path = dl_manager.extract(input_path) path = os.path.basename(input_path) path = path[: path.rindex(".")] assert output_path == f"{compression_fs_class.protocol}://{path}::{input_path}" fsspec_open_file = xopen(output_path, encoding="utf-8") with fsspec_open_file as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize( "urlpath, expected_protocol", [ ("zip://train-00000.json.gz::https://foo.bar/data.zip", "gzip"), ("https://foo.bar/train.json.gz?dl=1", "gzip"), ("http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip", "zip"), ("https://github.com/user/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true", "zip"), ("https://github.com/user/repo/blob/master/data/morph_train.tsv?raw=true", None), ("https://repo.org/bitstream/handle/20.500.12185/346/annotated_corpus.zip?sequence=3&isAllowed=y", "zip"), ("https://zenodo.org/record/2787612/files/SICK.zip?download=1", "zip"), ], ) def test_streaming_dl_manager_get_extraction_protocol(urlpath, expected_protocol): assert _get_extraction_protocol(urlpath) == expected_protocol @pytest.mark.parametrize( "urlpath, expected_protocol", [ (TEST_GG_DRIVE_GZIPPED_URL, "gzip"), (TEST_GG_DRIVE_ZIPPED_URL, "zip"), ], ) @slow # otherwise it spams Google Drive and the CI gets banned def test_streaming_dl_manager_get_extraction_protocol_gg_drive(urlpath, expected_protocol): assert _get_extraction_protocol(urlpath) == expected_protocol @pytest.mark.parametrize( "urlpath", [ "zip://train-00000.tar.gz::https://foo.bar/data.zip", "https://foo.bar/train.tar.gz", "https://foo.bar/train.tgz", "https://foo.bar/train.tar", ], ) def test_streaming_dl_manager_extract_throws(urlpath): with pytest.raises(NotImplementedError): _ = StreamingDownloadManager().extract(urlpath) @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive(): with xopen(TEST_GG_DRIVE_URL) as f: assert f.read() == TEST_GG_DRIVE_CONTENT @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive_no_extract(): urlpath = 
StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_URL) with xopen(urlpath) as f: assert f.read() == TEST_GG_DRIVE_CONTENT @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive_gzipped(): urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_GZIPPED_URL) with xopen(urlpath) as f: assert f.read() == TEST_GG_DRIVE_CONTENT @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive_zipped(): urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_ZIPPED_URL) all_files = list(xglob(xjoin(urlpath, "*"))) assert len(all_files) == 1 assert xbasename(all_files[0]) == TEST_GG_DRIVE_FILENAME with xopen(all_files[0]) as f: assert f.read() == TEST_GG_DRIVE_CONTENT def _test_jsonl(path, file): assert path.endswith(".jsonl") for num_items, line in enumerate(file, start=1): item = json.loads(line.decode("utf-8")) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"]) def test_iter_archive_path(archive_jsonl, request): archive_jsonl_path = request.getfixturevalue(archive_jsonl) dl_manager = StreamingDownloadManager() archive_iterable = dl_manager.iter_archive(archive_jsonl_path) num_jsonl = 0 for num_jsonl, (path, file) in enumerate(archive_iterable, start=1): _test_jsonl(path, file) assert num_jsonl == 2 # do it twice to make sure it's reset correctly num_jsonl = 0 for num_jsonl, (path, file) in enumerate(archive_iterable, start=1): _test_jsonl(path, file) assert num_jsonl == 2 @pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"]) def test_iter_archive_file(archive_nested_jsonl, request): archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl) dl_manager = StreamingDownloadManager() files_iterable = dl_manager.iter_archive(archive_nested_jsonl_path) num_tar, num_jsonl = 0, 0 for num_tar, (path, file) in enumerate(files_iterable, start=1): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): _test_jsonl(subpath, subfile) assert num_tar == 1 assert num_jsonl == 2 # do it twice to make sure it's reset correctly num_tar, num_jsonl = 0, 0 for num_tar, (path, file) in enumerate(files_iterable, start=1): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): _test_jsonl(subpath, subfile) assert num_tar == 1 assert num_jsonl == 2 def test_iter_files(data_dir_with_hidden_files): dl_manager = StreamingDownloadManager() for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1): assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2 def test_xnumpy_load(tmp_path): import numpy as np expected_x = np.arange(10) npy_path = tmp_path / "data-x.npy" np.save(npy_path, expected_x) x = xnumpy_load(npy_path) assert np.array_equal(x, expected_x) npz_path = tmp_path / "data.npz" np.savez(npz_path, x=expected_x) with xnumpy_load(npz_path) as f: x = f["x"] assert np.array_equal(x, expected_x)
0