diff --git a/venv/lib/python3.10/site-packages/accelerate/__init__.py b/venv/lib/python3.10/site-packages/accelerate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91452edbb23d673578d43365858526c4ec48c28e --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/__init__.py @@ -0,0 +1,51 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +__version__ = "1.10.0" + +from .accelerator import Accelerator +from .big_modeling import ( + cpu_offload, + cpu_offload_with_hook, + disk_offload, + dispatch_model, + init_empty_weights, + init_on_device, + load_checkpoint_and_dispatch, +) +from .data_loader import skip_first_batches +from .inference import prepare_pippy +from .launchers import debug_launcher, notebook_launcher +from .parallelism_config import ParallelismConfig +from .state import PartialState +from .utils import ( + AutocastKwargs, + DataLoaderConfiguration, + DDPCommunicationHookType, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + FullyShardedDataParallelPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + ProfileKwargs, + find_executable_batch_size, + infer_auto_device_map, + is_rich_available, + load_checkpoint_in_model, + synchronize_rng_states, +) + + +if is_rich_available(): + from .utils import rich diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bba388948e9783cff0c8f8fbd5671dd5a6ff546 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..428e998a85c199a8fd73ac0df4b79a19f3088606 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1be68e101fd10de0206e9abf6ea5c544dff810a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad832a5d2d5753d88d1e26337b88770ae21fd0ca Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f715ab10e7d59eabd2f4ee6c0c4304a174a9f341 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a2c9586d4401f50caacce696ec6c3af642359f Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27c21a1a2803f44c72ec54961c9b7eaba1948fbe Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dda9f4c6c4ef86f1318b45b64fbdcbfcb5af87a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22e0f9a0b69f754d9d2de93cd877005880e81f04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09a56d4c89f449324708675c0d26c33b2a2776fe Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9524f9bdf1d0a38b285587941c9558c8a6628ba8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/parallelism_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/parallelism_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ca7b9ceea5c5aa191d7038f2aa1caa98cf9cef7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/parallelism_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8935e3ed8b210e78bc67cb3e400280c6827a6fac Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f295282918c2ffdf8fb1ed72060d508d44157863 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cbe43d7145a390c9501013698cf0a8e9a5480264 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/accelerator.py b/venv/lib/python3.10/site-packages/accelerate/accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..bd7757b721df3382068b3b2b98750177834819ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/accelerator.py @@ -0,0 +1,4229 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import contextlib +import functools +import json +import math +import os +import re +import shutil +import sys +import warnings +from collections import OrderedDict +from contextlib import contextmanager +from functools import partial +from types import MethodType +from typing import Any, Callable, Union + +import torch +import torch.utils.hooks as hooks +from huggingface_hub import split_torch_state_dict_into_shards + +from accelerate.utils.dataclasses import FP8BackendType + +from .big_modeling import _attach_context_parallel_hooks +from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state +from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches +from .logging import get_logger +from .optimizer import AcceleratedOptimizer +from .parallelism_config import ParallelismConfig +from .scheduler import AcceleratedScheduler +from .state import AcceleratorState, GradientState, PartialState +from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers +from .utils import ( + MODEL_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + SAFE_WEIGHTS_PATTERN_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + WEIGHTS_PATTERN_NAME, + AORecipeKwargs, + AutocastKwargs, + DataLoaderConfiguration, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FP8RecipeKwargs, + FullyShardedDataParallelPlugin, + GradientAccumulationPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + MSAMPRecipeKwargs, + PrecisionType, + ProfileKwargs, + ProjectConfiguration, + RNGType, + TERecipeKwargs, + TorchDynamoPlugin, + TorchTensorParallelPlugin, + apply_fp8_autowrap, + check_os_kernel, + clean_state_dict_for_safetensors, + compare_versions, + convert_model, + convert_model_to_fp8_ao, + convert_outputs_to_fp32, + ensure_weights_retied, + extract_model_from_parallel, + 
fsdp2_apply_ac, + fsdp2_canonicalize_names, + fsdp2_prepare_model, + fsdp2_switch_optimizer_parameters, + gather, + gather_object, + get_fsdp2_grad_scaler, + get_grad_scaler, + get_mixed_precision_context_manager, + get_pretty_name, + has_offloaded_params, + is_bf16_available, + is_bitsandbytes_multi_backend_available, + is_deepspeed_available, + is_ipex_available, + is_lomo_available, + is_megatron_lm_available, + is_mlu_available, + is_msamp_available, + is_musa_available, + is_npu_available, + is_torch_version, + is_torch_xla_available, + is_torchao_available, + is_transformer_engine_available, + is_xpu_available, + load_fsdp_model, + load_fsdp_optimizer, + model_has_dtensor, + pad_across_processes, + parse_choice_from_env, + recursively_apply, + reduce, + release_memory, + save, + save_fsdp_model, + save_fsdp_optimizer, + wait_for_everyone, +) +from .utils.constants import ( + FSDP2_PYTORCH_VERSION, + FSDP_PYTORCH_VERSION, + PROFILE_PATTERN_NAME, + SCALER_NAME, +) +from .utils.modeling import get_state_dict_offloaded_model +from .utils.other import compile_regions, compile_regions_deepspeed, is_compiled_module + + +if is_deepspeed_available(): + from .utils import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, + map_pytorch_optim_to_deepspeed, + ) + +if is_megatron_lm_available(): + from .utils import ( + MegatronEngine, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + megatron_lm_initialize, + megatron_lm_prepare_data_loader, + megatron_lm_prepare_model_optimizer_scheduler, + ) + +from torch.distributed.algorithms.join import Join + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + import torch_xla.distributed.xla_multiprocessing as xmp + + +if is_npu_available(check_device=False): + import torch_npu # noqa: F401 + + +try: + from torch.optim.lr_scheduler import LRScheduler +except ImportError: + 
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler + +logger = get_logger(__name__) + +# Sentinel values for defaults +_split_batches = object() +_dispatch_batches = object() +_even_batches = object() +_use_seedable_sampler = object() + + +class Accelerator: + """ + Creates an instance of an accelerator for distributed training or mixed precision training. + + Args: + device_placement (`bool`, *optional*, defaults to `True`): + Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model, + etc...). + mixed_precision (`str`, *optional*): + Whether or not to use mixed precision training. Choose from 'no','fp16','bf16' or 'fp8'. Will default to + the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the + accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8' + requires the installation of transformers-engine. + gradient_accumulation_steps (`int`, *optional*, default to 1): + The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with + `Accelerator.accumulate`. If not passed, will default to the value in the environment variable + `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`. + cpu (`bool`, *optional*): + Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force + the execution on one process only. + dataloader_config (`DataLoaderConfiguration`, *optional*): + A configuration for how the dataloaders should be handled in distributed scenarios. + deepspeed_plugin ([`~utils.DeepSpeedPlugin`] or dict of `str`: [`~utils.DeepSpeedPlugin`], *optional*): + Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured + directly using *accelerate config*. 
If using multiple plugins, use the configured `key` property of each + plugin to access them from `accelerator.state.get_deepspeed_plugin(key)`. Alias for `deepspeed_plugins`. + fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*): + Tweak your FSDP related args using this argument. This argument is optional and can be configured directly + using *accelerate config* + torch_tp_plugin ([`~utils.TorchTensorParallelPlugin`], *optional*): + Deprecated: use `parallelism_config` with `tp_size` instead. + megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*): + Tweak your MegatronLM related args using this argument. This argument is optional and can be configured + directly using *accelerate config* + rng_types (list of `str` or [`~utils.RNGType`]): + The list of random number generators to synchronize at the beginning of each iteration in your prepared + dataloaders. Should be one or several of: + + - `"torch"`: the base torch random number generator + - `"cuda"`: the CUDA random number generator (GPU only) + - `"xla"`: the XLA random number generator (TPU only) + - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your + dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. + + Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6. + log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): + A list of loggers to be setup for experiment tracking. Should be one or several of: + + - `"all"` + - `"tensorboard"` + - `"wandb"` + - `"trackio"` + - `"aim"` + - `"comet_ml"` + - `"mlflow"` + - `"dvclive"` + - `"swanlab"` + If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can + also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. 
+ project_config ([`~utils.ProjectConfiguration`], *optional*): + A configuration for how saving the state can be handled. + project_dir (`str`, `os.PathLike`, *optional*): + A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved + checkpoints. + step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`): + Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only + done under certain circumstances (at the end of each epoch, for instance). + kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*) + A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training, profiling + or mixed precision are created. See [kwargs](kwargs) for more information. + dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`): + Set to one of the possible dynamo backends to optimize your training with torch dynamo. + dynamo_plugin ([`~utils.TorchDynamoPlugin`], *optional*): + A configuration for how torch dynamo should be handled, if more tweaking than just the `backend` or `mode` + is needed. + gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*): + A configuration for how gradient accumulation should be handled, if more tweaking than just the + `gradient_accumulation_steps` is needed. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration. + - **local_process_index** (`int`) -- The process index on the current machine. + - **mixed_precision** (`str`) -- The configured mixed precision mode. + - **num_processes** (`int`) -- The total number of processes used for training. 
+ - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of + gradient overflow in mixed precision), in which + case the learning rate should not be changed. + - **process_index** (`int`) -- The overall index of the current process among all processes. + - **state** ([`~state.AcceleratorState`]) -- The distributed setup state. + - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes. + - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training. + """ + + def __init__( + self, + device_placement: bool = True, + split_batches: bool = _split_batches, + mixed_precision: PrecisionType | str | None = None, + gradient_accumulation_steps: int = 1, + cpu: bool = False, + dataloader_config: DataLoaderConfiguration | None = None, + deepspeed_plugin: DeepSpeedPlugin | dict[str, DeepSpeedPlugin] | None = None, + fsdp_plugin: FullyShardedDataParallelPlugin | None = None, + torch_tp_plugin: TorchTensorParallelPlugin | None = None, # Deprecate later, warning in `post_init` + megatron_lm_plugin: MegatronLMPlugin | None = None, + rng_types: list[str | RNGType] | None = None, + log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None, + project_dir: str | os.PathLike | None = None, + project_config: ProjectConfiguration | None = None, + gradient_accumulation_plugin: GradientAccumulationPlugin | None = None, + step_scheduler_with_optimizer: bool = True, + kwargs_handlers: list[KwargsHandler] | None = None, + dynamo_backend: DynamoBackend | str | None = None, + dynamo_plugin: TorchDynamoPlugin | None = None, + deepspeed_plugins: DeepSpeedPlugin | dict[str, DeepSpeedPlugin] | None = None, + parallelism_config: ParallelismConfig | None = None, + ): + self.trackers = [] + if project_config is not None: + self.project_configuration = project_config + else: + self.project_configuration = 
ProjectConfiguration(project_dir=project_dir) + if project_dir is not None and self.project_dir is None: + self.project_configuration.set_directories(project_dir) + + if mixed_precision is not None: + mixed_precision = str(mixed_precision) + if mixed_precision not in PrecisionType: + raise ValueError( + f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}" + ) + if torch_tp_plugin is not None: + warnings.warn( + "`TorchTensorParallelPlugin` is deprecated and will be removed in a future version of Accelerate. " + "Please use the `ParallelismConfig` with `tp_size` instead.", + FutureWarning, + ) + + if dynamo_plugin is not None and dynamo_backend is not None: + raise ValueError("You cannot pass in both `dynamo_plugin` and `dynamo_backend`, please only pass in one.") + if dynamo_backend is not None: + dynamo_plugin = TorchDynamoPlugin(backend=dynamo_backend) + elif dynamo_plugin is None: + dynamo_plugin = TorchDynamoPlugin() + + if deepspeed_plugins is not None and deepspeed_plugin is not None: + raise ValueError("You cannot pass in both `deepspeed_plugins` and `deepspeed_plugin`.") + elif deepspeed_plugin is not None: + deepspeed_plugins = deepspeed_plugin + + if deepspeed_plugins is None: + # First check if we're creating another `Accelerator` w/o setting `deepspeed_plugin` + if ( + AcceleratorState._shared_state != {} + and AcceleratorState().distributed_type == DistributedType.DEEPSPEED + ): + deepspeed_plugins = AcceleratorState().deepspeed_plugins + else: + # init from env variables + deepspeed_plugins = ( + DeepSpeedPlugin() + if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false").lower() == "true" + else None + ) + else: + # If we're creating a second `Accelerator`, users shouldn't be passing in a `deepspeed_plugin` + if ( + AcceleratorState._shared_state != {} + and AcceleratorState().distributed_type == DistributedType.DEEPSPEED + and AcceleratorState().deepspeed_plugins is not None + ): + raise NotImplementedError( + "You 
cannot pass in a `deepspeed_plugin` when creating a second `Accelerator`. " + "Please make sure the first `Accelerator` is initialized with all the plugins you want to use." + ) + if isinstance(deepspeed_plugins, dict): + for plugin in deepspeed_plugins.values(): + if not isinstance(plugin, DeepSpeedPlugin): + raise TypeError("`deepspeed_plugin` must be a DeepSpeedPlugin object.") + + if deepspeed_plugins is not None: + os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.") + if is_mlu_available(): + if compare_versions("deepspeed", "<", "0.15.2"): + raise ImportError("DeepSpeed MLU version must be >= 0.15.2. Please update DeepSpeed.") + elif is_musa_available(): + if compare_versions("deepspeed", "<", "0.14.3"): + raise ImportError("DeepSpeed MUSA version must be >= 0.14.3. Please update DeepSpeed.") + elif compare_versions("deepspeed", "<", "0.9.3"): + raise ImportError("DeepSpeed version must be >= 0.9.3. 
Please update DeepSpeed.") + + self.deepspeed_engine_wrapped = None + + if os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true" or isinstance( + fsdp_plugin, FullyShardedDataParallelPlugin + ): + if not is_torch_version(">=", FSDP_PYTORCH_VERSION): + raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}") + + if fsdp_plugin is None: # init from env variables + fsdp_plugin = ( + FullyShardedDataParallelPlugin() + if os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true" + else None + ) + else: + if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin): + raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.") + os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided + + if fsdp_plugin is not None and fsdp_plugin.fsdp_version == 2: + if not is_torch_version(">=", FSDP2_PYTORCH_VERSION): + raise ImportError(f"FSDP2 requires PyTorch >= {FSDP2_PYTORCH_VERSION}") + + if megatron_lm_plugin is None: # init from env variables + megatron_lm_plugin = ( + MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false").lower() == "true" else None + ) + else: + if not isinstance(megatron_lm_plugin, MegatronLMPlugin): + raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.") + os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided + + if megatron_lm_plugin: + if not is_megatron_lm_available(): + raise ImportError("Megatron is not installed. 
please build it from source.") + + # Kwargs handlers + self.ddp_handler = None + self.scaler_handler = None + self.init_handler = None + self.fp8_recipe_handler = None + self.ao_recipe_handler = None + self.te_recipe_handler = None + self.msamp_recipe_handler = None + self.autocast_handler = None + self.profile_handler = None + self.has_lomo_optimizer = False + + found_handlers = set() + handler_class_to_attr = { + DistributedDataParallelKwargs: "ddp_handler", + GradScalerKwargs: "scaler_handler", + InitProcessGroupKwargs: "init_handler", + FP8RecipeKwargs: "fp8_recipe_handler", + AutocastKwargs: "autocast_handler", + ProfileKwargs: "profile_handler", + AORecipeKwargs: "ao_recipe_handler", + TERecipeKwargs: "te_recipe_handler", + MSAMPRecipeKwargs: "msamp_recipe_handler", + } + self.has_fp8_handler = False + if kwargs_handlers is not None: + for handler in kwargs_handlers: + assert isinstance(handler, KwargsHandler), ( + f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`." 
+ ) + # Add the handler class to the set of found handlers + if handler.__class__ in found_handlers: + raise ValueError(f"You can only pass one {handler.__class__} in `kwargs_handlers`.") + found_handlers.add(handler.__class__) + handler_attr = handler_class_to_attr[handler.__class__] + setattr(self, handler_attr, handler) + if "recipe_handler" in handler_attr and not self.has_fp8_handler: + self.has_fp8_handler = True + + if parallelism_config is None: + # TODO: Remove after deprecating tp_plugin + if torch_tp_plugin is not None: + parallelism_config = ParallelismConfig(tp_size=torch_tp_plugin.tp_size) + elif os.environ.get("ACCELERATE_USE_PARALLELISM_CONFIG", "false").lower() == "true": + parallelism_config = ParallelismConfig() + + kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {} + self.state = AcceleratorState( + mixed_precision=mixed_precision, + cpu=cpu, + dynamo_plugin=dynamo_plugin, + deepspeed_plugin=deepspeed_plugins, + fsdp_plugin=fsdp_plugin, + megatron_lm_plugin=megatron_lm_plugin, + parallelism_config=parallelism_config, + _from_accelerator=True, + **kwargs, + ) + + if self.parallelism_config: + self.state.device_mesh = parallelism_config.get_device_mesh(self.device.type) + self.parallelism_config._validate_accelerator(self) + + self.fp8_enabled = self.state.mixed_precision == "fp8" or mixed_precision == "fp8" + + # Check for automatic FP8 recipe creation + if self.fp8_enabled and not self.has_fp8_handler: + if self.fp8_backend == FP8BackendType.AO: + self.ao_recipe_handler = AORecipeKwargs() + elif self.fp8_backend == FP8BackendType.TE: + self.te_recipe_handler = TERecipeKwargs() + elif self.fp8_backend == FP8BackendType.MSAMP: + self.msamp_recipe_handler = MSAMPRecipeKwargs() + elif self.fp8_backend == FP8BackendType.NO: + # Prioritize AO -> TE -> MSAMP + if is_torchao_available(): + logger.info("Found `torchao` installed, using it for FP8 training.") + self.ao_recipe_handler = AORecipeKwargs() + elif 
is_transformer_engine_available(): + logger.info("Found `transformer-engine` installed, using it for FP8 training.") + self.te_recipe_handler = TERecipeKwargs() + elif is_msamp_available(): + logger.info("Found `msamp` installed, using it for FP8 training.") + self.msamp_recipe_handler = MSAMPRecipeKwargs() + else: + raise ImportError( + "Tried to train with `fp8` and auto-detect backend, but no FP8-compatible backend was installed. " + "Valid backends are: `torchao`, `transformer-engine`, and `msamp`." + ) + self.has_fp8_handler = True + + self.delayed_fp8_autocast = False + if self.has_fp8_handler: + # We already check if FP8 is available during `self.state` + if not self.fp8_enabled and ( + self.distributed_type not in (DistributedType.FSDP, DistributedType.DEEPSPEED) + ): + raise ValueError("Passing in an FP8 configuration requires setting `mixed_precision='fp8'`.") + self.delayed_fp8_autocast = self.fp8_backend == "TE" and self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.FSDP, + ) + + # TODO: S1ro - this is probably gonna be a problem with other fp8 backends too + if ( + self.fp8_backend == FP8BackendType.AO + and self.state.distributed_type == DistributedType.FSDP + and self.state.fsdp_plugin.cpu_ram_efficient_loading + ): + raise ValueError( + "torchao with FSDP2 and cpu_ram_efficient_loading is not supported, setting `cpu_ram_efficient_loading` to False will fix the issue and work as intended." 
+ ) + + trackers = filter_trackers(log_with, self.logging_dir) + if len(trackers) < 1 and log_with is not None: + warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.") + self.log_with = trackers + + if ( + (mixed_precision != "bf16") + and getattr(self.state, "downcast_bfloat", False) + and (self.state.distributedType != DistributedType.XLA) + ): + raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU") + + if gradient_accumulation_plugin is not None: + if gradient_accumulation_steps != 1: + raise ValueError( + "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object." + ) + else: + gradient_accumulation_steps = int( + parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps) + ) + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps) + + # If using DeepSpeed, update gradient accumulation steps from the DeepSpeed plugin + self.gradient_state = GradientState( + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + + self.device_placement = device_placement + if dataloader_config is None: + dataloader_config = DataLoaderConfiguration() + self.dataloader_config = dataloader_config + self.step_scheduler_with_optimizer = step_scheduler_with_optimizer + + # Mixed precision attributes + self.scaler = None + self.native_amp = False + if ( + self.state.mixed_precision == "fp16" + and self.device.type != "cpu" + and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM) + ): + self.native_amp = True + if self.device.type not in ( + "xpu", + "cuda", + "npu", + "xla", + "mlu", + "musa", + "hpu", + "sdaa", + ) or is_torch_xla_available(check_is_tpu=True): + raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).") + kwargs = 
self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {} + + # FSDP2 doesn't use ShardedGradScaler, don't want to modify `get_grad_scaler`, rather create a simple utility + if self.is_fsdp2: + self.scaler = get_fsdp2_grad_scaler(**kwargs) + else: + self.scaler = get_grad_scaler(self.distributed_type, **kwargs) + + elif self.state.mixed_precision == "bf16" and self.distributed_type not in ( + DistributedType.DEEPSPEED, + DistributedType.MEGATRON_LM, + ): + if self.device.type in ["cpu", "xpu", "hpu"]: + self.native_amp = True + else: + self.native_amp = is_bf16_available(True) + if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available(): + raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") + + # for DeepSpeed, self.state.mixed_precision is always "bf16", + # see https://github.com/huggingface/accelerate/blob/main/src/accelerate/state.py#L968 and + # https://github.com/huggingface/accelerate/blob/main/src/accelerate/utils/dataclasses.py#L1263. + elif self.fp8_enabled: + # We always enable `native_amp` for FP8 + self.native_amp = True + if self.fp8_backend == FP8BackendType.MSAMP: + if self.distributed_type == DistributedType.FSDP: + raise NotImplementedError( + "`accelerate` + `MS-AMP` + `FSDP` is not supported at this time. " + "Please consider using deepspeed, which is supported." 
+ ) + elif self.distributed_type != DistributedType.DEEPSPEED: + # MS-AMP requires `GradScaler` even with bf16 autocast w/ single GPU or DDP: + self.scaler = get_grad_scaler(**kwargs) + + # Start of internal step tracking + self.step = 0 + + # Internal references to the training objects + self._optimizers = [] + self._models = [] + self._schedulers = [] + self._dataloaders = [] + self._custom_objects = [] + + # Hooks + self._load_model_state_pre_hook = OrderedDict() + self._save_model_state_pre_hook = OrderedDict() + + # RNG Types + self.rng_types = rng_types + if self.rng_types is None: + self.rng_types = ["generator"] + + # Set a flag tensor for early stopping and other breakpoints + self.flag_tensor = None + + check_os_kernel() + + @property + def deepspeed_plugin(self): + """ + Returns the currently active DeepSpeedPlugin. + + If using multiple plugins, the first one will be the active one by default. Manually call + `accelerator.state.select_deepspeed_plugin(key)` to activate a different plugin. + + If deepspeed is not enabled, this will return `None`. 
+ """ + return self.state.deepspeed_plugin + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return self.state.use_distributed + + @property + def multi_device(self): + return self.use_distributed and self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + ) + + @property + def distributed_type(self): + return self.state.distributed_type + + @property + def num_processes(self): + return self.state.num_processes + + @property + def process_index(self): + return self.state.process_index + + @property + def local_process_index(self): + return self.state.local_process_index + + @property + def device(self): + return self.state.device + + @property + def split_batches(self): + return self.dataloader_config.split_batches + + @property + def dispatch_batches(self): + return self.dataloader_config.dispatch_batches + + @property + def even_batches(self): + return self.dataloader_config.even_batches + + @even_batches.setter + def even_batches(self, value: bool): + self.dataloader_config.even_batches = value + + @property + def use_seedable_sampler(self): + return self.dataloader_config.use_seedable_sampler + + @property + def non_blocking(self): + return self.dataloader_config.non_blocking + + @property + def use_stateful_dataloader(self): + if hasattr(self.dataloader_config, "use_stateful_dataloader"): + return self.dataloader_config.use_stateful_dataloader + return False + + @property + def project_dir(self): + return self.project_configuration.project_dir + + @property + def logging_dir(self): + return self.project_configuration.logging_dir + + @property + def save_iteration(self): + return self.project_configuration.iteration + + @property + def is_main_process(self): + """True for one process only.""" + return 
self.state.is_main_process + + @property + def is_local_main_process(self): + """True for one process per server.""" + return self.state.is_local_main_process + + @property + def is_last_process(self): + return self.process_index == self.num_processes - 1 + + @property + def mixed_precision(self): + return self.state.mixed_precision + + @property + def is_fsdp2(self): + return self.state.is_fsdp2 + + @property + def is_composable_parallelism_enabled(self): + return self.is_fsdp2 + + @property + def parallelism_config(self) -> Union[ParallelismConfig, None]: + return self.state.parallelism_config + + @property + def torch_device_mesh(self): + return self.state.device_mesh + + @property + def should_save_model(self): + if (pc := self.parallelism_config) is None: + # shouldn't even happen + return self.state.is_local_main_process + _non_model_shard_dims = { + pc.dp_replicate_enabled: "dp_replicate", + pc.cp_enabled: "cp", + } + + # return all( + # self.torch_device_mesh[dim].get_local_rank() == 0 for key, dim in non_model_shard_dims.items() if key + # ) + # TODO: S1ro - this is a temporary solution until we figure out why `save_safe_file` is slow when not all processes + return True + + @property + def tensor_parallel_rank(self) -> int: + """ + Returns the local rank for tensor parallelism. If tensor parallelism is configured but not enabled, returns 0 + since all ranks are assumed to be the same. + """ + if self.parallelism_config: + if self.parallelism_config.tp_enabled: + return self.torch_device_mesh.get_local_rank("tp") + return 0 + raise RuntimeError("Tensor parallelism is not configured. Set `parallelism_config` first.") + + @property + def pipeline_parallel_rank(self) -> int: + """ + Pipeline parallelism is not supported yet. + """ + raise NotImplementedError("Pipeline parallelism is currently not supported in Accelerate.") + + @property + def context_parallel_rank(self) -> int: + """ + Context parallelism is not supported yet. 
+ """ + raise NotImplementedError("Context parallelism is currently not supported in Accelerate.") + + @property + def data_parallel_rank(self) -> int: + """ + Returns the local rank for replicate-based data parallelism. If replicate-based data parallelism is configured + but not enabled, returns 0 since all ranks are assumed to be the same. + """ + if self.parallelism_config: + if self.parallelism_config.dp_replicate_enabled: + return self.torch_device_mesh.get_local_rank("dp_replicate") + return 0 + raise RuntimeError("Data parallelism is not configured. Set `parallelism_config` first.") + + @property + def data_parallel_shard_rank(self) -> int: + """ + Returns the local rank for shard-based data parallelism. If shard-based data parallelism is configured but not + enabled, returns 0 since all ranks are assumed to be the same. + """ + if self.parallelism_config: + if self.parallelism_config.dp_shard_enabled: + return self.torch_device_mesh.get_local_rank("dp_shard") + return 0 + raise RuntimeError("Shard-based data parallelism is not configured. Set `parallelism_config` first.") + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs + or passing in less inputs than there are processes. 
If so, just remember to drop the padded elements + afterwards. + + Example: + + ```python + # Assume there are two processes + from accelerate import Accelerator + + accelerator = Accelerator() + with accelerator.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: + yield inputs + + def on_main_process(self, function: Callable[..., Any] = None): + """ + A decorator that will run the decorated function on the main process only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`): The function to decorate. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + + + >>> @accelerator.on_main_process + ... def print_something(): + ... print("This will be printed by process 0 only.") + + + >>> print_something() + "This will be printed by process 0 only" + ``` + """ + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_main_process(function)(*args, **kwargs) + + return _inner + + def on_local_main_process(self, function: Callable[..., Any] = None): + """ + A decorator that will run the decorated function on the local main process only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. 
+ from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_main_process + def print_something(): + print("This will be printed by process 0 only on each server.") + + + print_something() + # On server 1: + "This will be printed by process 0 only" + # On server 2: + "This will be printed by process 0 only" + ``` + """ + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_local_main_process(function)(*args, **kwargs) + + return _inner + + def on_last_process(self, function: Callable[..., Any]): + """ + A decorator that will run the decorated function on the last process only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 4 processes. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_last_process + def print_something(): + print(f"Printed on process {accelerator.process_index}") + + + print_something() + "Printed on process 3" + ``` + """ + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_last_process(function)(*args, **kwargs) + + return _inner + + def on_process(self, function: Callable[..., Any] = None, process_index: int = None): + """ + A decorator that will run the decorated function on a given process index only. Can also be called using the + `PartialState` class. 
+ + Args: + function (`Callable`, `optional`): + The function to decorate. + process_index (`int`, `optional`): + The index of the process on which to run the function. + + Example: + ```python + # Assume we have 4 processes. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_process(process_index=2) + def print_something(): + print(f"Printed on process {accelerator.process_index}") + + + print_something() + "Printed on process 2" + ``` + """ + # Initial construction of the decorator. + if (self is not None) and (process_index is not None) and (function is None): + return partial(self.on_process, process_index=process_index) + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_process(function, process_index)(*args, **kwargs) + + return _inner + + def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): + """ + A decorator that will run the decorated function on a given local process index only. Can also be called using + the `PartialState` class. + + Args: + function (`Callable`, *optional*): + The function to decorate. + local_process_index (`int`, *optional*): + The index of the local process on which to run the function. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_process(local_process_index=2) + def print_something(): + print(f"Printed on process {accelerator.local_process_index}") + + + print_something() + # On server 1: + "Printed on process 2" + # On server 2: + "Printed on process 2" + ``` + """ + # Initial construction of the decorator. 
+ if (self is not None) and (local_process_index is not None) and (function is None): + return partial(self.on_local_process, local_process_index=local_process_index) + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_local_process(function, local_process_index)(*args, **kwargs) + + return _inner + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> with accelerator.main_process_first(): + ... # This will be printed first by process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {accelerator.process_index}") + ``` + """ + with self.state.main_process_first(): + yield + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> with accelerator.local_main_process_first(): + ... # This will be printed first by local process 0 then in a seemingly + ... # random order by the other processes. + ... 
print(f"This will be printed by process {accelerator.local_process_index}") + ``` + """ + with self.state.local_main_process_first(): + yield + + @contextmanager + def no_sync(self, model): + """ + A context manager to disable gradient synchronizations across DDP processes by calling + `torch.nn.parallel.DistributedDataParallel.no_sync`. + + If `model` is not in DDP, this context manager does nothing + + Args: + model (`torch.nn.Module`): + PyTorch Module that was prepared with `Accelerator.prepare` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) + >>> input_a = next(iter(dataloader)) + >>> input_b = next(iter(dataloader)) + + >>> with accelerator.no_sync(): + ... outputs = model(input_a) + ... loss = loss_func(outputs) + ... accelerator.backward(loss) + ... # No synchronization across processes, only accumulate gradients + >>> outputs = model(input_b) + >>> accelerator.backward(loss) + >>> # Synchronization across all processes + >>> optimizer.step() + >>> optimizer.zero_grad() + ``` + """ + context = contextlib.nullcontext + if self.use_distributed: + if self.distributed_type != DistributedType.DEEPSPEED or self.state.deepspeed_plugin.zero_stage < 2: + context = getattr(model, "no_sync", context) + + with context(): + yield + + @staticmethod + @contextmanager + def trigger_sync_in_backward(model): + """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under + `Accelerator.no_sync` (only applicable in multi-GPU scenarios). + + If the script is not launched in distributed mode, this context manager does nothing. + + Args: + model (`torch.nn.Module`): + The model for which to trigger the gradient synchronization. 
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) + + >>> with accelerator.no_sync(): + ... loss_a = loss_func(model(input_a)) # first forward pass + ... loss_b = loss_func(model(input_b)) # second forward pass + >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients + >>> with accelerator.trigger_sync_in_backward(model): + ... accelerator.backward(loss_b) # Synchronization across all processes + >>> optimizer.step() + >>> optimizer.zero_grad() + ``` + """ + if not isinstance(model, torch.nn.parallel.DistributedDataParallel): + yield + return + + old_require_backward_grad_sync = model.require_backward_grad_sync + old_require_forward_param_sync = model.require_forward_param_sync + + # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features. + # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466 + model.require_backward_grad_sync = True + model.require_forward_param_sync = True + # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402 + model.reducer.prepare_for_backward([]) + try: + yield + finally: + model.require_backward_grad_sync = old_require_backward_grad_sync + model.require_forward_param_sync = old_require_forward_param_sync + + def _do_sync(self): + "Sets the right `sync_gradients` context and either resets or increases `self.step`" + if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader: + self.step = 0 + self.gradient_state._set_sync_gradients(True) + else: + self.step += 1 + self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0) + + @property + def sync_gradients(self): + return 
self.gradient_state.sync_gradients + + @sync_gradients.setter + def sync_gradients(self, sync_gradients): + self.gradient_state.sync_gradients = sync_gradients + + @property + def gradient_accumulation_steps(self): + return self.gradient_state.num_steps + + @gradient_accumulation_steps.setter + def gradient_accumulation_steps(self, gradient_accumulation_steps): + self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps}) + + @contextmanager + def accumulate(self, *models): + """ + A context manager that will lightly wrap around and perform gradient accumulation automatically + + Args: + *models (list of `torch.nn.Module`): + PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will + skip gradient syncing during backward pass in distributed training + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=1) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, output in dataloader: + ... with accelerator.accumulate(model): + ... outputs = model(input) + ... loss = loss_func(outputs) + ... loss.backward() + ... optimizer.step() + ... scheduler.step() + ... optimizer.zero_grad() + ``` + """ + self._do_sync() + + allow_gradient_sync = ( + self.sync_gradients # must sync if sync gradients need to complete an optimizer step + or ( + # the no_sync context stops the gradients from reducing during distributed training + # bringing speedup (potentially at some costs). Here, no_sync can be prevented + # by setting sync_each_batch = True. 
+ self.use_distributed # only relevant in distributed settings + and self.gradient_state.plugin_kwargs.get("sync_each_batch", False) + ) + ) + with contextlib.ExitStack() as cm_stack: + for m in models: + cm_stack.enter_context(contextlib.nullcontext() if allow_gradient_sync else self.no_sync(m)) + yield + + @contextmanager + def join_uneven_inputs(self, joinables, even_batches=None): + """ + A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper + around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the + length of the dataset. + + Args: + joinables (`list[torch.distributed.algorithms.Joinable]`): + A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a + PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training. + even_batches (`bool`, *optional*) + If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided, + the default `Accelerator` value wil be used. + + + + `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other + configuration, this method will have no effect. + + + + + + Overidding `even_batches` will not affect iterable-style data loaders. + + + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(even_batches=True) + >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader) + + >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + ... for input, output in dataloader: + ... outputs = model(input) + ... loss = loss_func(outputs) + ... loss.backward() + ... optimizer.step() + ... 
optimizer.zero_grad() + ``` + """ + if self.multi_device: + dl_even_batches_values = [] + + if even_batches is not None: + iterable_dl_seen = False + # override value in batch sampler for map-style datasets + for dl_idx, dl in enumerate(self._dataloaders): + if isinstance(dl, DataLoaderDispatcher): + iterable_dl_seen = True + continue + dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches)) + dl.batch_sampler.even_batches = even_batches + + if iterable_dl_seen: + warnings.warn( + "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable" + ) + else: + even_batches = self.even_batches + + enable_join = False if even_batches else True + try: + with Join(joinables, enable=enable_join, throw_on_early_termination=False): + yield + finally: + # reset any batch samplers that have been modified + for dl_idx, even_batches_value in dl_even_batches_values: + self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value + else: + # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs + if self.distributed_type != DistributedType.NO: + warnings.warn( + "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect." + ) + + with contextlib.nullcontext(joinables): + yield + + def print(self, *args, **kwargs): + """ + Drop in replacement of `print()` to only print once per server. 
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> accelerator.print("Hello world!") + ``` + """ + self.state.print(*args, **kwargs) + + def _prepare_one(self, obj, first_pass=False, device_placement=None): + # First pass of preparation: DataLoader, model, optimizer + if first_pass: + if isinstance(obj, torch.utils.data.DataLoader): + return self.prepare_data_loader(obj, device_placement=device_placement) + elif isinstance(obj, torch.nn.Module): + return self.prepare_model(obj, device_placement=device_placement) + elif isinstance(obj, torch.optim.Optimizer): + optimizer = self.prepare_optimizer(obj, device_placement=device_placement) + return optimizer + # Second pass of preparation: LR scheduler (which need the full list of optimizers) + elif isinstance(obj, LRScheduler): + scheduler = self.prepare_scheduler(obj) + return scheduler + # Return the unprocessed object if previous criteria was not met + return obj + + def prepare(self, *args, device_placement=None): + """ + Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same + order. + + Args: + *args (list of objects): + Any of the following type of objects: + + - `torch.utils.data.DataLoader`: PyTorch Dataloader + - `torch.nn.Module`: PyTorch Module + - `torch.optim.Optimizer`: PyTorch Optimizer + - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler + + device_placement (`list[bool]`, *optional*): + Used to customize whether automatic device placement should be performed for each object passed. Needs + to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP. 
+ + + + You don't need to prepare a model if you only use it for inference without any kind of mixed precision + + + + Examples: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume a model, optimizer, data_loader and scheduler are defined + >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler) + ``` + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume a model, optimizer, data_loader and scheduler are defined + >>> device_placement = [True, True, False, False] + >>> # Will place the first two items passed in automatically to the right device but not the last two. + >>> model, optimizer, data_loader, scheduler = accelerator.prepare( + ... model, optimizer, data_loader, scheduler, device_placement=device_placement + ... ) + ``` + """ + if device_placement is None: + device_placement = [None for _ in args] + elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM): + raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.") + elif len(device_placement) != len(args): + raise ValueError( + f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)." + ) + + for obj in args: + # TODO: Look at enabling native TP training directly with a proper config + if ( + isinstance(obj, torch.nn.Module) + and self.verify_device_map(obj) + and self.distributed_type != DistributedType.NO + and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" + ): + raise ValueError( + "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." + " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." 
+ ) + + if self.distributed_type == DistributedType.DEEPSPEED: + model_count = 0 + for obj in args: + if isinstance(obj, torch.nn.Module): + model_count += 1 + if model_count > 1: + raise AssertionError( + "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" + ) + + # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will + # have parameters disconnected from the model (so no training :-( ). + # If the model and optimizer have parameters on different devices we raise an error. + if self.distributed_type == DistributedType.XLA: + model_device, optimizer_device = self._get_devices() + if model_device is not None and optimizer_device is not None and model_device != optimizer_device: + raise ValueError( + "The model and the optimizer parameters are not on the same device, which probably means you " + "created an optimizer around your model **before** putting on the device. Make sure the line " + "model.to(device) is before the optimizer creation in your script or remove it entirely and use " + "the flag default value for `device_placement` in your `Accelerator` to let it handle that " + "part for you." + ) + + if self.is_fsdp2: + model_count = 0 + optimizer_count = 0 + for i, obj in enumerate(args): + if isinstance(obj, torch.nn.Module): + model_count += 1 + elif isinstance(obj, torch.optim.Optimizer): + optimizer_count += 1 + + # This needs to be written as such, so that passing other objects other than models/optimizers doesn't raise an error + if (model_count < 1 and optimizer_count > 0) or (model_count > 0 and optimizer_count < 1): + raise ValueError( + "When using FSDP2, a model and optimizer must be passed together to `Accelerator.prepare()`" + " as the optimizer needs to have its parameters modified after the model is converted." 
+ ) + if model_count > 1: + raise ValueError("Only one model is supported when using FSDP2") + + # If we're dealing with device placement, this deals with that by... + tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA + + if tpu_should_fix_optimizer: + # 1. grabbing old model parameters + old_named_params = self._get_named_parameters(*args, drop_refs=False) + + if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: + if ( + is_torch_version("<", "2.7.0") + and (self.device.type == "cpu" or self.device.type == "xpu") + and self.state.use_ipex + ): + logger.warning( + "You are using lower version of PyTorch(< 2.7.0) with ipex acceleration on Intel CPU or XPU, Intel has upstreamed most of the optimizations into stock PyTorch from 2.7.0, we enourage you to install the latest stock PyTorch and enjoy the out-of-experience on Intel CPU/XPU." + ) + args = self._prepare_ipex(*args) + if self.parallelism_config and self.parallelism_config.tp_enabled: + args = self._prepare_tp(*args) + + if self.parallelism_config and self.parallelism_config.cp_enabled: + args = self._prepare_cp(*args) + + if self.fp8_backend == FP8BackendType.TE: + args = self._prepare_te(*args) + elif self.fp8_backend == FP8BackendType.AO: + args = self._prepare_ao(*args) + if self.distributed_type == DistributedType.DEEPSPEED: + result = self._prepare_deepspeed(*args) + elif self.distributed_type == DistributedType.MEGATRON_LM: + result = self._prepare_megatron_lm(*args) + elif self.is_fsdp2: + result = self._prepare_fsdp2(*args) + else: + if self.fp8_backend == FP8BackendType.MSAMP: + args, device_placement = self._prepare_msamp(*args, device_placement=device_placement) + result = tuple( + self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement) + ) + result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement)) + if 
tpu_should_fix_optimizer: + # 2. grabbing new model parameters + new_named_params = self._get_named_parameters(*result) + # 3. building a map from the first to the second + mapping = {p: new_named_params[n] for n, p in old_named_params.items()} + # 4. using that map to update the parameters of the optimizer + for obj in result: + if isinstance(obj, torch.optim.Optimizer): + obj._switch_parameters(mapping) + + for item in result: + if any( + item in container + for container in (self._dataloaders, self._models, self._optimizers, self._schedulers) + ): + item._is_accelerate_prepared = True + + return result if len(result) > 1 else result[0] + + def _prepare_tp(self, *args): + # First pass: prepare everything except schedulers (and model, which is prepared separately below) + result = [ + self._prepare_one(obj, first_pass=True) if not isinstance(obj, torch.nn.Module) else obj for obj in args + ] + + # Second pass: prepare schedulers + result = [self._prepare_one(obj) if not isinstance(obj, torch.nn.Module) else obj for obj in result] + + device_mesh = self.torch_device_mesh + + for arg in result: + if not isinstance(arg, torch.nn.Module): + continue + + from torch.distributed.tensor import DTensor, Replicate + from transformers.integrations.tensor_parallel import ReplicateParallel + + model: torch.nn.Module = arg + tp_plan = ReplicateParallel + + for name, param in model.named_parameters(): + if isinstance(param, DTensor): + continue + + dp = DTensor.from_local(param, device_mesh=device_mesh["tp"], placements=[Replicate()]) + param_name, param_type = name.rsplit(".", 1) + module_to_tp = model.get_submodule(param_name) + + tp_plan().prepare_module_tp(module_to_tp, device_mesh["tp"]) + if not isinstance(dp, torch.nn.Parameter): + dp = torch.nn.Parameter(dp, requires_grad=param.requires_grad) + setattr(module_to_tp, param_type, dp) + + return args + + def _prepare_cp(self, *args): + from torch.distributed.tensor.experimental import context_parallel + from 
torch.distributed.tensor.experimental._attention import set_rotate_method + + cp_comm_strategy = self.parallelism_config.cp_handler.cp_comm_strategy + set_rotate_method(cp_comm_strategy) + + self._cp_context = functools.partial(context_parallel, mesh=self.torch_device_mesh["cp"]) + + for arg in args: + if isinstance(arg, torch.nn.Module): + _attach_context_parallel_hooks(arg) + + return args + + def _prepare_fsdp2(self, *args): + # First pass: prepare everything except schedulers (and model, which is prepared separately below) + result = [ + self._prepare_one(obj, first_pass=True) if not isinstance(obj, torch.nn.Module) else obj for obj in args + ] + + # Second pass: prepare schedulers + result = [self._prepare_one(obj) if not isinstance(obj, torch.nn.Module) else obj for obj in result] + + # Prepare the model + model_index, model = None, None + for i, obj in enumerate(result): + if isinstance(obj, torch.nn.Module): + model_index, model = i, obj + + # Invariant: if we have a model, we also have an optimizer (checked in `prepare`) + if model_index is None: + return tuple(result) + + # Needs to be done first, to make sure AC + fully_shard will work as expected + self.state.fsdp_plugin.set_auto_wrap_policy(model) + + # Apply AC if needed + if self.state.fsdp_plugin.activation_checkpointing: + model = fsdp2_apply_ac(self, model) + + # Apply compile if needed, has to be *after* applying AC + # Copied from: `accelerator.prepare_model` ~ L1804 + if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model): + if self.state.dynamo_plugin.use_regional_compilation: + model = compile_regions(model, **self.state.dynamo_plugin.to_kwargs()) + else: + model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs()) + + # Get old params and canonicalize - we cannonicalize to have the mapping easy + old_named_params = fsdp2_canonicalize_names(self._get_named_parameters(*tuple(result), drop_refs=True)) + + # Swap the optimizer parameters with empty, 
so `fully_shard` after will not allocate too much memory + from torch.distributed.tensor import DTensor + + for obj in result: + if isinstance(obj, torch.optim.Optimizer): + for param_group in obj.param_groups: + for i, p in enumerate(param_group["params"]): + # We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation + # We reassign the data_ptr to the original param, so that we preserve the mapping to the new ones + param_group["params"][i] = torch.empty(1, dtype=p.dtype, device=p.device) + param_group["params"][i].data_ptr = ( + p._local_tensor.data_ptr() if isinstance(p, DTensor) else p.data_ptr() + ) + + self._models.append(model) + + # Prepare everything FSDP2 related for the model (except AC) + model = fsdp2_prepare_model(self, model) + + # Remove the old model from the list + if len(self._models) > 1 and (self._models[-2] is self._models[-1]): + del self._models[-2] + + # Replace the old model with the new one (shouldn't be needed as everything should be in place) + result[model_index] = model + + # Get new params and canonicalize + new_named_params = fsdp2_canonicalize_names(self._get_named_parameters(*result)) + # Build a map from old to new params + mapping = {p: new_named_params[n] for n, p in old_named_params.items()} + # Update the optimizer parameters + for obj in result: + if isinstance(obj, torch.optim.Optimizer): + fsdp2_switch_optimizer_parameters(obj, mapping) + + return result + + def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False): + """ + Prepares a PyTorch model for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + model (`torch.nn.Module`): + A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without + any kind of mixed precision + device_placement (`bool`, *optional*): + Whether or not to place the model on the proper device. 
Will default to `self.device_placement`. + evaluation_mode (`bool`, *optional*, defaults to `False`): + Whether or not to set the model for evaluation only, by just applying mixed precision and + `torch.compile` (if configured in the `Accelerator` object). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume a model is defined + >>> model = accelerator.prepare_model(model) + ``` + """ + if device_placement is None: + device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP + + self._models.append(model) + + # TODO: Look at enabling native TP training directly with a proper config + if ( + self.verify_device_map(model) + and self.distributed_type != DistributedType.NO + and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" + ): + raise ValueError( + "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." + " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." 
+ ) + + if self.native_amp: + model._original_forward = model.forward + autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler) + # NOTE: MS-AMP adds `__func__` already to `model.forward`, so we should always use `model.forward` + if self.fp8_backend == FP8BackendType.MSAMP or not hasattr(model.forward, "__func__"): + model_forward_func = model.forward + model.forward = convert_outputs_to_fp32(autocast_context(model_forward_func)) + else: + model_forward_func = model.forward.__func__ + new_forward = autocast_context(model_forward_func) + model.forward = MethodType(new_forward, model) + model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model) + + # We prepare TE after, allowing for bf16 autocast to happen first + if self.fp8_backend == FP8BackendType.TE and not self.delayed_fp8_autocast: + model = apply_fp8_autowrap(model, self.te_recipe_handler or self.fp8_recipe_handler) + + if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr( + model, "hf_device_map", False + ): + model_devices = set(model.hf_device_map.values()) + if len(model_devices) > 1 and self.distributed_type != DistributedType.NO: + raise ValueError( + "You can't train a model that has been loaded in 8-bit or 4-bit precision on multiple devices in any distributed mode." + " In order to use 8-bit or 4-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism." + " Therefore you should not specify that you are under any distributed regime in your accelerate config." 
+ ) + elif len(model_devices) == 1: + current_device = list(model_devices)[0] + if isinstance(current_device, torch.device): + current_device_index = current_device.index + elif isinstance(current_device, str): + current_device_index = torch.device(current_device).index + else: + current_device_index = current_device + + if self.device.type == "cpu" and is_bitsandbytes_multi_backend_available(): + # bnb with multi-backend supports CPU which don't need to check index. + pass + elif torch.device(current_device_index) != self.device: + # if on the first device (GPU 0) we don't care + if (self.device.index is not None) or (current_device_index != 0): + raise ValueError( + "You can't train a model that has been loaded in 8-bit or 4-bit precision on a different device than the one " + "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}` or `device_map={'':torch.xpu.current_device()}`" + ) + if ( + ("cpu" in model_devices and not is_bitsandbytes_multi_backend_available()) + or ("cpu" in model_devices and is_xpu_available()) + or "disk" in model_devices + ): + raise ValueError( + "You can't train a model that has been loaded in 8-bit or 4-bit precision with CPU or disk offload. " + "If you want train the 8-bit or 4-bit model in CPU, please install bitsandbytes with multi-backend, see https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend" + ) + elif device_placement and not self.verify_device_map(model): + model = model.to(self.device) + if not evaluation_mode: + if self.multi_device and not (self.parallelism_config and self.parallelism_config.tp_enabled): + if model_has_dtensor(model): + raise ValueError( + "Your model contains `DTensor` parameters, which is incompatible with DDP. Maybe you loaded your model with `device_map='auto'`? Specify `device_map='cuda'` or 'cpu' instead." 
+ ) + if any(p.requires_grad for p in model.parameters()): + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + # TODO: Look at enabling native TP training directly with a proper config + if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true": + if self.device.type == "hpu": + device_ids, output_device = [self.device.index], self.device.index + else: + device_ids, output_device = [self.local_process_index], self.local_process_index + else: + device_ids, output_device = None, None + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=device_ids, output_device=output_device, **kwargs + ) + if self.ddp_handler is not None: + self.ddp_handler.register_comm_hook(model) + elif self.parallelism_config and self.parallelism_config.tp_enabled: + if not hasattr(model, "tp_size"): + raise NotImplementedError( + "Model should undergo tensor parallel before passing it to accelerate." + "You can use .from_pretrained(..., tp_plan='auto') if the model supports" + ) + if model.tp_size != self.parallelism_config.tp_size: + raise ValueError( + f"tp_size in the plugin {self.parallelism_config.tp_size} should be same as model's tp size {model.tp_size}" + ) + elif self.is_fsdp2: + raise ValueError( + "FSDP2 preparation should be done via `accelerate.prepare()`, as it requires a model and an optimizer." 
+ ) + + elif self.distributed_type == DistributedType.FSDP: + # We need to fix the optimizer *before* sharding the model + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + # Check if the model is already a FSDP model due to `Manual Wrapping` and if so, + # don't wrap it again + # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it + # is a FSDP model, don't wrap it again + is_type_fsdp = isinstance(model, FSDP) or ( + is_compiled_module(model) and isinstance(model._orig_mod, FSDP) + ) + + if not is_type_fsdp: + self.state.fsdp_plugin.set_auto_wrap_policy(model) + fsdp_plugin = self.state.fsdp_plugin + + # need to ensure that params are re-tied after running + # param_init_fn + fsdp_plugin.param_init_fn = ensure_weights_retied( + fsdp_plugin.param_init_fn, + model, + self.device, + ) + + kwargs = { + # We fallback to reshard_after_forward if sharding_strategy is not set. + # We prerfer sharding_strategy to not break the behavior of the existing code. 
+ # Deprecation warning has already been issued in `utils.dataclasses.py` + "sharding_strategy": fsdp_plugin.sharding_strategy or fsdp_plugin.reshard_after_forward, + "cpu_offload": fsdp_plugin.cpu_offload, + "auto_wrap_policy": fsdp_plugin.auto_wrap_policy, + "mixed_precision": fsdp_plugin.mixed_precision_policy, + "sync_module_states": fsdp_plugin.sync_module_states, + "backward_prefetch": fsdp_plugin.backward_prefetch, + "forward_prefetch": fsdp_plugin.forward_prefetch, + "use_orig_params": fsdp_plugin.use_orig_params, + "param_init_fn": fsdp_plugin.param_init_fn, + "ignored_modules": fsdp_plugin.ignored_modules, + "limit_all_gathers": fsdp_plugin.limit_all_gathers, + "device_id": self.device, + } + + if isinstance(kwargs["ignored_modules"], str): + reg = re.compile(kwargs["ignored_modules"]) + ignored = [] + for name, module in model.named_modules(): + if reg.fullmatch(name): + # ensure that the device for these modules is still set correctly + module.to(self.device) + ignored.append(module) + kwargs["ignored_modules"] = ignored + + model = FSDP(model, **kwargs) + if fsdp_plugin.activation_checkpointing: + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + CheckpointImpl, + apply_activation_checkpointing, + checkpoint_wrapper, + ) + + apply_activation_checkpointing( + model, + checkpoint_wrapper_fn=functools.partial( + checkpoint_wrapper, + checkpoint_impl=CheckpointImpl.NO_REENTRANT, + ), + auto_wrap_policy=fsdp_plugin.auto_wrap_policy, + ) + + # In the event the model had been loaded in low precision, but + # mixed precision had also been activated, then we follow DeepSpeed's + # strategy to hold the parameters in full precision. + # - assume that trainer.args.bf16 and trainer.args.fp16 are already checked against + # fsdp_plugin.mixed_precision_policy. + # - NOTE: we do not check the mixed_precision attribute on the FSDP root wrapper. + # * this attribute will always set by init_utils.init_core_state so its always not None. 
+ # * mixed_precision.param_dtype only regards _fwd_bwd_param_dtype + # * if model is loaded in 16bit, and even if mixed_precision.param_dtype is None, + # we still want to upcast the flat_param. + if self.mixed_precision != "no": # if mixed precision is set + upcasted_log = [] + for module in FSDP.fsdp_modules(model): + # Referencing DeepSpeed Zero3 + # - in Init, params are converted to 16bit while partitioning. + # - in accelerator.prepare, deepspeed.initialize is called to: + # * creates the DeepSpeedEngine. + # * since zero_optimization() is True , calls engine._configure_zero_optimizer. + # + # Inside the DeepSpeed Zero3 optimizer configuration, which initializes + # DeepSpeedZeroOptimizer_Stage3, during which: + # * trainable_param_groups are obtained from the attached optimizer + # (already partitioned in 16bit). + # * then _setup_for_real_optimizer -> _create_fp32_partitions + # which performs the fp32 upcasting. + + # To mimic DeepSeepds's casting in FSDP, we look at the (single) FlatParameter held + # within an FSDP wrapper. This FlatParameter will be seen by the optimizer. + # - even though there is a torch.device('meta') guard below, we + # expect _init_utils._init_param_handle_from_module to already + # sync the parameter. + + if not module._has_params: + continue # skip if FSDP module not managing parameters + param = module._flat_param + if ( + param.dtype != torch.float32 + and param.device != torch.device("meta") + and param.requires_grad + ): + # keep log of names_params that was upcasted + # NOTE: resorted to this because warnings.simplefilter("once") is somehow not working + name_param_log = (module.module.__class__.__name__, ", ".join(module._flat_param._fqns)) + if name_param_log not in upcasted_log: + upcasted_log.append(name_param_log) + + # this works because of FSDP's _runtime_utils.lazy_init. + # Have to be careful not to call anything before this that + # triggers lazy_init (e.g., _is_fsdp_root). 
+ param.data = param.data.to(torch.float32) # upcasting + module._handle._orig_param_dtype = torch.float32 # update + + # report the warnings + # some messages can be quite repetitive, especially when reporting about layers that have identical architecture. + if self.is_main_process: + for name_log, param_log in upcasted_log: + warnings.warn( + f"Upcasted low precision parameters in {name_log} because mixed precision turned on in FSDP. " + f"Affects: {param_log}." + ) + + if len(upcasted_log) > 0: + warnings.warn( + "FSDP upcast of low precision parameters may affect the precision of model checkpoints." + ) + + # if the previous and current models are same, delete the previous one + if len(self._models) > 1 and (self._models[-2] is self._models[-1]): + del self._models[-2] + self._models[-1] = model + elif self.distributed_type == DistributedType.MULTI_CPU: + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler else {} + model = torch.nn.parallel.DistributedDataParallel(model, **kwargs) + if self.ddp_handler is not None: + self.ddp_handler.register_comm_hook(model) + elif self.distributed_type == DistributedType.XLA and self.state.fork_launched: + model = xmp.MpModelWrapper(model).to(self.device) + # Now we can apply the FP8 autocast + if self.fp8_backend == FP8BackendType.TE and self.delayed_fp8_autocast: + model = apply_fp8_autowrap(model, self.te_recipe_handler or self.fp8_recipe_handler) + # torch.compile should be called last and only if the model isn't already compiled + if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model): + if self.state.dynamo_plugin.use_regional_compilation: + model = compile_regions(model, **self.state.dynamo_plugin.to_kwargs()) + else: + model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs()) + return model + + def _prepare_ao(self, *args): + if not is_torchao_available(): + raise ImportError( + "`torchao` was not found on your system or is too old of a version. 
Please ensure that `torchao >= 0.6.1` is installed" + ) + + if self.is_fsdp2: + models = [x for x in args if isinstance(x, torch.nn.Module)] + optimizers = [x for x in args if isinstance(x, torch.optim.Optimizer)] + for arg in args: + if isinstance(arg, torch.nn.Module): + convert_model_to_fp8_ao( + arg, + config=self.ao_recipe_handler.config, + module_filter_func=self.ao_recipe_handler.module_filter_func, + ) + + # Invariant: with FSDP2, optimizer is always passed to `prepare()` together with model + # We only precompute scales if float8 all gather is enabled, possibly can add a flag for this later + if self.is_fsdp2 and len(optimizers) > 0 and self.ao_recipe_handler.config.enable_fsdp_float8_all_gather: + from torchao.float8 import precompute_float8_dynamic_scale_for_fsdp + + optimizers[0].register_step_post_hook( + lambda *args, **kwargs: precompute_float8_dynamic_scale_for_fsdp(models[0]) + ) + + return args + + def _prepare_te(self, *args): + if not is_transformer_engine_available(): + raise ImportError( + "`transformer_engine` was not found on your system. Please ensure that `transformer_engine` is installed" + ) + model, optimizer = None, None + num_models, num_optimizers = 0, 0 + result = [obj for obj in args] + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + num_models += 1 + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + num_optimizers += 1 + if optimizer is None and model is None: + return result + elif optimizer is None or model is None: + raise ValueError( + "You must pass a model and an optimizer together to `accelerate.prepare()` when using TransformerEngine." + ) + elif num_models > 1 or num_optimizers > 1: + raise ValueError( + f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with TransformerEngine." 
+ ) + old_named_params = self._get_named_parameters(model) + with torch.no_grad(): + convert_model(model) + new_named_params = self._get_named_parameters(model) + mapping = {p: new_named_params[n] for n, p in old_named_params.items()} + # We need to switch the optimizer params to the new params *after* the model is wrapped in FSDP + for param_group in optimizer.param_groups: + param_group["params"] = [mapping[p] for p in param_group["params"]] + + return result + + def _prepare_deepspeed(self, *args): + import deepspeed + + ds_initialize = deepspeed.initialize + if self.fp8_backend == FP8BackendType.MSAMP: + # MS-AMP requires DeepSpeed patches + from msamp import deepspeed as msamp_deepspeed + + ds_initialize = msamp_deepspeed.initialize + + deepspeed_plugin = self.deepspeed_plugin + + is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args) + tp_size = deepspeed_plugin.deepspeed_config.get("tensor_parallel", {}).get("autotp_size", 0) + if tp_size > 1: + if not compare_versions("deepspeed", ">=", "0.16.4"): + raise ImportError( + "Deepspeed TP requires deepspeed >= 0.16.4, Please update DeepSpeed via `pip install deepspeed -U`." + ) + if not is_torch_version(">=", "2.2.0"): + raise ImportError( + "Tried to use TP, but `torch.distributed.device_mesh` requires PyTorch >= 2.2.0. 
Please upgrade your PyTorch version" + ) + from torch.distributed.device_mesh import init_device_mesh + + mesh_dim_name = "tp" + self.state.ds_device_mesh = init_device_mesh(self.device.type, (tp_size,), mesh_dim_names=(mesh_dim_name,)) + + result = [ + self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj + for obj in args + ] + + if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"): + if is_dataloader_present: + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if any(bs is None for bs in batch_sizes): + raise ValueError( + "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. " + "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " + "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." + ) + if self.split_batches: + batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes] + + batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " + f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})." + ) + else: + raise ValueError( + "When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders " + "with `batch_size` attribute returning an integer value " + "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " + "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." 
+ ) + else: + batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu") + + # handle `gradient_accumulation_steps` when the value is `auto` + deepspeed_plugin.fill_match( + "gradient_accumulation_steps", + must_match=False, + gradient_accumulation_steps=self.gradient_accumulation_steps, + ) + + deepspeed_gradient_accumulation_steps = deepspeed_plugin.get_value("gradient_accumulation_steps") + # update gradient_accumulation_steps if there is a mismatch + if deepspeed_gradient_accumulation_steps != self.gradient_accumulation_steps: + logger.warning( + f"Gradient accumulation steps mismatch: GradientAccumulationPlugin has {self.gradient_accumulation_steps}, " + f"DeepSpeed config has {deepspeed_gradient_accumulation_steps}. Using DeepSpeed's value." + ) + self.gradient_accumulation_steps = deepspeed_gradient_accumulation_steps + + config_kwargs = { + "gradient_clipping": 1.0, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + # This is skipped when preparing just a model + if batch_size_per_device is not None: + config_kwargs["train_micro_batch_size_per_gpu"] = batch_size_per_device + config_kwargs["train_batch_size"] = ( + batch_size_per_device * deepspeed_plugin.get_value("gradient_accumulation_steps") * self.num_processes + ) + + model = None + optimizer = None + scheduler = None + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)): + optimizer = obj + elif (isinstance(obj, (LRScheduler, DummyScheduler))) or ( + type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + scheduler = obj + + if optimizer is not None: + if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot specify an optimizer in the config file and in the code at the same time. 
" + "Please remove the optimizer from the config file or " + "create `accelerate.utils.DummyOptim` in the code." + ) + elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot create a `DummyOptim` without specifying an optimizer in the config file." + ) + + if isinstance(optimizer, (torch.optim.Optimizer)): + deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True + + if scheduler is not None: + if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You cannot specify a scheduler in the config file and in the code at the same time. " + "Please remove the scheduler from the config file or " + "create `accelerate.utils.DummyScheduler` in the code." + ) + elif ( + "scheduler" not in deepspeed_plugin.deepspeed_config + and isinstance(scheduler, (DummyScheduler)) + and scheduler.lr_scheduler_callable is None + ): + raise ValueError( + "Either specify a scheduler in the config file or " + "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`." + ) + + if optimizer is not None and scheduler is not None: + if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You can only specify `accelerate.utils.DummyScheduler` in the code when using " + "`accelerate.utils.DummyOptim`." 
+ ) + + if model is not None: + # If we are using FP8, we need to apply the autowrap now + if self.fp8_backend == FP8BackendType.TE: + model = apply_fp8_autowrap(model, self.fp8_recipe_handler) + # if the model is an MOE, set the appropriate MOE layers as leaf Z3 modules + deepspeed_plugin.set_moe_leaf_modules(model) + # deal with config keys that use `auto` value and rely on model's hidden_size + hidden_size_based_keys = [ + "zero_optimization.reduce_bucket_size", + "zero_optimization.stage3_prefetch_bucket_size", + "zero_optimization.stage3_param_persistence_threshold", + ] + hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)] + if len(hidden_size_auto_keys) > 0: + reasoning = ( + "therefore it's not possible to automatically fill out the following `auto` entries " + + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " + + "`auto` values for these keys with an integer value of your choice." + ) + if not hasattr(model, "config"): + raise ValueError("Can't find `model.config` entry, " + reasoning) + + if hasattr(model.config, "hidden_size"): + hidden_size = model.config.hidden_size + elif hasattr(model.config, "hidden_sizes"): + # if there are many hidden sizes pick the largest one + hidden_size = max(model.config.hidden_sizes) + else: + raise ValueError( + "Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning + ) + + config_kwargs.update( + { + "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, + "zero_optimization.stage3_prefetch_bucket_size": int(0.9 * hidden_size * hidden_size), + "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, + } + ) + + if isinstance(optimizer, (DummyOptim)): + config_kwargs.update( + {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay} + ) + if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None: + max_lr = ( + 
getattr(scheduler.optimizer, "lr", None) + if getattr(scheduler.optimizer, "defaults", None) is None + else scheduler.optimizer.defaults["lr"] + ) + config_kwargs.update( + { + "scheduler.params.warmup_min_lr": 0, + "scheduler.params.warmup_max_lr": max_lr, + "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps, + } + ) + if scheduler.total_num_steps is not None: + config_kwargs["scheduler.params.total_num_steps"] = ( + math.ceil(scheduler.total_num_steps / self.num_processes) + if not self.split_batches + else scheduler.total_num_steps + ) + deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs) + self.deepspeed_config = deepspeed_plugin.deepspeed_config + kwargs = dict(model=model, config_params=self.deepspeed_config) + if optimizer is not None: + if isinstance(optimizer, (DummyOptim)): + kwargs["model_parameters"] = optimizer.params + if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None: + kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable + else: + if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get( + "device", "none" + ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True): + if self.device.type == "hpu" and os.environ.get("PT_HPU_LAZY_MODE", "1") == "1": + raise ValueError( + "You can't use an Offload Optimizer with HPU in Lazy Mode. " + "Please set the environment variable `PT_HPU_LAZY_MODE` to `0`." 
+ ) + + optimizer = map_pytorch_optim_to_deepspeed(optimizer) + kwargs["optimizer"] = optimizer + if scheduler is not None: + if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES: + kwargs["lr_scheduler"] = scheduler + + if self.device.type == "hpu": + # This env variable is initialized here to make sure it is set to "true" + # It should be done by the launcher but it does not work for multi-node runs + os.environ["DEEPSPEED_USE_HPU"] = "true" + + engine, optimizer, _, lr_scheduler = ds_initialize(**kwargs) + + if compare_versions("deepspeed", ">=", "0.14.4") and self.state.dynamo_plugin.backend != DynamoBackend.NO: + compile_kwargs = self.state.dynamo_plugin.to_kwargs() + if self.state.dynamo_plugin.use_regional_compilation: + compile_regions_deepspeed(engine.module, **compile_kwargs) + else: + engine.compile(backend=compile_kwargs.pop("backend"), compile_kwargs=compile_kwargs) + if optimizer is not None: + optimizer = DeepSpeedOptimizerWrapper(optimizer) + if scheduler is not None: + if lr_scheduler is None: + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + else: + scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = engine + elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)): + result[i] = optimizer + elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or ( + type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + result[i] = scheduler + # pointing for deepspeed_engine_wrapped.backward() + if self.deepspeed_engine_wrapped is None: + self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine) + else: + logger.warning( + "A wrapped DeepSpeed engine reference is currently tied for this `Accelerator()` instance. 
" + "If you want to call `accelerator.backward()` referencing a new model/engine, " + "please create a separate `Accelerator()` instance and call `accelerator.prepare()` on it." + ) + self._models.append(engine) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + return tuple(result) + + def _prepare_megatron_lm(self, *args): + megatron_lm_plugin = self.state.megatron_lm_plugin + micro_batch_size = None + if not megatron_lm_plugin.megatron_dataset_flag: + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if len(batch_sizes) == 0: + raise ValueError( + "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM." + ) + + micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " + f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})." + ) + else: + for obj in args: + if isinstance(obj, MegatronLMDummyDataLoader): + micro_batch_size = obj.dataset_args["micro_batch_size"] + break + if micro_batch_size is not None: + dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree) + megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree) + else: + raise ValueError( + "When you do not pass the dataloader parameter, the `data_parallel_size`, " + "`micro_batch_size`, and `global_batch_size` megatron parameters will not be updated." 
+ ) + model = None + optimizer = None + scheduler = None + batch_data = None + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None: + batch_data = next(iter(obj)) + elif isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)): + scheduler = obj + + if model is not None: + megatron_lm_plugin.set_network_size_args(model, batch_data) + if optimizer is not None: + megatron_lm_plugin.set_optimizer_type(optimizer) + if scheduler is not None: + if not isinstance(scheduler, MegatronLMDummyScheduler): + raise ValueError( + "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead." + ) + megatron_lm_plugin.set_scheduler_args(scheduler) + + # initialize megatron-lm + megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args) + + (model, optimizer, scheduler) = megatron_lm_prepare_model_optimizer_scheduler(self) + self.wait_for_everyone() + + counter = 0 + result = [] + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader): + result.append(megatron_lm_prepare_data_loader(self, obj)) + counter += 1 + elif isinstance(obj, MegatronLMDummyDataLoader): + if counter == 0: + obj.set_megatron_data_args() + dataloaders = megatron_lm_prepare_data_loader(self, obj) + result.append(dataloaders[counter]) + counter += 1 + else: + result.append(obj) + + if model is not None: + model = MegatronEngine(self, model, optimizer, scheduler) + if optimizer is not None: + optimizer = MegatronLMOptimizerWrapper(optimizer) + if scheduler is not None: + scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], torch.optim.Optimizer): + result[i] = optimizer + elif isinstance(result[i], MegatronLMDummyScheduler): + 
result[i] = scheduler + + if model is not None: + self._models.append(model) + if len(self._models) > 1: + raise AssertionError( + "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM" + ) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + + return tuple(result) + + def _prepare_ipex(self, *args): + """ + Prepares model and optimizer for training with IPEX on CPU/XPU. This covers 3 cases, IPEX compiled with CPU + only support, IPEX compiled with XPU support and training with XPU pytorch backend available in stock pytorch + starting from version 2.4. + """ + + # ipex.optimize() is available only for IPEX, both IPEX-CPU and IPEX-XPU + if is_ipex_available(): + import intel_extension_for_pytorch as ipex + else: + raise ImportError( + "IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer" + " to https://github.com/intel/intel-extension-for-pytorch." + ) + + models = [] + optimizers = [] + result = [obj for obj in args] + for i, obj in enumerate(result): + if isinstance(obj, torch.nn.Module): + model = obj + model.train() + models.append((i, model)) + elif isinstance(obj, (torch.optim.Optimizer)): + optimizers.append((i, obj)) + + # Impossible to determine what to do if multiple models and/or optimizers are provided + if len(optimizers) > 1 or (len(models) > 1 and len(optimizers) == 1): + raise ValueError( + "Prepare with IPEX expects either 1+ models and no optimizer OR a single model-optimizer pair." 
            )

        # Nothing to do
        if len(models) == 0 and len(optimizers) == 0:
            return result

        # NOTE(review): only bf16 gets an explicit dtype; fp16/None fall through as None — presumably
        # ipex.optimize then keeps the model's native dtype. TODO confirm against IPEX docs.
        dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
        # Multiple models and no optimizer (inference) are provided
        if len(models) > 0 and len(optimizers) == 0:
            for i, model in models:
                # Move CPU-resident models onto the XPU before optimizing for it
                if self.device.type == "xpu" and next(model.parameters()).device.type == "cpu":
                    model = model.to(self.device)
                # optimizer=None selects inference-mode optimization; second return value is unused
                model, _ = ipex.optimize(model, optimizer=None, dtype=dtype, inplace=True, level="O1")
                # Replace in result
                result[i] = model

        # A single model-optimizer pair (training) is provided
        if len(models) == 1 and len(optimizers) == 1:
            i_model, model = models[0]
            i_optimizer, optimizer = optimizers[0]
            if self.device.type == "xpu" and next(model.parameters()).device.type == "cpu":
                model = model.to(self.device)
            # Training-mode optimization returns both an optimized model and optimizer
            model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1")
            # Replace in result
            result[i_model] = model
            result[i_optimizer] = optimizer

        return tuple(result)

    def _prepare_device_mesh(self):
        """
        Prepare the device mesh for distributed training. The dataloader will determine how to load data based on the
        device mesh.
        """
        # DeepSpeed may have built its own mesh (`ds_device_mesh`) during init; prefer it when present
        # so dataloader sharding agrees with the DeepSpeed topology.
        if self.distributed_type == DistributedType.DEEPSPEED and hasattr(self.state, "ds_device_mesh"):
            return self.state.ds_device_mesh
        else:
            return self.torch_device_mesh

    def _prepare_msamp(self, *args, device_placement):
        if not is_msamp_available():
            raise ImportError(
                "MS-AMP was not found on your system. Please ensure that MS-AMP is available "
                " or choose `'te'` as the backend for FP8 mixed precision training."
+ ) + # We've already checked for FSDP + MS-AMP during `__init__` + import msamp + + model, optimizer = None, None + optimizer_index = None + num_models, num_optimizers = 0, 0 + result = [obj for obj in args] + for i, obj in enumerate(result): + if isinstance(obj, torch.nn.Module): + model = obj + num_models += 1 + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + optimizer_index = i + num_optimizers += 1 + # DataLoader/Scheduler case + if optimizer is None and model is None: + return result, device_placement + elif optimizer is None or model is None: + raise ValueError( + "You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP." + ) + elif num_models > 1 or num_optimizers > 1: + raise ValueError( + f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with MS-AMP." + ) + else: + # DEPRECATE @ 2.0 + if self.fp8_recipe_handler is not None: + opt_level = self.fp8_recipe_handler.opt_level + else: + opt_level = self.msamp_recipe_handler.opt_level + model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level) + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], (torch.optim.Optimizer)): + result[i] = optimizer + if optimizer_index is not None: + # NOTE: MS-AMP moves the optimizer, but *not* the model to the right device + device_placement[optimizer_index] = False + return tuple(result), device_placement + + def prepare_data_loader( + self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None + ): + """ + Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + data_loader (`torch.utils.data.DataLoader`): + A vanilla PyTorch DataLoader to prepare + device_placement (`bool`, *optional*): + Whether or not to place the batches on the proper device in the prepared dataloader. 
Will default to + `self.device_placement`. + slice_fn_for_dispatch (`Callable`, *optional*`): + If passed, this function will be used to slice tensors across `num_processes`. Will default to + [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will + be ignored otherwise. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> data_loader = torch.utils.data.DataLoader(...) + >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True) + ``` + """ + # Ensure we can't double wrap a DataLoader due to `find_batch_size` + if getattr(data_loader, "_is_accelerate_prepared", False): + if data_loader not in self._dataloaders: + self._dataloaders.append(data_loader) + return data_loader + if device_placement is None: + device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False + + device_mesh = self._prepare_device_mesh() + + prepared_data_loader = prepare_data_loader( + data_loader, + self.device, + num_processes=self.num_processes, + process_index=self.process_index, + split_batches=self.split_batches, + put_on_device=device_placement, + rng_types=self.rng_types.copy(), + dispatch_batches=self.dispatch_batches, + even_batches=self.even_batches, + slice_fn_for_dispatch=slice_fn_for_dispatch, + use_seedable_sampler=self.use_seedable_sampler, + data_seed=self.dataloader_config.data_seed, + non_blocking=self.non_blocking, + use_stateful_dataloader=self.use_stateful_dataloader, + torch_device_mesh=device_mesh, + ) + self._dataloaders.append(prepared_data_loader) + return prepared_data_loader + + def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None): + """ + Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. 
+ + Args: + optimizer (`torch.optim.Optimizer`): + A vanilla PyTorch optimizer to prepare + device_placement (`bool`, *optional*): + Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> optimizer = torch.optim.Adam(...) + >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True) + ``` + """ + if is_lomo_available(): + # We need to import locally to avoid circular imports since lomo imports stuff from + # transformers & accelerate + from lomo_optim import AdaLomo, Lomo + + # Support multiple optimizers: https://github.com/huggingface/accelerate/pull/2695#discussion_r1589164607 + self.has_lomo_optimizer |= isinstance(optimizer, (Lomo, AdaLomo)) + + # Ensure we can't double wrap an optimizer due to `find_batch_size` + if getattr(optimizer, "_is_accelerate_prepared", False): + if optimizer not in self._optimizers: + self._optimizers.append(optimizer) + return optimizer + if device_placement is None: + device_placement = self.device_placement + # NOTE: Special case with MS-AMP we do *not* pass in the scaler explicitly to the `AcceleratedOptimizer`, + # Their optimizer handles it for us. + scaler = None if self.fp8_backend == FP8BackendType.MSAMP else self.scaler + optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=scaler) + self._optimizers.append(optimizer) + return optimizer + + def prepare_scheduler(self, scheduler: LRScheduler): + """ + Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + scheduler (`torch.optim.lr_scheduler.LRScheduler`): + A vanilla PyTorch scheduler to prepare + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> optimizer = torch.optim.Adam(...) 
        >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
        >>> scheduler = accelerator.prepare_scheduler(scheduler)
        ```
        """
        # Ensure we can't double wrap a scheduler due to `find_batch_size`
        if getattr(scheduler, "_is_accelerate_prepared", False):
            if scheduler not in self._schedulers:
                self._schedulers.append(scheduler)
            return scheduler
        # We try to find the optimizer associated with `scheduler`, the default is the full list.
        # NOTE(review): if no single match is found, `optimizer` stays the whole list of prepared
        # optimizers — AcceleratedScheduler presumably accepts either; confirm at its definition.
        optimizer = self._optimizers
        for opt in self._optimizers:
            # `opt` is a wrapped (Accelerated) optimizer; compare against its inner `.optimizer`,
            # which is what a vanilla scheduler holds a reference to.
            if getattr(scheduler, "optimizer", None) == opt.optimizer:
                optimizer = opt
                break
        scheduler = AcceleratedScheduler(
            scheduler,
            optimizer,
            step_with_optimizer=self.step_scheduler_with_optimizer,
            split_batches=self.split_batches,
        )
        self._schedulers.append(scheduler)
        return scheduler

    def backward(self, loss, **kwargs):
        """
        Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based
        on the configuration.

        Should be used in lieu of `loss.backward()`.
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> outputs = model(inputs) + >>> loss = loss_fn(outputs, labels) + >>> accelerator.backward(loss) + ``` + """ + learning_rate = kwargs.get("learning_rate") + + if self.distributed_type != DistributedType.DEEPSPEED: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.gradient_accumulation_steps + if self.distributed_type == DistributedType.DEEPSPEED: + self.deepspeed_engine_wrapped.backward(loss, sync_gradients=self.sync_gradients, **kwargs) + elif self.distributed_type == DistributedType.MEGATRON_LM: + return + elif self.scaler is not None: + self.scaler.scale(loss).backward(**kwargs) + elif learning_rate is not None and self.has_lomo_optimizer: + self.lomo_backward(loss, learning_rate) + else: + loss.backward(**kwargs) + + def set_trigger(self): + """ + Sets the internal trigger tensor to 1 on the current process. A latter check should follow using this which + will check across all processes. + + Note: + Does not require `wait_for_everyone()` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume later in the training script + >>> # `should_do_breakpoint` is a custom function to monitor when to break, + >>> # e.g. when the loss is NaN + >>> if should_do_breakpoint(loss): + ... accelerator.set_trigger() + >>> # Assume later in the training script + >>> if accelerator.check_breakpoint(): + ... break + ``` + """ + self.flag_tensor = torch.tensor(1, device=self.device) + + def check_trigger(self): + """ + Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and + reset the trigger tensor to 0. 
+ + Note: + Does not require `wait_for_everyone()` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume later in the training script + >>> # `should_do_breakpoint` is a custom function to monitor when to break, + >>> # e.g. when the loss is NaN + >>> if should_do_breakpoint(loss): + ... accelerator.set_trigger() + >>> # Assume later in the training script + >>> if accelerator.check_trigger(): + ... break + ``` + """ + # Now that we are outside `__init__`, we can initialize it if it is `None` on device + if self.flag_tensor is None: + self.flag_tensor = torch.tensor(0, device=self.device) + flag_tensor = self.reduce(self.flag_tensor) + if flag_tensor.item() >= 1: + self.flag_tensor = torch.tensor(0, device=self.device) + return True + return False + + def unscale_gradients(self, optimizer=None): + """ + Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings. + + Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`] + + Args: + optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*): + The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers + that were passed to [`~Accelerator.prepare`]. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer = accelerator.prepare(model, optimizer) + >>> outputs = model(inputs) + >>> loss = loss_fn(outputs, labels) + >>> accelerator.backward(loss) + >>> accelerator.unscale_gradients(optimizer=optimizer) + ``` + """ + if self.native_amp and self.mixed_precision == "fp16": + if optimizer is None: + # TODO: this unscales all optimizers where we should only unscale the one where parameters are. 
+ optimizer = self._optimizers + elif not isinstance(optimizer, (tuple, list)): + optimizer = [optimizer] + for opt in optimizer: + while isinstance(opt, AcceleratedOptimizer): + opt = opt.optimizer + self.scaler.unscale_(opt) + + def clip_grad_norm_(self, parameters, max_norm, norm_type=2): + """ + Should be used in place of `torch.nn.utils.clip_grad_norm_`. + + Returns: + `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) + ... optimizer.step() + ``` + """ + if self.distributed_type == DistributedType.FSDP: + self.unscale_gradients() + parameters = [p for p in parameters] + for model in self._models: + if parameters == [p for p in model.parameters()]: + if not self.is_fsdp2: + return model.clip_grad_norm_(max_norm, norm_type) + else: + return torch.nn.utils.clip_grad_norm_( + parameters, max_norm, norm_type=norm_type + ) # viz: https://github.com/pytorch/torchtitan/blob/main/docs/fsdp.md + elif self.distributed_type == DistributedType.DEEPSPEED: + # DeepSpeed handles gradient clipping internally, but we can retrieve the gradient norm + if self.deepspeed_engine_wrapped is not None: + return self.deepspeed_engine_wrapped.get_global_grad_norm() + return None + elif self.distributed_type == DistributedType.XLA: + # Reduce gradients first for XLA + for acc_opt in self._optimizers: + if not acc_opt.gradient_state.is_xla_gradients_synced: + opt = acc_opt + while isinstance(opt, AcceleratedOptimizer): + opt = opt.optimizer + 
                    gradients = xm._fetch_gradients(opt)
                    # Use xm.all_reduce to perform an in-place all-reduce. Recursively all-reducing each
                    # tensor one by one via self.reduce is non-inplace.
                    xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes)
                    # Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step.
                    acc_opt.gradient_state.is_xla_gradients_synced = True
            # NOTE(review): FSDP-on-XLA is detected via env var rather than self.distributed_type;
            # FSDP-wrapped models expose their own clip_grad_norm_ — confirm against the launcher setup.
            if os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true":
                self.unscale_gradients()
                parameters = [p for p in parameters]
                for model in self._models:
                    if parameters == [p for p in model.parameters()]:
                        return model.clip_grad_norm_(max_norm, norm_type)
        # Default path: unscale (noop unless fp16 AMP) then clip with the stock PyTorch utility.
        self.unscale_gradients()
        return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)

    def clip_grad_value_(self, parameters, clip_value):
        """
        Should be used in place of `torch.nn.utils.clip_grad_value_`.

        Raises an `Exception` under DeepSpeed or FSDP, which only support norm-based clipping.

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator(gradient_accumulation_steps=2)
        >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)

        >>> for input, target in dataloader:
        ...     optimizer.zero_grad()
        ...     output = model(input)
        ...     loss = loss_func(output, target)
        ...     accelerator.backward(loss)
        ...     if accelerator.sync_gradients:
        ...         accelerator.clip_grad_value_(model.parameters(), clip_value)
        ...         optimizer.step()
        ```
        """
        if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:
            raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.")
        self.unscale_gradients()
        torch.nn.utils.clip_grad_value_(parameters, clip_value)

    def gather(self, tensor):
        """
        Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to
        regroup the predictions from all processes when doing evaluation.

        Note:
            This gather happens in all processes.
+ + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to gather across all processes. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the + first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors. + + Example: + + ```python + >>> # Assuming four processes + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.tensor([accelerator.process_index]) + >>> gathered_tensor = accelerator.gather(process_tensor) + >>> gathered_tensor + tensor([0, 1, 2, 3]) + ``` + """ + return gather(tensor) + + def gather_for_metrics(self, input_data, use_gather_object=False): + """ + Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be + used for gathering the inputs and targets for metric calculation. + + Args: + input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`): + The tensors or objects for calculating metrics across all processes + use_gather_object(`bool`): + Whether to forcibly use gather_object instead of gather (which is already done if all objects passed do + not contain tensors). This flag can be useful for gathering tensors with different sizes that we don't + want to pad and concatenate along the first dimension. Using it with GPU tensors is not well supported + and inefficient as it incurs GPU -> CPU transfer since tensors would be pickled. 
+ + Example: + + ```python + >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5) + >>> dataloader = accelerator.prepare(dataloader) + >>> batch = next(iter(dataloader)) + >>> gathered_items = accelerator.gather_for_metrics(batch) + >>> len(gathered_items) + 9 + ``` + """ + + try: + recursively_apply(lambda x: x, input_data, error_on_other_type=True) + all_tensors = True + except TypeError: + all_tensors = False + + use_gather_object = use_gather_object or not all_tensors + + if use_gather_object: + data = gather_object(input_data) + else: + data = self.gather(input_data) + + try: + if self.gradient_state.end_of_dataloader: + # at the end of a dataloader, `gather_for_metrics` regresses to + # `gather` unless the dataset has a remainder so log. + if self.gradient_state.remainder == -1: + logger.info( + "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself." + ) + return data + elif self.gradient_state.remainder > 0: + # Last batch needs to be truncated on distributed systems as it contains additional samples + def _adjust_samples(tensor): + return tensor[: self.gradient_state.remainder] + + if use_gather_object: + # gather_object put the objects in a list + return _adjust_samples(data) + else: + return recursively_apply(_adjust_samples, data) + else: # remainder is 0 + # no remainder even though at end of dataloader, so nothing to do. + return data + else: + # Not at the end of the dataloader, no need to adjust the tensors + return data + except Exception: + # Dataset had no length or raised an error + return data + + def reduce(self, tensor, reduction="sum", scale=1.0): + """ + Reduce the values in *tensor* across all processes based on *reduction*. + + Note: + All processes get the reduced value. 

        Args:
            tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
                The tensors to reduce across all processes.
            reduction (`str`, *optional*, defaults to "sum"):
                A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
            scale (`float`, *optional*, defaults to 1.0):
                A default scaling value to be applied after the reduce, only valid on XLA.

        Returns:
            `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
                The reduced tensor(s).

        Example:

        ```python
        >>> # Assuming two processes
        >>> import torch
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator()
        >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index)
        >>> process_tensor = process_tensor.to(accelerator.device)
        >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum")
        >>> reduced_tensor
        tensor([4, 6])
        ```
        """
        # Thin delegate to the module-level `reduce` utility; all branching on backend lives there.
        return reduce(tensor, reduction, scale)

    def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
        """
        Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
        they can safely be gathered.

        Args:
            tensor (nested list/tuple/dictionary of `torch.Tensor`):
                The data to gather.
            dim (`int`, *optional*, defaults to 0):
                The dimension on which to pad.
            pad_index (`int`, *optional*, defaults to 0):
                The value with which to pad.
            pad_first (`bool`, *optional*, defaults to `False`):
                Whether to pad at the beginning or the end.

        Returns:
            `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
                The padded tensor(s).
+ + Example: + + ```python + >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2 + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device) + >>> padded_tensor = accelerator.pad_across_processes(process_tensor) + >>> padded_tensor.shape + torch.Size([2]) + ``` + """ + return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first) + + def unwrap_model(self, model, keep_fp32_wrapper: bool = True, keep_torch_compile: bool = True): + """ + Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving + the model. + + Args: + model (`torch.nn.Module`): + The model to unwrap. + keep_fp32_wrapper (`bool`, *optional*, defaults to `True`): + Whether to not remove the mixed precision hook if it was added. + keep_torch_compile (`bool`, *optional*, defaults to `True`): + Whether to not unwrap compiled model if compiled. + Returns: + `torch.nn.Module`: The unwrapped model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> from torch.nn.parallel import DistributedDataParallel + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model = accelerator.prepare(MyModel()) + >>> print(model.__class__.__name__) + DistributedDataParallel + + >>> model = accelerator.unwrap_model(model) + >>> print(model.__class__.__name__) + MyModel + ``` + """ + return extract_model_from_parallel(model, keep_fp32_wrapper, keep_torch_compile) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is only run in one process). Useful to do before saving a model. 
+ + Example: + + ```python + >>> # Assuming two GPU processes + >>> import time + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> if accelerator.is_main_process: + ... time.sleep(2) + >>> else: + ... print("I'm waiting for the main process to finish its sleep...") + >>> accelerator.wait_for_everyone() + >>> # Should print on every process at the same time + >>> print("Everyone is here") + ``` + """ + wait_for_everyone() + + @on_main_process + def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}): + """ + Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations + + Args: + project_name (`str`): + The name of the project. All trackers will save their data based on this + config (`dict`, *optional*): + Optional starting configuration to be logged. + init_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be + formatted like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers( + ... project_name="my_project", + ... config={"learning_rate": 0.001, "batch_size": 32}, + ... init_kwargs={"tensorboard": {"flush_secs": 60}}, + ... 
) + ``` + """ + for tracker in self.log_with: + if issubclass(type(tracker), GeneralTracker): + # Custom trackers are already initialized + self.trackers.append(tracker) + else: + tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)] + if tracker_init.requires_logging_directory: + # We can skip this check since it was done in `__init__` + self.trackers.append( + tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})) + ) + else: + self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {}))) + + for tracker in self.trackers: + tracker.start() + + if config is not None: + for tracker in self.trackers: + tracker.store_init_configuration(config) + + def get_tracker(self, name: str, unwrap: bool = False): + """ + Returns a `tracker` from `self.trackers` based on `name` on the main process only. + + Args: + name (`str`): + The name of a tracker, corresponding to the `.name` property. + unwrap (`bool`): + Whether to return the internal tracking mechanism or to return the wrapped tracker instead + (recommended). + + Returns: + `GeneralTracker`: The tracker corresponding to `name` if it exists. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> tensorboard_tracker = accelerator.get_tracker("tensorboard") + ``` + """ + if len(self.trackers) > 0: + for tracker in self.trackers: + if tracker.name == name: + return tracker.tracker if unwrap else tracker + raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.") + # Handle tracker only made on main process + return GeneralTracker(_blank=True) + + @on_main_process + def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}): + """ + Logs `values` to all stored trackers in `self.trackers` on the main process only. 

        Args:
            values (`dict`):
                Values should be a dictionary-like object containing only types `int`, `float`, or `str`.
            step (`int`, *optional*):
                The run step. If included, the log will be affiliated with this step.
            log_kwargs (`dict`, *optional*):
                A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted
                like so:
                ```python
                {"wandb": {"tags": ["tag_a", "tag_b"]}}
                ```

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator(log_with="tensorboard")
        >>> accelerator.init_trackers("my_project")
        >>> accelerator.log({"loss": 0.5, "accuracy": 0.9})
        ```
        """
        # Per-tracker kwargs are looked up by tracker name; missing entries default to no extra kwargs.
        for tracker in self.trackers:
            tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))

    def end_training(self):
        """
        Runs any special end training behaviors, such as stopping trackers on the main process only or destroying
        process group. Should always be called at the end of your script if using experiment tracking.

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator(log_with="tensorboard")
        >>> accelerator.init_trackers("my_project")
        >>> # Do training
        >>> accelerator.end_training()
        ```
        """
        for tracker in self.trackers:
            tracker.finish()

        # Tear down the torch.distributed process group via the shared state object.
        self.state.destroy_process_group()

    def save(self, obj, f, safe_serialization=False):
        """
        Save the object passed to disk once per machine. Use in place of `torch.save`.

        Args:
            obj (`object`): The object to save.
            f (`str` or `os.PathLike`): Where to save the content of `obj`.
            safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`

        Note:
            If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
            rather than only once on the main node.
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> arr = [0, 1, 2, 3] + >>> accelerator.save(arr, "array.pkl") + ``` + """ + save( + obj, + f, + save_on_each_node=self.project_configuration.save_on_each_node, + safe_serialization=safe_serialization, + ) + + def save_model( + self, + model: torch.nn.Module, + save_directory: Union[str, os.PathLike], + max_shard_size: Union[int, str] = "10GB", + safe_serialization: bool = True, + ): + """ + Save a model so that it can be re-loaded using load_checkpoint_in_model + + Arguments: + model: (`torch.nn.Module`): + Model to be saved. The model can be wrapped or unwraped. + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size + lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). + + + + If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard + which will be bigger than `max_shard_size`. + + + + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model = ... 
+ >>> accelerator.save_model(model, save_directory) + ``` + """ + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + # get the state_dict of the model + if any(has_offloaded_params(module) for module in model.modules()): + state_dict = get_state_dict_offloaded_model(model) + else: + if any(param.device == torch.device("meta") for param in model.parameters()): + raise RuntimeError("You can't save the model since some parameters are on the meta device.") + state_dict = self.get_state_dict(model) + + # Case: DeepSpeed zero3 gets gathered and `state_dict` is empty + if state_dict is None: + return + os.makedirs(save_directory, exist_ok=True) + + if safe_serialization: + state_dict = clean_state_dict_for_safetensors(state_dict) + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + filename_pattern = SAFE_WEIGHTS_PATTERN_NAME if safe_serialization else WEIGHTS_PATTERN_NAME + + state_dict_split = split_torch_state_dict_into_shards( + state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size + ) + + # Clean the folder from a previous save + for filename in os.listdir(save_directory): + full_filename = os.path.join(save_directory, filename) + # If we have a shard file that is not going to be replaced, we delete it, but only from the main process + # in distributed settings to avoid race conditions. + weights_no_suffix = weights_name.replace(".bin", "") + + # make sure that file to be deleted matches format of sharded file, e.g. 
pytorch_model-00001-of-00005 + filename_no_suffix = filename.replace(".bin", "") + reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") + + if ( + filename.startswith(weights_no_suffix) + and os.path.isfile(full_filename) + and filename not in state_dict_split.filename_to_tensors.keys() + and reg.fullmatch(filename_no_suffix) is not None + and PartialState().is_main_process + ): + os.remove(full_filename) + + # Save the model + for filename, tensors in state_dict_split.filename_to_tensors.items(): + shard = {tensor: state_dict[tensor] for tensor in tensors} + self.save(shard, os.path.join(save_directory, filename), safe_serialization=safe_serialization) + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME + save_index_file = os.path.join(save_directory, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + logger.info( + f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " + f"split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameters has been saved in the " + f"index located at {save_index_file}." + ) + else: + path_to_weights = os.path.join(save_directory, WEIGHTS_NAME) + logger.info(f"Model weights saved in {path_to_weights}") + + def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: + """ + Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`]. + + Args: + hook (`Callable`): + A function to be called in [`Accelerator.save_state`] before `save_checkpoint`. 
+ + The hook should have the following signature: + + `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None` + + The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weigths` + argument are the state dicts of the `models`, and the `input_dir` argument is the `input_dir` argument passed + to [`Accelerator.load_state`]. + + + + Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save + configurations in addition to model weights. Can also be used to overwrite model saving with a customized + method. In this case, make sure to remove already loaded weights from the weights list. + + + + Returns: + `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling + `handle.remove()` + """ + handle = hooks.RemovableHandle(self._save_model_state_pre_hook) + self._save_model_state_pre_hook[handle.id] = hook + return handle + + def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs): + """ + Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder. + + If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled + then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater + than `total_limit` then the oldest save is deleted. Each checkpoint is saved in separate folders named + `checkpoint_`. + + Otherwise they are just saved to `output_dir`. + + + + Should only be used when wanting to save a checkpoint during training and restoring the state in the same + environment. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. 
+ safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + save_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for saving model which can be passed to the underlying save function, such + as optional arguments for DeepSpeed's `save_checkpoint` function. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, lr_scheduler = ... + >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) + >>> accelerator.save_state(output_dir="my_checkpoint") + ``` + """ + if self.project_configuration.automatic_checkpoint_naming: + output_dir = os.path.join(self.project_dir, "checkpoints") + os.makedirs(output_dir, exist_ok=True) + if self.project_configuration.automatic_checkpoint_naming: + folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)] + if ( + self.project_configuration.total_limit is not None + and (len(folders) + 1 > self.project_configuration.total_limit) + and self.is_main_process + ): + + def _inner(folder): + return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] + + folders.sort(key=_inner) + logger.warning( + f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint." + ) + for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]: + shutil.rmtree(folder) + output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}") + if os.path.exists(output_dir): + raise ValueError( + f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with." 
+ ) + self.wait_for_everyone() + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving current state to {output_dir}") + + if self.distributed_type == DistributedType.XLA: + # Finish running the previous step before checkpointing + xm.mark_step() + + # Save the models taking care of FSDP and DeepSpeed nuances + weights = [] + for i, model in enumerate(self._models): + if self.distributed_type == DistributedType.FSDP: + logger.info("Saving FSDP model") + save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i) + logger.info(f"FSDP Model saved to output dir {output_dir}") + elif self.distributed_type == DistributedType.DEEPSPEED: + logger.info("Saving DeepSpeed Model and Optimizer") + ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" + model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs) + logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}") + elif self.distributed_type == DistributedType.MEGATRON_LM: + logger.info("Saving Megatron-LM Model, Optimizer and Scheduler") + model.save_checkpoint(output_dir) + logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}") + else: + weights.append(self.get_state_dict(model, unwrap=False)) + + # Save the optimizers taking care of FSDP and DeepSpeed nuances + optimizers = [] + if self.distributed_type == DistributedType.FSDP: + for i, opt in enumerate(self._optimizers): + logger.info("Saving FSDP Optimizer") + save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i) + logger.info(f"FSDP Optimizer saved to output dir {output_dir}") + elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + optimizers = self._optimizers + + # Save the lr schedulers taking care of DeepSpeed nuances + schedulers = [] + if self.distributed_type == DistributedType.DEEPSPEED: + for i, scheduler in enumerate(self._schedulers): + if isinstance(scheduler, 
DeepSpeedSchedulerWrapper): + continue + schedulers.append(scheduler) + elif self.distributed_type not in [DistributedType.MEGATRON_LM]: + schedulers = self._schedulers + + # Save the samplers of the dataloaders + dataloaders = self._dataloaders + + # Call model loading hooks that might have been registered with + # accelerator.register_model_state_hook + for hook in self._save_model_state_pre_hook.values(): + hook(self._models, weights, output_dir) + + save_location = save_accelerator_state( + output_dir, + weights, + optimizers, + schedulers, + dataloaders, + self.state.process_index, + self.step, + self.scaler, + save_on_each_node=self.project_configuration.save_on_each_node, + safe_serialization=safe_serialization, + ) + for i, obj in enumerate(self._custom_objects): + save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node) + self.project_configuration.iteration += 1 + return save_location + + def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: + """ + Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`]. + + Args: + hook (`Callable`): + A function to be called in [`Accelerator.load_state`] before `load_checkpoint`. + + The hook should have the following signature: + + `hook(models: list[torch.nn.Module], input_dir: str) -> None` + + The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the + `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`]. + + + + Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load + configurations in addition to model weights. Can also be used to overwrite model loading with a customized + method. In this case, make sure to remove already loaded models from the models list. 
+ + + + Returns: + `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling + `handle.remove()` + """ + handle = hooks.RemovableHandle(self._load_model_state_pre_hook) + self._load_model_state_pre_hook[handle.id] = hook + return handle + + def load_state(self, input_dir: str = None, load_kwargs: dict | None = None, **load_model_func_kwargs): + """ + Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects. + + + + Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for + checkpointing, it will not be loaded if stored in the directory. + + + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder all relevant weights and states were saved in. Can be `None` if + `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint. + load_kwargs (`dict`, *optional*): + Additional keyword arguments for the underlying `load` function, such as optional arguments for + state_dict and optimizer on. + load_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for loading model which can be passed to the underlying load function, + such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the + model and optimizer on. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, lr_scheduler = ... 
+ >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) + >>> accelerator.load_state("my_checkpoint") + ``` + """ + if input_dir is not None: + # Check if folder exists + input_dir = os.path.expanduser(input_dir) + if not os.path.isdir(input_dir): + raise ValueError(f"Tried to find {input_dir} but folder does not exist") + elif self.project_configuration.automatic_checkpoint_naming: + # Pick up from automatic checkpoint naming + input_dir = os.path.join(self.project_dir, "checkpoints") + folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)] + + def _inner(folder): + return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] + + folders.sort(key=_inner) + input_dir = folders[-1] + else: + raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.") + logger.info(f"Loading states from {input_dir}") + + # Load the models taking care of FSDP and DeepSpeed nuances + models = [] + for i, model in enumerate(self._models): + if self.distributed_type == DistributedType.FSDP: + logger.info("Loading FSDP model") + load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i) + logger.info(f"FSDP Model loaded from input dir {input_dir}") + elif self.distributed_type == DistributedType.DEEPSPEED: + logger.info("Loading DeepSpeed Model and Optimizer") + ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" + model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs) + logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}") + elif self.distributed_type == DistributedType.MEGATRON_LM: + logger.info("Loading Megatron-LM Model, Optimizer and Scheduler") + model.load_checkpoint(input_dir) + logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}") + else: + models.append(model) + + # We need to load the scaler state before the optimizer for FSDP2 + # 
(`torch.distributed.checkpoint.set_optimizer_state_dict`) which we use to set the state of the optimizer calls `optimizer.step` on + # a dummy tensor, but since the scaler is not initialized, it will raise an error (the scaler exists but its `_scale` is None) + scaler = None + if self.scaler is not None and self.is_fsdp2: + input_scaler_file = os.path.join(input_dir, SCALER_NAME) + scaler_state = torch.load(input_scaler_file) + self.scaler.load_state_dict(scaler_state) + # We also need to call the `_lazy_init_scale_growth_tracker` to initialize the scaler, as it would else be called + # on the first call to scale + self.scaler._lazy_init_scale_growth_tracker(self.scaler._device) + logger.info("GradScaler state loaded successfully") + else: + scaler = self.scaler + + # Load the optimizers taking care of FSDP and DeepSpeed nuances + optimizers = [] + if self.distributed_type == DistributedType.FSDP: + for i, opt in enumerate(self._optimizers): + logger.info("Loading FSDP Optimizer") + load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i) + logger.info(f"FSDP Optimizer loaded from input dir {input_dir}") + elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + optimizers = self._optimizers + + # Load the lr schedulers taking care of DeepSpeed nuances + schedulers = [] + if self.distributed_type == DistributedType.DEEPSPEED: + for i, scheduler in enumerate(self._schedulers): + if isinstance(scheduler, DeepSpeedSchedulerWrapper): + continue + schedulers.append(scheduler) + elif self.distributed_type not in [DistributedType.MEGATRON_LM]: + schedulers = self._schedulers + + dataloaders = self._dataloaders + + # Call model loading hooks that might have been registered with + # accelerator.register_model_state_hook + for hook in self._load_model_state_pre_hook.values(): + hook(models, input_dir) + + map_location = load_model_func_kwargs.pop("map_location", None) + if map_location is None: + if 
self.num_processes > 1 and self.multi_device and self.distributed_type != DistributedType.MULTI_XPU: + map_location = "on_device" + else: + map_location = "cpu" + + override_attributes = load_accelerator_state( + input_dir, + models, + optimizers, + schedulers, + dataloaders, + self.state.process_index, + scaler, + map_location, + load_kwargs, + **load_model_func_kwargs, + ) + if "step" in override_attributes: + self.step = override_attributes["step"] + custom_checkpoints = [ + f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None + ] + if len(custom_checkpoints) != len(self._custom_objects): + err = ( + f"Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:" + ) + err += f"\n\tFound checkpoints: {len(custom_checkpoints)}" + err += f"\n\tRegistered objects: {len(self._custom_objects)}\n" + err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects," + err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually." + raise RuntimeError(err) + else: + logger.info(f"Loading in {len(custom_checkpoints)} custom states") + for index, obj in enumerate(self._custom_objects): + load_custom_state(obj, input_dir, index) + + def free_memory(self, *objects): + """ + Will release all references to the internal objects stored and call the garbage collector. You should call this + method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, scheduler = ... 
+ >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) + >>> model, optimizer, scheduler = accelerator.free_memory(model, optimizer, scheduler) + ``` + """ + # Deepspeed needs a bit more prep that should be done first + if hasattr(self, "deepspeed_engine_wrapped"): + if self.deepspeed_engine_wrapped is not None: + self.deepspeed_engine_wrapped.engine.destroy() + self.deepspeed_engine_wrapped = None + objects = release_memory(*objects) + self._schedulers = [] + self._optimizers = [] + self._models = [] + self._dataloaders = [] + self.step = 0 + return objects + + def clear(self, *objects): + """ + Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the + garbage collector. You should call this method between two trainings with different models/optimizers. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, scheduler = ... + >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) + >>> model, optimizer, scheduler = accelerator.clear(model, optimizer, scheduler) + ``` + """ + return self.free_memory(*objects) + + def _get_named_parameters(self, *args, drop_refs=False): + named_parameters = {} + accessor_mapping = {} + for obj in args: + if isinstance(obj, torch.nn.Module): + obj = extract_model_from_parallel(obj) + if not drop_refs: + named_parameters.update({n: p for n, p in obj.named_parameters()}) + continue + + # we need this bit as `WeightWithDynamic...` returns 0 when `data_ptr()` is called, + # the underlying pointer is actually hidden in `_tensor` attribute + if self.fp8_backend == FP8BackendType.AO: + from torchao.float8.fsdp_utils import WeightWithDynamicFloat8CastTensor + + accessor_mapping[WeightWithDynamicFloat8CastTensor] = "_tensor" + # we know we're in FSDP2 so DTensor is available + if self.is_fsdp2: + from torch.distributed.tensor import DTensor + + 
accessor_mapping[DTensor] = "_local_tensor" + + named_parameters.update( + { + n: getattr(p, accessor_mapping[type(p)]).data_ptr() + if type(p) in accessor_mapping + else p.data_ptr() + for n, p in obj.named_parameters() + } + ) + return named_parameters + + def _get_devices(self, *args): + model_device = None + optimizer_device = None + for obj in args: + # Loop through model parameters and stop at the first once we have its device. + if isinstance(obj, torch.nn.Module): + for param in obj.parameters(): + model_device = param.device + break + # Loop through optimizer parameters groups and stop at the first once we have its device. + if isinstance(obj, torch.optim.Optimizer): + for param_group in obj.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + return (model_device, optimizer_device) + + def get_state_dict(self, model, unwrap=True): + """ + Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full + precision. + + Args: + model (`torch.nn.Module`): + A PyTorch model sent through [`Accelerator.prepare`] + unwrap (`bool`, *optional*, defaults to `True`): + Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict + + Returns: + `dict`: The state dictionary of the model potentially without full precision. 
+ + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> net = torch.nn.Linear(2, 2) + >>> net = accelerator.prepare(net) + >>> state_dict = accelerator.get_state_dict(net) + ``` + """ + + if self.distributed_type == DistributedType.DEEPSPEED: + zero3_sharding = self.deepspeed_config["zero_optimization"]["stage"] == 3 + tp_sharding = self.deepspeed_config.get("tensor_parallel", {}).get("autotp_size", 0) > 1 + if zero3_sharding or tp_sharding: + if model.zero_gather_16bit_weights_on_model_save(): + if tp_sharding and not compare_versions("deepspeed", ">=", "0.16.4"): + raise ImportError( + "Deepspeed TP requires deepspeed >= 0.16.4, Please update DeepSpeed via `pip install deepspeed -U`." + ) + state_dict = ( + model._consolidated_16bit_state_dict() + if tp_sharding + else model._zero3_consolidated_16bit_state_dict() + ) + else: + raise ValueError( + "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " + "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " + "set `zero3_save_16bit_model` to True when using `accelerate config`. " + "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." 
+ ) + else: + from deepspeed.checkpoint.utils import clone_tensors_for_torch_save + + state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict()) + elif self.is_fsdp2: + from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict + + options = StateDictOptions(full_state_dict=True, broadcast_from_rank0=True, cpu_offload=True) + state_dict = get_model_state_dict(model, options=options) + elif self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp import FullStateDictConfig, StateDictType + from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + + full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) + with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config): + state_dict = model.state_dict() + else: + if unwrap: + model = self.unwrap_model(model) + state_dict = model.state_dict() + + return state_dict + + def register_for_checkpointing(self, *objects): + """ + Makes note of `objects` and will save or load them in during `save_state` or `load_state`. + + These should be utilized when the state is being loaded or saved in the same script. It is not designed to be + used in different scripts. + + + + Every `object` must have a `load_state_dict` and `state_dict` function to be stored. + + + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function. + >>> obj = CustomObject() + >>> accelerator.register_for_checkpointing(obj) + >>> accelerator.save_state("checkpoint.pt") + ``` + """ + invalid_objects = [] + for obj in objects: + if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"): + invalid_objects.append(obj) + if len(invalid_objects) > 0: + err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. 
The following inputs are invalid:" + for index, obj in enumerate(invalid_objects): + err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`" + raise ValueError(err) + self._custom_objects.extend(objects) + + @contextmanager + def maybe_context_parallel( + self, + buffers: list[torch.Tensor] | None = None, + buffer_seq_dims: list[int] | None = None, + no_restore_buffers: set[torch.Tensor] | None = None, + ): + """ + A context manager that enables context parallel training. + + Args: + buffers (`list[torch.Tensor]`, `optional`): + Buffers, which are going to be sharded along the sequence dimension. Common examples are inputs, labels + or positional embedding buffers. This context manager will modify these buffers in-place, and after + exiting the context, the buffers will be restored to their original state. To avoid unnecessary + restores, you can use `no_restore_buffers` to specify which buffers don't need to be restored. + buffer_seq_dims (`list[int]`, `optional`): + Sequence dimensions of `buffers`. + no_restore_buffers (`set[torch.Tensor]`, `optional`): + This set must be a subset of `buffers`. Specifies which buffers from `buffers` argument won't be + restored after the context exits. These buffers will be then kept in sharded state. + + + + `context_parallel` is currently only supported together with FSDP2, and requires `parallelism_config.cp_size` > + 1. If either of these conditions are not met, this context manager will have no effect, though to enable fewer + code changes it will not raise an Exception. + + + + + + This context manager has to be recreated with each training step, as shown in the example below. + + + + Example: + + ```python + >>> for batch in dataloader: + ... with accelerator.maybe_context_parallel( + ... buffers=[batch["input_ids"], batch["attention_mask"]], + ... buffer_seq_dims=[1, 1], + ... no_restore_buffers={batch["input_ids"]}, + ... ): + ... outputs = model(batch) + ... ... 
+ ``` + """ + # We don't need to check FSDP2 as parallelism_config does that for us + # Invariant: in this branch self._cp_context is set, as it was set by `self._prepare_cp` + if self.parallelism_config and self.parallelism_config.cp_enabled: + with self._cp_context( + buffers=buffers, buffer_seq_dims=buffer_seq_dims, no_restore_buffers=no_restore_buffers + ): + yield + else: + logger.warning_once( + "Context parallel training is not enabled. This context manager will have no effect. " + "To enable it, set `parallelism_config.cp_size` > 1 in the `Accelerator` constructor." + ) + yield + + @contextmanager + def autocast(self, autocast_handler: AutocastKwargs = None): + """ + Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing + different will happen otherwise. + + A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is + useful in blocks under `autocast` where you want to revert to fp32. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(mixed_precision="fp16") + >>> with accelerator.autocast(): + ... train() + ``` + """ + if autocast_handler is None: + autocast_handler = self.autocast_handler + autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler) + autocast_context.__enter__() + # TODO: should the `yield` be in a try/finally block? + yield + autocast_context.__exit__(*sys.exc_info()) + + @contextmanager + def profile(self, profile_handler: ProfileKwargs | None = None): + """ + Will profile the code inside the context manager. The profile will be saved to a Chrome Trace file if + `profile_handler.output_trace_dir` is set. + + A different `profile_handler` can be passed in to override the one set in the `Accelerator` object. + + Args: + profile_handler (`ProfileKwargs`, *optional*): + The profile handler to use for this context manager. 
If not passed, will use the one set in the + `Accelerator` object. + + Example: + + ```python + # Profile with default settings + from accelerate import Accelerator + from accelerate.utils import ProfileKwargs + + accelerator = Accelerator() + with accelerator.profile() as prof: + train() + accelerator.print(prof.key_averages().table()) + + + # Profile with the custom handler + def custom_handler(prof): + print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10)) + + + kwargs = ProfileKwargs(schedule_option=dict(wait=1, warmup=1, active=1), on_trace_ready=custom_handler) + accelerator = Accelerator(kwarg_handler=[kwargs]) + with accelerator.profile() as prof: + for _ in range(10): + train_iteration() + prof.step() + + + # Profile and export to Chrome Trace + kwargs = ProfileKwargs(output_trace_dir="output_trace") + accelerator = Accelerator(kwarg_handler=[kwargs]) + with accelerator.profile(): + train() + ``` + """ + profile_handler = profile_handler or self.profile_handler or ProfileKwargs() + + with profile_handler.build() as profiler: + yield profiler + + if profile_handler.output_trace_dir is None: + return + + os.makedirs(profile_handler.output_trace_dir, exist_ok=True) + profiler.export_chrome_trace( + os.path.join(profile_handler.output_trace_dir, PROFILE_PATTERN_NAME.format(suffix=self.process_index)) + ) + self.wait_for_everyone() + + @property + def optimizer_step_was_skipped(self): + """ + Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which + case the learning rate should not be changed. + """ + for optimizer in self._optimizers: + if optimizer.step_was_skipped: + return True + return False + + def skip_first_batches(self, dataloader, num_batches: int = 0): + """ + Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. + + Args: + dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches. 
+ num_batches (`int`, *optional*, defaults to 0): The number of batches to skip + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2) + >>> # for the first epoch only + >>> for input, target in skipped_dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... optimizer.step() + + >>> # subsequent epochs + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... ... + ``` + """ + return skip_first_batches(dataloader, num_batches=num_batches) + + def __deepcopy__(self, memo): + logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.") + return self + + def verify_device_map(self, model: torch.nn.Module) -> bool: + """ + Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`. + """ + # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry. + for m in model.modules(): + if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1: + return True + + return False + + def lomo_backward(self, loss: torch.Tensor, learning_rate: float) -> None: + """ + Runs backward pass on LOMO optimizers. 
+ """ + if is_lomo_available(): + # We need to import locally to avoid circular imports since lomo imports stuff from + # transformers & accelerate + from lomo_optim import AdaLomo, Lomo + + if learning_rate is None: + raise ValueError("A learning rate must be passed in order to call backward pass with LOMO optimizers.") + + _backward_called = False + + for optimizer in self._optimizers: + if isinstance(optimizer.optimizer, (Lomo, AdaLomo)): + optimizer.optimizer.fused_backward(loss, learning_rate) + _backward_called = True + + if not _backward_called: + raise ValueError( + "Backward pass not properly called on LOMO optimizers. Are you sure you passed a LOMO optimizer in accelerator.prepare()?" + ) + + @property + def fp8_backend(self) -> FP8BackendType: + "Returns the configured backend for training in FP8" + if self.has_fp8_handler: + if self.fp8_recipe_handler is not None: + return FP8BackendType(self.fp8_recipe_handler.backend) + elif self.ao_recipe_handler is not None: + return FP8BackendType.AO + elif self.te_recipe_handler is not None: + return FP8BackendType.TE + elif self.msamp_recipe_handler is not None: + return FP8BackendType.MSAMP + elif self.state.deepspeed_plugin is not None and self.state.deepspeed_plugin.enable_msamp: + return FP8BackendType.MSAMP + + return FP8BackendType(parse_choice_from_env("ACCELERATE_FP8_BACKEND", "NO")) diff --git a/venv/lib/python3.10/site-packages/accelerate/big_modeling.py b/venv/lib/python3.10/site-packages/accelerate/big_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..2bace0f5615f37c84c1c3e77fe116081ed691bac --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/big_modeling.py @@ -0,0 +1,789 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import re +from contextlib import contextmanager +from functools import wraps +from typing import Optional, Union + +import torch +import torch.nn as nn + +from .hooks import ( + AlignDevicesHook, + CpuOffload, + LayerwiseCastingHook, + UserCpuOffloadHook, + add_hook_to_module, + attach_align_device_hook, + attach_align_device_hook_on_blocks, +) +from .utils import ( + OffloadedWeightsLoader, + check_cuda_p2p_ib_support, + check_device_map, + extract_submodules_state_dict, + find_tied_parameters, + get_balanced_memory, + infer_auto_device_map, + is_bnb_available, + is_mlu_available, + is_musa_available, + is_npu_available, + is_sdaa_available, + is_xpu_available, + load_checkpoint_in_model, + offload_state_dict, + parse_flag_from_env, + retie_parameters, +) +from .utils.constants import SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING +from .utils.other import recursive_getattr + + +logger = logging.getLogger(__name__) + + +@contextmanager +def init_empty_weights(include_buffers: bool = None): + """ + A context manager under which models are initialized with all parameters on the meta device, therefore creating an + empty model. Useful when just initializing the model would blow the available RAM. + + Args: + include_buffers (`bool`, *optional*): + Whether or not to also put all buffers on the meta device while initializing. + + Example: + + ```python + import torch.nn as nn + from accelerate import init_empty_weights + + # Initialize a model with 100 billions parameters in no time and without using any RAM. 
+ with init_empty_weights(): + tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) + ``` + + + + Any model created under this context manager has no weights. As such you can't do something like + `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. + Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not + called. + + + """ + if include_buffers is None: + include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False) + with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f: + yield f + + +@contextmanager +def init_on_device(device: torch.device, include_buffers: bool = None): + """ + A context manager under which models are initialized with all parameters on the specified device. + + Args: + device (`torch.device`): + Device to initialize all parameters on. + include_buffers (`bool`, *optional*): + Whether or not to also put all buffers on the meta device while initializing. 
+ + Example: + + ```python + import torch.nn as nn + from accelerate import init_on_device + + with init_on_device(device=torch.device("cuda")): + tst = nn.Linear(100, 100) # on `cuda` device + ``` + """ + if include_buffers is None: + include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False) + + if include_buffers: + with device: + yield + return + + old_register_parameter = nn.Module.register_parameter + if include_buffers: + old_register_buffer = nn.Module.register_buffer + + def register_empty_parameter(module, name, param): + old_register_parameter(module, name, param) + if param is not None: + param_cls = type(module._parameters[name]) + kwargs = module._parameters[name].__dict__ + kwargs["requires_grad"] = param.requires_grad + module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs) + + def register_empty_buffer(module, name, buffer, persistent=True): + old_register_buffer(module, name, buffer, persistent=persistent) + if buffer is not None: + module._buffers[name] = module._buffers[name].to(device) + + # Patch tensor creation + if include_buffers: + tensor_constructors_to_patch = { + torch_function_name: getattr(torch, torch_function_name) + for torch_function_name in ["empty", "zeros", "ones", "full"] + } + else: + tensor_constructors_to_patch = {} + + def patch_tensor_constructor(fn): + def wrapper(*args, **kwargs): + kwargs["device"] = device + return fn(*args, **kwargs) + + return wrapper + + try: + nn.Module.register_parameter = register_empty_parameter + if include_buffers: + nn.Module.register_buffer = register_empty_buffer + for torch_function_name in tensor_constructors_to_patch.keys(): + setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name))) + yield + finally: + nn.Module.register_parameter = old_register_parameter + if include_buffers: + nn.Module.register_buffer = old_register_buffer + for torch_function_name, old_torch_function in 
tensor_constructors_to_patch.items(): + setattr(torch, torch_function_name, old_torch_function) + + +def cpu_offload( + model: nn.Module, + execution_device: Optional[torch.device] = None, + offload_buffers: bool = False, + state_dict: Optional[dict[str, torch.Tensor]] = None, + preload_module_classes: Optional[list[str]] = None, +): + """ + Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one + copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that + state dict and put on the execution device passed as they are needed, then offloaded again. + + Args: + model (`torch.nn.Module`): + The model to offload. + execution_device (`torch.device`, *optional*): + The device on which the forward pass of the model will be executed (should be a GPU). Will default to the + model first parameter device. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + The state dict of the model that will be kept on CPU. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. 
+ """ + if execution_device is None: + execution_device = next(iter(model.parameters())).device + if state_dict is None: + state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()} + + add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) + attach_align_device_hook( + model, + execution_device=execution_device, + offload=True, + offload_buffers=offload_buffers, + weights_map=state_dict, + preload_module_classes=preload_module_classes, + ) + + return model + + +def cpu_offload_with_hook( + model: torch.nn.Module, + execution_device: Optional[Union[int, str, torch.device]] = None, + prev_module_hook: Optional[UserCpuOffloadHook] = None, +): + """ + Offloads a model on the CPU and puts it back to an execution device when executed. The difference with + [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when + the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop. + + Args: + model (`torch.nn.Module`): + The model to offload. + execution_device(`str`, `int` or `torch.device`, *optional*): + The device on which the model should be executed. Will default to the MPS device if it's available, then + GPU 0 if there is a GPU, and finally to the CPU. + prev_module_hook (`UserCpuOffloadHook`, *optional*): + The hook sent back by this function for a previous model in the pipeline you are running. If passed, its + offload method will be called just before the forward of the model to which this hook is attached. + + Example: + + ```py + model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device) + model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1) + model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2) + + hid_1 = model_1(input) + for i in range(50): + # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. 
+ hid_2 = model_2(hid_1) + # model2 is offloaded to the CPU just before this forward. + hid_3 = model_3(hid_3) + + # For model3, you need to manually call the hook offload method. + hook_3.offload() + ``` + """ + hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook) + add_hook_to_module(model, hook, append=True) + user_hook = UserCpuOffloadHook(model, hook) + return model, user_hook + + +def disk_offload( + model: nn.Module, + offload_dir: Union[str, os.PathLike], + execution_device: Optional[torch.device] = None, + offload_buffers: bool = False, + preload_module_classes: Optional[list[str]] = None, +): + """ + Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as + memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and + put on the execution device passed as they are needed, then offloaded again. + + Args: + model (`torch.nn.Module`): The model to offload. + offload_dir (`str` or `os.PathLike`): + The folder in which to offload the model weights (or where the model weights are already offloaded). + execution_device (`torch.device`, *optional*): + The device on which the forward pass of the model will be executed (should be a GPU). Will default to the + model's first parameter device. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. 
+ """ + if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")): + offload_state_dict(offload_dir, model.state_dict()) + if execution_device is None: + execution_device = next(iter(model.parameters())).device + weights_map = OffloadedWeightsLoader(save_folder=offload_dir) + + add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) + attach_align_device_hook( + model, + execution_device=execution_device, + offload=True, + offload_buffers=offload_buffers, + weights_map=weights_map, + preload_module_classes=preload_module_classes, + ) + + return model + + +def dispatch_model( + model: nn.Module, + device_map: dict[str, Union[str, int, torch.device]], + main_device: Optional[torch.device] = None, + state_dict: Optional[dict[str, torch.Tensor]] = None, + offload_dir: Optional[Union[str, os.PathLike]] = None, + offload_index: Optional[dict[str, str]] = None, + offload_buffers: bool = False, + skip_keys: Optional[Union[str, list[str]]] = None, + preload_module_classes: Optional[list[str]] = None, + force_hooks: bool = False, +): + """ + Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on + the CPU or even the disk. + + Args: + model (`torch.nn.Module`): + The model to dispatch. + device_map (`Dict[str, Union[str, int, torch.device]]`): + A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that + `"disk"` is accepted even if it's not a proper value for `torch.device`. + main_device (`str`, `int` or `torch.device`, *optional*): + The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or + `"disk"`. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + The state dict of the part of the model that will be kept on CPU. + offload_dir (`str` or `os.PathLike`): + The folder in which to offload the model weights (or where the model weights are already offloaded). 
+ offload_index (`Dict`, *optional*): + A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default + to the index saved in `save_folder`. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + force_hooks (`bool`, *optional*, defaults to `False`): + Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a + single device. + """ + # Error early if the device map is incomplete. + check_device_map(model, device_map) + + # We need to force hook for quantized model that can't be moved with to() + if getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes": + # since bnb 0.43.2, we can move 4-bit model + if getattr(model, "is_loaded_in_8bit", False) or ( + getattr(model, "is_loaded_in_4bit", False) and not is_bnb_available(min_version="0.43.2") + ): + force_hooks = True + + # We attach hooks if the device_map has at least 2 different devices or if + # force_hooks is set to `True`. Otherwise, the model in already loaded + # in the unique device and the user can decide where to dispatch the model. 
+ # If the model is quantized, we always force-dispatch the model + if (len(set(device_map.values())) > 1) or force_hooks: + if main_device is None: + if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}: + main_device = "cpu" + else: + main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0] + + if main_device != "cpu": + cpu_modules = [name for name, device in device_map.items() if device == "cpu"] + if state_dict is None and len(cpu_modules) > 0: + state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules) + + disk_modules = [name for name, device in device_map.items() if device == "disk"] + if offload_dir is None and offload_index is None and len(disk_modules) > 0: + raise ValueError( + "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules " + f"need to be offloaded: {', '.join(disk_modules)}." + ) + if ( + len(disk_modules) > 0 + and offload_index is None + and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json"))) + ): + disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules) + offload_state_dict(offload_dir, disk_state_dict) + + execution_device = { + name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items() + } + execution_device[""] = main_device + offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"] + offload = {name: device in offloaded_devices for name, device in device_map.items()} + save_folder = offload_dir if len(disk_modules) > 0 else None + if state_dict is not None or save_folder is not None or offload_index is not None: + device = main_device if offload_index is not None else None + weights_map = OffloadedWeightsLoader( + state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device + ) + else: + weights_map = None + + # When dispatching the model's 
parameters to the devices specified in device_map, we want to avoid allocating memory several times for the + # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its + # original pointer) on each devices. + tied_params = find_tied_parameters(model) + + tied_params_map = {} + for group in tied_params: + for param_name in group: + # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need + # to care about views of tensors through storage_offset. + data_ptr = recursive_getattr(model, param_name).data_ptr() + tied_params_map[data_ptr] = {} + + # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer, + # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. + + attach_align_device_hook_on_blocks( + model, + execution_device=execution_device, + offload=offload, + offload_buffers=offload_buffers, + weights_map=weights_map, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + tied_params_map=tied_params_map, + ) + + # warn if there is any params on the meta device + offloaded_devices_str = " and ".join( + [device for device in set(device_map.values()) if device in ("cpu", "disk")] + ) + if len(offloaded_devices_str) > 0: + logger.warning( + f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}." + ) + + # Attaching the hook may break tied weights, so we retie them + retie_parameters(model, tied_params) + + # add warning to cuda and to method + def add_warning(fn, model): + @wraps(fn) + def wrapper(*args, **kwargs): + warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks." 
+ if str(fn.__name__) == "to": + to_device = torch._C._nn._parse_to(*args, **kwargs)[0] + if to_device is not None: + logger.warning(warning_msg) + else: + logger.warning(warning_msg) + for param in model.parameters(): + if param.device == torch.device("meta"): + raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.") + return fn(*args, **kwargs) + + return wrapper + + # Make sure to update _accelerate_added_attributes in hooks.py if you add any hook + model.to = add_warning(model.to, model) + if is_npu_available(): + model.npu = add_warning(model.npu, model) + elif is_mlu_available(): + model.mlu = add_warning(model.mlu, model) + elif is_sdaa_available(): + model.sdaa = add_warning(model.sdaa, model) + elif is_musa_available(): + model.musa = add_warning(model.musa, model) + elif is_xpu_available(): + model.xpu = add_warning(model.xpu, model) + else: + model.cuda = add_warning(model.cuda, model) + + # Check if we are using multi-gpus with RTX 4000 series + use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1 + if use_multi_gpu and not check_cuda_p2p_ib_support(): + logger.warning( + "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. " + "This can affect the multi-gpu inference when using accelerate device_map." + "Please make sure to update your driver to the latest version which resolves this." + ) + else: + device = list(device_map.values())[0] + # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). 
+ if is_npu_available() and isinstance(device, int): + device = f"npu:{device}" + elif is_mlu_available() and isinstance(device, int): + device = f"mlu:{device}" + elif is_sdaa_available() and isinstance(device, int): + device = f"sdaa:{device}" + elif is_musa_available() and isinstance(device, int): + device = f"musa:{device}" + if device != "disk": + model.to(device) + else: + raise ValueError( + "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead." + ) + # Convert OrderedDict back to dict for easier usage + model.hf_device_map = dict(device_map) + return model + + +def load_checkpoint_and_dispatch( + model: nn.Module, + checkpoint: Union[str, os.PathLike], + device_map: Optional[Union[str, dict[str, Union[int, str, torch.device]]]] = None, + max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None, + no_split_module_classes: Optional[list[str]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + offload_buffers: bool = False, + dtype: Optional[Union[str, torch.dtype]] = None, + offload_state_dict: Optional[bool] = None, + skip_keys: Optional[Union[str, list[str]]] = None, + preload_module_classes: Optional[list[str]] = None, + force_hooks: bool = False, + strict: bool = False, + full_state_dict: bool = True, + broadcast_from_rank0: bool = False, +): + """ + Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are + loaded and adds the various hooks that will make this model run properly (even if split across devices). + + Args: + model (`torch.nn.Module`): The model in which we want to load a checkpoint. + checkpoint (`str` or `os.PathLike`): + The folder checkpoint to load. It can be: + - a path to a file containing a whole model state dict + - a path to a `.json` file containing the index to a sharded checkpoint + - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. 
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more + information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). + Defaults to None, which means [`dispatch_model`] will not be called. + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU + and the available CPU RAM if unset. + no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_buffers (`bool`, *optional*, defaults to `False`): + In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as + well as the parameters. + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + offload_state_dict (`bool`, *optional*): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map + picked contains `"disk"` values. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. 
This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + force_hooks (`bool`, *optional*, defaults to `False`): + Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a + single device. + strict (`bool`, *optional*, defaults to `False`): + Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's + state_dict. + full_state_dict (`bool`, *optional*, defaults to `True`): if this is set to `True`, all the tensors in the + loaded state_dict will be gathered. No ShardedTensor and DTensor will be in the loaded state_dict. + broadcast_from_rank0 (`False`, *optional*, defaults to `False`): when the option is `True`, a distributed + `ProcessGroup` must be initialized. rank0 should receive a full state_dict and will broadcast the tensors + in the state_dict one by one to other ranks. Other ranks will receive the tensors and shard (if applicable) + according to the local shards in the model. + + Example: + + ```python + >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch + >>> from huggingface_hub import hf_hub_download + >>> from transformers import AutoConfig, AutoModelForCausalLM + + >>> # Download the Weights + >>> checkpoint = "EleutherAI/gpt-j-6B" + >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") + + >>> # Create a model and initialize it with empty weights + >>> config = AutoConfig.from_pretrained(checkpoint) + >>> with init_empty_weights(): + ... model = AutoModelForCausalLM.from_config(config) + + >>> # Load the checkpoint and dispatch it to the right devices + >>> model = load_checkpoint_and_dispatch( + ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] + ... 
) + ``` + """ + if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or 'sequential'." + ) + if isinstance(device_map, str): + if device_map != "sequential": + max_memory = get_balanced_memory( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + dtype=dtype, + low_zero=(device_map == "balanced_low_0"), + ) + device_map = infer_auto_device_map( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + dtype=dtype, + offload_buffers=offload_buffers, + ) + if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): + offload_state_dict = True + load_checkpoint_in_model( + model, + checkpoint, + device_map=device_map, + offload_folder=offload_folder, + dtype=dtype, + offload_state_dict=offload_state_dict, + offload_buffers=offload_buffers, + strict=strict, + full_state_dict=full_state_dict, + broadcast_from_rank0=broadcast_from_rank0, + ) + if device_map is None: + return model + return dispatch_model( + model, + device_map=device_map, + offload_dir=offload_folder, + offload_buffers=offload_buffers, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + force_hooks=force_hooks, + ) + + +def attach_layerwise_casting_hooks( + module: torch.nn.Module, + storage_dtype: torch.dtype, + compute_dtype: torch.dtype, + skip_modules_pattern: Union[str, tuple[str, ...]] = None, + skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None, + non_blocking: bool = False, +) -> None: + r""" + Applies layerwise casting to a given module. The module expected here is a PyTorch `nn.Module`. This is helpful for + reducing memory requirements when one doesn't want to fully quantize a model. 
Model params can be kept in say, + `torch.float8_e4m3fn` and upcasted to a higher precision like `torch.bfloat16` during forward pass and downcasted + back to `torch.float8_e4m3fn` to realize memory savings. + + Args: + module (`torch.nn.Module`): + The module whose leaf modules will be cast to a high precision dtype for computation, and to a low + precision dtype for storage. + storage_dtype (`torch.dtype`): + The dtype to cast the module to before/after the forward pass for storage. + compute_dtype (`torch.dtype`): + The dtype to cast the module to during the forward pass for computation. + skip_modules_pattern (`tuple[str, ...]`, defaults to `None`): + A list of patterns to match the names of the modules to skip during the layerwise casting process. If set + to `None` alongside `skip_modules_classes` being `None`, the layerwise casting is applied directly to the + module instead of its internal submodules. + skip_modules_classes (`tuple[type[torch.nn.Module], ...]`, defaults to `None`): + A list of module classes to skip during the layerwise casting process. + non_blocking (`bool`, defaults to `False`): + If `True`, the weight casting operations are non-blocking. + + Example: + + ```python + >>> from accelerate.hooks import attach_layerwise_casting_hooks + >>> from transformers import AutoModelForCausalLM + >>> import torch + + >>> # Model + >>> checkpoint = "EleutherAI/gpt-j-6B" + >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) + + >>> # Attach hooks and perform inference + >>> attach_layerwise_casting_hooks(model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16) + >>> with torch.no_grad(): + ... model(...) + ``` + + Users can also pass modules they want to avoid from getting downcasted. + + ```py + >>> attach_layerwise_casting_hooks( + ... model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16, skip_modules_pattern=["norm"] + ... 
) + ``` + """ + _attach_layerwise_casting_hooks( + module, storage_dtype, compute_dtype, skip_modules_pattern, skip_modules_classes, non_blocking + ) + + +def _attach_layerwise_casting_hooks( + module: torch.nn.Module, + storage_dtype: torch.dtype, + compute_dtype: torch.dtype, + skip_modules_pattern: Union[str, tuple[str, ...]] = None, + skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None, + non_blocking: bool = False, + _prefix: str = "", +): + should_skip = (skip_modules_classes is not None and isinstance(module, skip_modules_classes)) or ( + skip_modules_pattern is not None and any(re.search(pattern, _prefix) for pattern in skip_modules_pattern) + ) + if should_skip: + logger.debug(f'Skipping layerwise casting for layer "{_prefix}"') + return + + if isinstance(module, SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING): + logger.debug(f'Applying layerwise casting to layer "{_prefix}"') + add_hook_to_module( + module, + LayerwiseCastingHook(storage_dtype=storage_dtype, compute_dtype=compute_dtype, non_blocking=non_blocking), + append=True, + ) + return + + for name, submodule in module.named_children(): + layer_name = f"{_prefix}.{name}" if _prefix else name + _attach_layerwise_casting_hooks( + submodule, + storage_dtype, + compute_dtype, + skip_modules_pattern, + skip_modules_classes, + non_blocking, + _prefix=layer_name, + ) + + +def _attach_context_parallel_hooks( + model: nn.Module, +): + """ + Monkeypatch huggingface's `transformers` model to fix attention mask issues when using context parallelism. + + This function attaches forward_pre_hooks to each self_attn module of the model, where each hook checks the + args/kwargs, if they contain an attention mask, if it does, it will remove this mask, check if it is a causal mask, + if yes, will add a kwarg `is_causal=True`, otherwise will raise an error. This is because context parallelism does + not support attention masks. This function modifies the model in place. 
+ + Args: + model (`nn.Module`): + The model to attach the hooks to. + + """ + + def _self_attn_pre_forward_hook(_module, module_args, module_kwargs): + if "attention_mask" in module_kwargs: + module_kwargs["attention_mask"] = None + module_kwargs["is_causal"] = True + + return module_args, module_kwargs + + for name, module in model.named_modules(): + # We hope (assume) that if user uses their own model (without this structure which transformers uses), they read the docs saying they can't pass in attention masks + # Then these cases can happen: + # 1) some modules end with a `self-attn` module, in which case we attach the hook, but the + # there's no attention mask kwarg -> hook is a no-op + # 2) some modules end with a `self-attn` module, in which case we attach the hook, and the + # attention mask kwarg is passed -> hook will remove the attention mask and add + # `is_causal=True` kwarg, which either crashes the training or fixes it + # (training would crash anyway as attention mask isn't supported) + # 3) no modules end with a `self-attn` module, in which case we don't attach the hook, this is + # a no-op as well + if name.endswith("self_attn"): + # we want the hook to be executed first, to avoid any other hooks doing work on the attention mask + module.register_forward_pre_hook(_self_attn_pre_forward_hook, with_kwargs=True, prepend=True) diff --git a/venv/lib/python3.10/site-packages/accelerate/checkpointing.py b/venv/lib/python3.10/site-packages/accelerate/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..425dd3fecd323dc2cf161ff5b427662b82ee823b --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/checkpointing.py @@ -0,0 +1,330 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from pathlib import Path + +import numpy as np +import torch +from safetensors.torch import load_model + +from .utils import ( + MODEL_NAME, + OPTIMIZER_NAME, + RNG_STATE_NAME, + SAFE_MODEL_NAME, + SAFE_WEIGHTS_NAME, + SAMPLER_NAME, + SCALER_NAME, + SCHEDULER_NAME, + WEIGHTS_NAME, + get_pretty_name, + is_cuda_available, + is_hpu_available, + is_mlu_available, + is_musa_available, + is_sdaa_available, + is_torch_version, + is_torch_xla_available, + is_xpu_available, + load, + save, +) + + +if is_torch_version(">=", "2.4.0"): + from torch.amp import GradScaler +else: + from torch.cuda.amp import GradScaler + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + +from .logging import get_logger +from .state import PartialState + + +logger = get_logger(__name__) + + +def save_accelerator_state( + output_dir: str, + model_states: list[dict], + optimizers: list, + schedulers: list, + dataloaders: list, + process_index: int, + step: int, + scaler: GradScaler = None, + save_on_each_node: bool = False, + safe_serialization: bool = True, +): + """ + Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. + + + + If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native + `pickle`. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. 
+ model_states (`List[torch.nn.Module]`): + A list of model states + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + dataloaders (`List[torch.utils.data.DataLoader]`): + A list of dataloader instances to save their sampler states + process_index (`int`): + The current process index in the Accelerator state + step (`int`): + The current step in the internal step tracker + scaler (`torch.amp.GradScaler`, *optional*): + An optional gradient scaler instance to save; + save_on_each_node (`bool`, *optional*): + Whether to save on every node, or only the main node. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + """ + output_dir = Path(output_dir) + # Model states + for i, state in enumerate(model_states): + weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME + if i > 0: + weights_name = weights_name.replace(".", f"_{i}.") + output_model_file = output_dir.joinpath(weights_name) + save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization) + logger.info(f"Model weights saved in {output_model_file}") + # Optimizer states + for i, opt in enumerate(optimizers): + state = opt.state_dict() + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + output_optimizer_file = output_dir.joinpath(optimizer_name) + save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Optimizer state saved in {output_optimizer_file}") + # Scheduler states + for i, scheduler in enumerate(schedulers): + state = scheduler.state_dict() + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + output_scheduler_file = output_dir.joinpath(scheduler_name) + save(state, 
output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Scheduler state saved in {output_scheduler_file}") + # DataLoader states + for i, dataloader in enumerate(dataloaders): + sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" + output_sampler_file = output_dir.joinpath(sampler_name) + # Only save if we have our custom sampler + from .data_loader import IterableDatasetShard, SeedableRandomSampler + + if isinstance(dataloader.dataset, IterableDatasetShard): + sampler = dataloader.get_sampler() + if isinstance(sampler, SeedableRandomSampler): + save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False) + if getattr(dataloader, "use_stateful_dataloader", False): + dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin" + output_dataloader_state_dict_file = output_dir.joinpath(dataloader_state_dict_name) + state_dict = dataloader.state_dict() + torch.save(state_dict, output_dataloader_state_dict_file) + logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}") + + # GradScaler state + if scaler is not None: + state = scaler.state_dict() + output_scaler_file = output_dir.joinpath(SCALER_NAME) + torch.save(state, output_scaler_file) + logger.info(f"Gradient scaler state saved in {output_scaler_file}") + # Random number generator states + states = {} + states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" + states["step"] = step + states["random_state"] = random.getstate() + states["numpy_random_seed"] = np.random.get_state() + states["torch_manual_seed"] = torch.get_rng_state() + if is_xpu_available(): + states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all() + if is_mlu_available(): + states["torch_mlu_manual_seed"] = torch.mlu.get_rng_state_all() + elif is_sdaa_available(): + states["torch_sdaa_manual_seed"] = torch.sdaa.get_rng_state_all() + elif is_musa_available(): + 
states["torch_musa_manual_seed"] = torch.musa.get_rng_state_all() + if is_hpu_available(): + states["torch_hpu_manual_seed"] = torch.hpu.get_rng_state_all() + if is_cuda_available(): + states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() + if is_torch_xla_available(): + states["xm_seed"] = xm.get_rng_state() + output_states_file = output_dir.joinpath(states_name) + torch.save(states, output_states_file) + logger.info(f"Random states saved in {output_states_file}") + return output_dir + + +def load_accelerator_state( + input_dir, + models, + optimizers, + schedulers, + dataloaders, + process_index, + scaler=None, + map_location=None, + load_kwargs=None, + **load_model_func_kwargs, +): + """ + Loads states of the models, optimizers, scaler, and RNG generators from a given directory. + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder to load all relevant weights and states. + models (`List[torch.nn.Module]`): + A list of model instances + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.amp.GradScaler`, *optional*): + An optional *GradScaler* instance to load + map_location (`str`, *optional*): + What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". + load_kwargs (`dict`, *optional*): + Additional arguments that can be passed to the `load` function. + load_model_func_kwargs (`dict`, *optional*): + Additional arguments that can be passed to the model's `load_state_dict` method. + + Returns: + `dict`: Contains the `Accelerator` attributes to override while loading the state. 
+ """ + # stores the `Accelerator` attributes to override + override_attributes = dict() + if map_location not in [None, "cpu", "on_device"]: + raise TypeError( + "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`" + ) + if map_location is None: + map_location = "cpu" + elif map_location == "on_device": + map_location = PartialState().device + + if load_kwargs is None: + load_kwargs = {} + + input_dir = Path(input_dir) + # Model states + for i, model in enumerate(models): + ending = f"_{i}" if i > 0 else "" + input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors") + if input_model_file.exists(): + load_model(model, input_model_file, device=str(map_location), **load_model_func_kwargs) + else: + # Load with torch + input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin") + state_dict = load(input_model_file, map_location=map_location) + model.load_state_dict(state_dict, **load_model_func_kwargs) + logger.info("All model weights loaded successfully") + + # Optimizer states + for i, opt in enumerate(optimizers): + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + input_optimizer_file = input_dir.joinpath(optimizer_name) + optimizer_state = load(input_optimizer_file, map_location=map_location, **load_kwargs) + optimizers[i].load_state_dict(optimizer_state) + logger.info("All optimizer states loaded successfully") + + # Scheduler states + for i, scheduler in enumerate(schedulers): + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + input_scheduler_file = input_dir.joinpath(scheduler_name) + scheduler_state = load(input_scheduler_file, **load_kwargs) + scheduler.load_state_dict(scheduler_state) + logger.info("All scheduler states loaded successfully") + + for i, dataloader in enumerate(dataloaders): + sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" + input_sampler_file = 
input_dir.joinpath(sampler_name) + # Only load if we have our custom sampler + from .data_loader import IterableDatasetShard, SeedableRandomSampler + + if isinstance(dataloader.dataset, IterableDatasetShard): + sampler = dataloader.get_sampler() + if isinstance(sampler, SeedableRandomSampler): + sampler = dataloader.set_sampler(load(input_sampler_file)) + if getattr(dataloader, "use_stateful_dataloader", False): + dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin" + input_dataloader_state_dict_file = input_dir.joinpath(dataloader_state_dict_name) + if input_dataloader_state_dict_file.exists(): + state_dict = load(input_dataloader_state_dict_file, **load_kwargs) + dataloader.load_state_dict(state_dict) + logger.info("All dataloader sampler states loaded successfully") + + # GradScaler state + if scaler is not None: + input_scaler_file = input_dir.joinpath(SCALER_NAME) + scaler_state = load(input_scaler_file) + scaler.load_state_dict(scaler_state) + logger.info("GradScaler state loaded successfully") + + # Random states + try: + states = load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl")) + if "step" in states: + override_attributes["step"] = states["step"] + random.setstate(states["random_state"]) + np.random.set_state(states["numpy_random_seed"]) + torch.set_rng_state(states["torch_manual_seed"]) + if is_xpu_available(): + torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"]) + if is_mlu_available(): + torch.mlu.set_rng_state_all(states["torch_mlu_manual_seed"]) + elif is_sdaa_available(): + torch.sdaa.set_rng_state_all(states["torch_sdaa_manual_seed"]) + elif is_musa_available(): + torch.musa.set_rng_state_all(states["torch_musa_manual_seed"]) + else: + torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) + if is_torch_xla_available(): + xm.set_rng_state(states["xm_seed"]) + logger.info("All random states loaded successfully") + except Exception: + logger.info("Could not load random states") + + 
return override_attributes + + +def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False): + """ + Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` + """ + # Should this be the right way to get a qual_name type value from `obj`? + save_location = Path(path) / f"custom_checkpoint_{index}.pkl" + logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") + save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node) + + +def load_custom_state(obj, path, index: int = 0): + """ + Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`. Will always set `weights_only=False` when + loading the state. + """ + load_location = f"{path}/custom_checkpoint_{index}.pkl" + logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") + obj.load_state_dict(load(load_location, map_location="cpu", weights_only=False)) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__init__.py b/venv/lib/python3.10/site-packages/accelerate/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94f779d7bf5a9e74545eeb26a5eb6b4d3d460793 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52c09c0e93acedb96e33f79fb1fdb0ca15c54270 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0903b1e5b4357414a3cbbee1772c9de378733b84 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faf774f9290c59b8c0f4c2b6fd6218a85deb118a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c4a487831de00e5a0cf0f5b1a902549627fa9878 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/merge.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/merge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c169aa34fa4bcf6d088009b8dfe61963f409ade5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/merge.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b99febd6dacf6cc52549fc2a75fc9b6e8f890cd3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/to_fsdp2.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/to_fsdp2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87352e6ac1274cf65b7479692869e2f7875f73de Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/to_fsdp2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01968f51a67fc68f77fe66ae73e9355c135d863e Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..476f138b4484c45f3d5f5fe3f4d0eb07fc44aac0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py b/venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..b878c8debd874e1418b946775b11568c7487ad72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from accelerate.commands.config import get_config_parser +from accelerate.commands.env import env_command_parser +from accelerate.commands.estimate import estimate_command_parser +from accelerate.commands.launch import launch_command_parser +from accelerate.commands.merge import merge_command_parser +from accelerate.commands.test import test_command_parser +from accelerate.commands.to_fsdp2 import to_fsdp2_command_parser +from accelerate.commands.tpu import tpu_command_parser +from accelerate.commands.utils import CustomArgumentParser + + +def main(): + parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate []", allow_abbrev=False) + subparsers = parser.add_subparsers(help="accelerate command helpers") + + # Register commands + get_config_parser(subparsers=subparsers) + estimate_command_parser(subparsers=subparsers) + env_command_parser(subparsers=subparsers) + launch_command_parser(subparsers=subparsers) + merge_command_parser(subparsers=subparsers) + tpu_command_parser(subparsers=subparsers) + test_command_parser(subparsers=subparsers) + to_fsdp2_command_parser(subparsers=subparsers) + + # Let's go + args = parser.parse_args() + + if not hasattr(args, "func"): + parser.print_help() + exit(1) + + # Run + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..649a15888cccd070b3d4ca9a600457c6ad59d4d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from .config import config_command_parser +from .config_args import default_config_file, load_config_from_file # noqa: F401 +from .default import default_command_parser +from .update import update_command_parser + + +def get_config_parser(subparsers=None): + parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) + # The main config parser + config_parser = config_command_parser(subparsers) + # The subparser to add commands to + subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand") + + # Then add other parsers with the parent parser + default_command_parser(subcommands, parents=[parent_parser]) + update_command_parser(subcommands, parents=[parent_parser]) + + return config_parser + + +def main(): + config_parser = get_config_parser() + args = config_parser.parse_args() + + if not hasattr(args, "func"): + config_parser.print_help() + exit(1) + + # Run + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..914a8d62c84786a407516e6f067469fa41d0ad89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d6656788aba187c919fe928e910fbde2a72e323 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76b839e540db88447b3dc56302a1f27bc2d4a664 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72abc5c4686220b541a974973b0bbcfc247621d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c8019e15d1e75f39140de5a43bd9b55c05e8998 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..846329ce1b225877de8cb8da451c93acfe273a50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06362d65164c509f57ef14bc256d4b8719fd5006 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e389e4d2e87684a925954462891ac40989fc78ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..e466ba8048beb2b024e5903552a4f2de2416f86c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py @@ -0,0 +1,917 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from ...utils import ( + ComputeEnvironment, + DistributedType, + is_deepspeed_available, + is_fp8_available, + is_hpu_available, + is_mlu_available, + is_mps_available, + is_msamp_available, + is_musa_available, + is_npu_available, + is_sdaa_available, + is_transformer_engine_available, + is_transformers_available, + is_xpu_available, +) +from ...utils.constants import ( + DEEPSPEED_MULTINODE_LAUNCHERS, + FSDP2_STATE_DICT_TYPE, + FSDP_AUTO_WRAP_POLICY, + FSDP_BACKWARD_PREFETCH, + FSDP_SHARDING_STRATEGY, + FSDP_STATE_DICT_TYPE, + TORCH_DYNAMO_MODES, +) +from .config_args import ClusterConfig +from .config_utils import ( + DYNAMO_BACKENDS, + _ask_field, + _ask_options, + _convert_distributed_mode, + _convert_dynamo_backend, + _convert_fp8_backend, + _convert_mixed_precision, + _convert_yes_no_to_bool, +) + + +def get_cluster_input(): + distributed_type = _ask_options( + "Which type of machine are you using?", + [ + "No distributed training", + "multi-CPU", + "multi-XPU", + "multi-HPU", + "multi-GPU", + "multi-NPU", + "multi-MLU", + "multi-SDAA", + "multi-MUSA", + "TPU", + ], + _convert_distributed_mode, + ) + + machine_rank = 0 + num_machines = 1 + num_processes = 1 + gpu_ids = None + main_process_ip = None + main_process_port = None + rdzv_backend = "static" + same_network = True + debug = False + + if distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + 
DistributedType.MULTI_HPU, + ]: + num_machines = _ask_field( + "How many different machines will you use (use more than 1 for multi-node training)? [1]: ", + int, + default=1, + ) + if num_machines > 1: + machine_rank = _ask_options( + "What is the rank of this machine?", + list(range(num_machines)), + int, + ) + main_process_ip = _ask_field( + "What is the IP address of the machine that will host the main process? ", + ) + main_process_port = _ask_field( + "What is the port you will use to communicate with the main process? ", + int, + ) + same_network = _ask_field( + "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + if not same_network: + rdzv_backend = _ask_field( + "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static" + ) + debug = _ask_field( + "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if distributed_type == DistributedType.NO: + use_cpu = _ask_field( + "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + elif distributed_type == DistributedType.MULTI_CPU: + use_cpu = True + else: + use_cpu = False + + ipex_config = {} + mpirun_config = {} + if use_cpu or is_xpu_available(): + ipex_config["ipex"] = _ask_field( + "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU/XPU? 
[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if use_cpu: + if distributed_type == DistributedType.MULTI_CPU: + use_mpirun = _ask_field( + "Do you want accelerate to launch mpirun? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_mpirun: + mpirun_hostfile = _ask_field( + "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ", + str, + default="~/hostfile", + ) + mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip()) + mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1) + + dynamo_config = {} + use_dynamo = _ask_field( + "Do you wish to optimize your script with torch dynamo?[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_dynamo: + prefix = "dynamo_" + dynamo_config[prefix + "backend"] = _ask_options( + "Which dynamo backend would you like to use?", + [x.lower() for x in DYNAMO_BACKENDS], + _convert_dynamo_backend, + default=2, + ) + use_custom_options = _ask_field( + "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if use_custom_options: + dynamo_config[prefix + "mode"] = _ask_options( + "Which mode do you want to use?", + TORCH_DYNAMO_MODES, + lambda x: TORCH_DYNAMO_MODES[int(x)], + default=0, + ) + dynamo_config[prefix + "use_fullgraph"] = _ask_field( + "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + dynamo_config[prefix + "use_dynamic"] = _ask_field( + "Do you want to enable dynamic shape tracing? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + dynamo_config[prefix + "use_regional_compilation"] = _ask_field( + "Do you want to enable regional compilation? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + use_mps = not use_cpu and is_mps_available() + deepspeed_config = {} + if ( + distributed_type + in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.NO, + ] + and not use_mps + ): + use_deepspeed = _ask_field( + "Do you want to use DeepSpeed? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_deepspeed: + distributed_type = DistributedType.DEEPSPEED + assert is_deepspeed_available(), ( + "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source" + ) + + if distributed_type == DistributedType.DEEPSPEED: + use_deepspeed_config = _ask_field( + "Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_deepspeed_config: + deepspeed_config["deepspeed_config_file"] = _ask_field( + "Please enter the path to the json DeepSpeed config file: ", + str, + default="none", + ) + else: + deepspeed_config["zero_stage"] = _ask_options( + "What should be your DeepSpeed's ZeRO optimization stage?", + [0, 1, 2, 3], + int, + default=2, + ) + + deepspeed_devices = ["none", "cpu", "nvme"] + if deepspeed_config["zero_stage"] >= 2: + deepspeed_config["offload_optimizer_device"] = _ask_options( + "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] + ) + deepspeed_config["offload_param_device"] = _ask_options( + "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] + ) + if deepspeed_config["offload_param_device"] == "nvme": + deepspeed_config["offload_param_nvme_path"] = _ask_field( + "Nvme Path to offload parameters?", + str, + default="/nvme", + ) + if deepspeed_config["offload_optimizer_device"] == "nvme": + deepspeed_config["offload_optimizer_nvme_path"] = _ask_field( + "Nvme Path to offload optimizer states?", + str, + default="/nvme", + ) + deepspeed_config["gradient_accumulation_steps"] = _ask_field( + "How many gradient accumulation steps you're passing in your script? [1]: ", + int, + default=1, + ) + use_gradient_clipping = _ask_field( + "Do you want to use gradient clipping? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_gradient_clipping: + deepspeed_config["gradient_clipping"] = _ask_field( + "What is the gradient clipping value? [1.0]: ", + float, + default=1.0, + ) + if deepspeed_config["zero_stage"] == 3: + deepspeed_config["zero3_save_16bit_model"] = _ask_field( + "Do you want to save 16-bit model weights when using ZeRO Stage-3? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + deepspeed_config["zero3_init_flag"] = _ask_field( + "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if deepspeed_config["zero3_init_flag"]: + if not is_transformers_available(): + raise Exception( + "When `zero3_init_flag` is set, it requires Transformers to be installed. " + "Please run `pip3 install transformers`." + ) + use_moe = _ask_field( + "Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_moe: + deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field( + "Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : " + " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ", + str, + ) + + if num_machines > 1: + launcher_query = "Which Type of launcher do you want to use?" + deepspeed_config["deepspeed_multinode_launcher"] = _ask_options( + launcher_query, + DEEPSPEED_MULTINODE_LAUNCHERS, + lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)], + ) + + if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + deepspeed_config["deepspeed_hostfile"] = _ask_field( + "DeepSpeed configures multi-node compute resources with hostfile. " + "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; " + "for more information please refer official [documentation]" + "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). " + "Please specify the location of hostfile: ", + str, + ) + + is_exclusion_filter = _ask_field( + "Do you want to specify exclusion filter string? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if is_exclusion_filter: + deepspeed_config["deepspeed_exclusion_filter"] = _ask_field( + "DeepSpeed exclusion filter string: ", + str, + ) + + is_inclusion_filter = _ask_field( + "Do you want to specify inclusion filter string? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if is_inclusion_filter: + deepspeed_config["deepspeed_inclusion_filter"] = _ask_field( + "DeepSpeed inclusion filter string: ", + str, + ) + + fsdp_config = {} + + if distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + ]: + use_fsdp = _ask_field( + "Do you want to use FullyShardedDataParallel? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_fsdp: + distributed_type = DistributedType.FSDP + if distributed_type == DistributedType.FSDP: + fsdp_config["fsdp_version"] = _ask_options( + "What should be your FSDP version? [2]: ", + [1, 2], + lambda x: int(x) + 1, + default=1, + ) + fsdp_version = fsdp_config["fsdp_version"] # extract to a variable to simplify usage later + + if fsdp_version == 1: + sharding_strategy_query = "What should be your sharding strategy?" + fsdp_config["fsdp_reshard_after_forward"] = _ask_options( + sharding_strategy_query, + FSDP_SHARDING_STRATEGY, + lambda x: FSDP_SHARDING_STRATEGY[int(x)], + ) + else: + fsdp_config["fsdp_reshard_after_forward"] = _ask_field( + "Do you want to enable resharding after forward? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + fsdp_config["fsdp_offload_params"] = _ask_field( + "Do you want to offload parameters and gradients to CPU? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + fsdp_wrap_query = "What should be your auto wrap policy?" + fsdp_config["fsdp_auto_wrap_policy"] = _ask_options( + fsdp_wrap_query, + FSDP_AUTO_WRAP_POLICY, + lambda x: FSDP_AUTO_WRAP_POLICY[int(x)], + ) + if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]: + use_no_split_modules = _ask_field( + "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if not use_no_split_modules: + fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field( + "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :" + "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ", + str, + ) + elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]: + fsdp_config["fsdp_min_num_params"] = _ask_field( + "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ", + int, + default=100000000, + ) + # Removed in FSDP2, ask for user input for FSDP1 + if fsdp_version == 1: + fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?" + fsdp_config["fsdp_backward_prefetch"] = _ask_options( + fsdp_backward_prefetch_query, + FSDP_BACKWARD_PREFETCH, + lambda x: FSDP_BACKWARD_PREFETCH[int(x)], + ) + + fsdp_state_dict_type_query = "What should be your FSDP's state dict type?" 
+ fsdp_config["fsdp_state_dict_type"] = _ask_options( + fsdp_state_dict_type_query, + FSDP_STATE_DICT_TYPE if fsdp_version == 1 else FSDP2_STATE_DICT_TYPE, + lambda x: FSDP_STATE_DICT_TYPE[int(x)] if fsdp_version == 1 else FSDP2_STATE_DICT_TYPE[int(x)], + default=0, + ) + # Not implemented in FSDP2, ask for user input for FSDP1 + if fsdp_version == 1: + fsdp_config["fsdp_forward_prefetch"] = _ask_field( + "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + # Obsolete in FSDP2, ask for user input for FSDP1 + if fsdp_version == 1: + fsdp_config["fsdp_use_orig_params"] = _ask_field( + "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field( + "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + # Obsolete in FSDP2, ask for user input for FSDP1 + if fsdp_version == 1: + if fsdp_config["fsdp_cpu_ram_efficient_loading"]: + fsdp_config["fsdp_sync_module_states"] = True + else: + fsdp_config["fsdp_sync_module_states"] = _ask_field( + "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + fsdp_config["fsdp_activation_checkpointing"] = _ask_field( + "Do you want to enable FSDP activation checkpointing? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + parallelism_config = {} + + if fsdp_config.get("fsdp_version", 1) == 2: + use_parallelism_config = _ask_field( + "Do you want to use the parallelism config? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if use_parallelism_config: + prefix = "parallelism_config_" + parallelism_config[prefix + "dp_replicate_size"] = _ask_field( + "What is the data parallelism replicate size? [1]: ", + int, + default=1, + error_message="Please enter an integer.", + ) + + parallelism_config[prefix + "dp_shard_size"] = _ask_field( + "What is the FSDP shard size? [1]: ", + int, + default=1, + error_message="Please enter an integer.", + ) + + parallelism_config[prefix + "tp_size"] = _ask_field( + "What is the tensor parallelism size? [1]: ", + int, + default=1, + error_message="Please enter an integer.", + ) + + parallelism_config[prefix + "cp_size"] = _ask_field( + "What is the context parallelism size? [1]: ", + int, + default=1, + error_message="Please enter an integer.", + ) + if parallelism_config[prefix + "cp_size"] > 1: + parallelism_config[prefix + "cp_comm_strategy"] = _ask_options( + "What is the compute parallelism communication strategy?", + ["allgather", "alltoall"], + lambda x: ["allgather", "alltoall"][int(x)], + default=0, + ) + + megatron_lm_config = {} + if distributed_type in [DistributedType.MULTI_GPU]: + use_megatron_lm = _ask_field( + "Do you want to use Megatron-LM ? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_megatron_lm: + distributed_type = DistributedType.MEGATRON_LM + if distributed_type == DistributedType.MEGATRON_LM: + prefix = "megatron_lm_" + megatron_lm_config[prefix + "tp_degree"] = _ask_field( + "What is the Tensor Parallelism degree/size? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + if megatron_lm_config[prefix + "tp_degree"] > 1: + megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field( + "Do you want to enable Sequence Parallelism? 
[YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "pp_degree"] = _ask_field( + "What is the Pipeline Parallelism degree/size? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + if megatron_lm_config[prefix + "pp_degree"] > 1: + megatron_lm_config[prefix + "num_micro_batches"] = _ask_field( + "What is the number of micro-batches? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + + megatron_lm_config[prefix + "recompute_activations"] = _ask_field( + "Do you want to enable selective activation recomputation? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field( + "Do you want to use distributed optimizer " + "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "gradient_clipping"] = _ask_field( + "What is the gradient clipping value based on global L2 Norm (0 to disable)? 
[1.0]: ", + float, + default=1.0, + ) + # TPU specific defaults + tpu_commands = None + tpu_command_file = None + tpu_downcast_bf16 = "no" + tpu_env = [] + tpu_name = None + tpu_vm = None + tpu_zone = None + tpu_use_sudo = False + tpu_use_cluster = False + + if distributed_type in [ + DistributedType.MULTI_CPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.XLA, + ]: + machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "") + if machine_type == "TPU": + machine_type += " cores" + elif machine_type == "CPU": + machine_type = "processes" + else: + machine_type += "(s)" + num_processes = _ask_field( + f"How many {machine_type} should be used for distributed training? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + num_processes = _ask_field( + "How many GPU(s) should be used for distributed training? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + else: + num_processes = 1 + + if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1): + raise ValueError( + f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using." 
+ ) + + if ( + distributed_type + in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + DistributedType.NO, + ] + and not use_cpu + and not use_mps + ): + if is_npu_available(): + machine_type = "NPU(s)" + elif is_mlu_available(): + machine_type = "MLU(s)" + elif is_sdaa_available(): + machine_type = "SDAA(s)" + elif is_musa_available(): + machine_type = "MUSA(s)" + elif is_xpu_available(): + machine_type = "XPU(s)" + elif is_hpu_available(): + machine_type = "HPU(s)" + else: + machine_type = "GPU(s)" + gpu_ids = _ask_field( + f"What {machine_type} (by id) should be used for training on this machine as a comma-separated list? [all]:", + default="all", + ) + + # CPU affinity is only supported on NVIDIA hardware for now + enable_cpu_affinity = False + if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps: + enable_cpu_affinity = _ask_field( + "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + fp8_config = None + if distributed_type == DistributedType.XLA: + mixed_precision = "no" + main_training_function = _ask_field( + "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ", + default="main", + ) + tpu_use_cluster = _ask_field( + "Are you using a TPU cluster? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if tpu_use_cluster: + tpu_name = _ask_field( + "What is the name of your TPU cluster? ", + default=None, + error_message="Please enter the name of your TPU cluster.", + ) + tpu_zone = _ask_field( + "What is the zone of your TPU cluster? 
", + default=None, + error_message="Please enter the zone of your TPU cluster.", + ) + tpu_use_sudo = _ask_field( + "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ", + default=False, + error_message="Please enter yes or no.", + ) + run_commands = _ask_field( + "Do you have code you wish to run on startup in each pod? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if run_commands: + use_command_file = _ask_field( + "Is this code located in a bash script? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_command_file: + tpu_command_file = _ask_field( + "What is the path to your bash script? ", + default=None, + error_message="Please enter the path to your bash script.", + ) + tpu_command_file = os.path.abspath(tpu_command_file) + else: + print("Please enter each command separately you wish to run on startup in each pod.") + tpu_commands = [] + another_command = True + while another_command: + tpu_commands.append( + _ask_field( + "Please enter a single command to be ran ", + default=None, + error_message="Please enter the commands you wish to run on startup in each pod as a single string.", + ) + ) + another_command = _ask_field( + "Do you wish to add another command? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + tpu_vm = _ask_field( + "If not using an instance group, what are the names of the Compute VM instances to be used, separated by a comma: ", + default="", + ).split(",") + tpu_env = _ask_field( + "What environment variables do you wish to set in each pod, separated by a comma: ", + default="", + ).split(",") + + else: + main_training_function = "main" + if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config: + mixed_precision = None + else: + mixed_precision = _ask_options( + "Do you wish to use mixed precision?", + ["no", "fp16", "bf16", "fp8"], + _convert_mixed_precision, + ) + if mixed_precision == "fp8": + if not is_fp8_available(): + raise ValueError("FP8 (either Transformer Engine or MSAMP) is not installed on this machine.") + fp8_config = {} + fp8_config["backend"] = _ask_options( + "Which FP8 backend do you want to use?", + ["te", "msamp"], + _convert_fp8_backend, + ) + if fp8_config["backend"] == "TE": + if not is_transformer_engine_available(): + raise ValueError("TransformersEngine was selected, but it is not installed on this machine.") + fp8_config["use_autocast_during_eval"] = _ask_field( + "Do you want to use FP8 autocast during eval mode? Generally better metrics are found when this is disabled [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + ) + fp8_config["margin"] = _ask_field( + "What margin should be used for gradient scaling? [0]: ", + int, + default=0, + ) + fp8_config["interval"] = _ask_field( + "What interval should be used for for how often the scaling factor is recomputed? [1]: ", + int, + default=1, + ) + fp8_config["fp8_format"] = _ask_options( + "Which weight format should be used?", + ["HYBRID", "E4M3", "E5M2"], + lambda i: ["HYBRID", "E4M3", "E5M2"][i], + default=0, + ) + fp8_config["amax_history_length"] = _ask_field( + "What length of history should be used for the amax scaling factor computation? 
[1024]: ", + int, + default=1024, + ) + fp8_config["amax_compute_algorithm"] = _ask_options( + "Which algorithm should be used for the amax scaling factor computation?", + ["max", "most_recent"], + lambda x: "max" if x == 0 else "most_recent", + default=0, + ) + fp8_config["override_linear_precision"] = _ask_field( + "Do you want to to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + ) + if fp8_config["override_linear_precision"]: + fprop = _ask_field( + "Should `fprop` be executed in higher precision? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + ) + dgrad = _ask_field( + "Should `dgrad` be executed in higher precision? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + ) + wgrad = _ask_field( + "Should `wgrad` be executed in higher precision? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + ) + fp8_config["override_linear_precision"] = (fprop, dgrad, wgrad) + else: + fp8_config["override_linear_precision"] = (False, False, False) + + elif fp8_config["backend"] == "MSAMP": + if not is_msamp_available(): + raise ValueError("MSAMP was selected, but it is not installed on this machine.") + fp8_config["optimization_level"] = _ask_options( + "Which optimization level should be used?", + ["O1", "O2"], + lambda x: "O1" if x == 0 else "O2", + default=1, + ) + + if use_dynamo and mixed_precision == "no" and not use_cpu: + print( + "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." 
+ ) + + if distributed_type == DistributedType.XLA and mixed_precision == "bf16": + tpu_downcast_bf16 = _ask_field( + "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no" + ) + + return ClusterConfig( + compute_environment=ComputeEnvironment.LOCAL_MACHINE, + distributed_type=distributed_type, + num_processes=num_processes, + gpu_ids=gpu_ids, + mixed_precision=mixed_precision, + downcast_bf16=tpu_downcast_bf16, + machine_rank=machine_rank, + num_machines=num_machines, + main_process_ip=main_process_ip, + main_process_port=main_process_port, + main_training_function=main_training_function, + fp8_config=fp8_config, + deepspeed_config=deepspeed_config, + fsdp_config=fsdp_config, + parallelism_config=parallelism_config, + megatron_lm_config=megatron_lm_config, + ipex_config=ipex_config, + mpirun_config=mpirun_config, + use_cpu=use_cpu, + rdzv_backend=rdzv_backend, + same_network=same_network, + commands=tpu_commands, + command_file=tpu_command_file, + tpu_env=tpu_env, + tpu_name=tpu_name, + tpu_vm=tpu_vm, + tpu_zone=tpu_zone, + tpu_use_sudo=tpu_use_sudo, + tpu_use_cluster=tpu_use_cluster, + dynamo_config=dynamo_config, + debug=debug, + enable_cpu_affinity=enable_cpu_affinity, + ) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/config.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..72414f2abe62d76bd5133f4b0ed99bf34133f6f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/config.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +from accelerate.utils import ComputeEnvironment + +from .cluster import get_cluster_input +from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 +from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 +from .sagemaker import get_sagemaker_input + + +description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" + + +def get_user_input(): + compute_environment = _ask_options( + "In which compute environment are you running?", + ["This machine", "AWS (Amazon SageMaker)"], + _convert_compute_environment, + ) + if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + config = get_sagemaker_input() + else: + config = get_cluster_input() + return config + + +def config_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("config", description=description) + else: + parser = argparse.ArgumentParser("Accelerate config command", description=description) + + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + if subparsers is not None: + parser.set_defaults(func=config_command) + return parser + + +def config_command(args): + config = get_user_input() + if args.config_file is not None: + config_file = args.config_file + else: + if not os.path.isdir(cache_dir): + os.makedirs(cache_dir) + config_file = default_yaml_config_file + + if config_file.endswith(".json"): + config.to_json_file(config_file) + else: + config.to_yaml_file(config_file) + print(f"accelerate configuration saved at {config_file}") + + +def main(): + parser = config_command_parser() + args = parser.parse_args() + config_command(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py new file mode 100644 index 0000000000000000000000000000000000000000..75664a95ef6edd08bf7f1158c663cac9d44dd550 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional, Union + +import yaml + +from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType +from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION + + +hf_cache_home = os.path.expanduser( + os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface")) +) +cache_dir = os.path.join(hf_cache_home, "accelerate") +default_json_config_file = os.path.join(cache_dir, "default_config.yaml") +default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml") + +# For backward compatibility: the default config is the json one if it's the only existing file. +if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file): + default_config_file = default_yaml_config_file +else: + default_config_file = default_json_config_file + + +def load_config_from_file(config_file): + if config_file is not None: + if not os.path.isfile(config_file): + raise FileNotFoundError( + f"The passed configuration file `{config_file}` does not exist. " + "Please pass an existing file to `accelerate launch`, or use the default one " + "created through `accelerate config` and run `accelerate launch` " + "without the `--config_file` argument." 
+ ) + else: + config_file = default_config_file + with open(config_file, encoding="utf-8") as f: + if config_file.endswith(".json"): + if ( + json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE) + == ComputeEnvironment.LOCAL_MACHINE + ): + config_class = ClusterConfig + else: + config_class = SageMakerConfig + return config_class.from_json_file(json_file=config_file) + else: + if ( + yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE) + == ComputeEnvironment.LOCAL_MACHINE + ): + config_class = ClusterConfig + else: + config_class = SageMakerConfig + return config_class.from_yaml_file(yaml_file=config_file) + + +@dataclass +class BaseConfig: + compute_environment: ComputeEnvironment + distributed_type: Union[DistributedType, SageMakerDistributedType] + mixed_precision: str + use_cpu: bool + debug: bool + + def to_dict(self): + result = self.__dict__ + # For serialization, it's best to convert Enums to strings (or their underlying value type). 
+ + def _convert_enums(value): + if isinstance(value, Enum): + return value.value + if isinstance(value, dict): + if not bool(value): + return None + for key1, value1 in value.items(): + value[key1] = _convert_enums(value1) + return value + + for key, value in result.items(): + result[key] = _convert_enums(value) + result = {k: v for k, v in result.items() if v is not None} + return result + + @staticmethod + def process_config(config_dict): + """ + Processes `config_dict` and sets default values for any missing keys + """ + if "compute_environment" not in config_dict: + config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE + if "distributed_type" not in config_dict: + raise ValueError("A `distributed_type` must be specified in the config file.") + if "num_processes" not in config_dict and config_dict["distributed_type"] == DistributedType.NO: + config_dict["num_processes"] = 1 + if "mixed_precision" not in config_dict: + config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None + if "fp16" in config_dict: # Convert the config to the new format. + del config_dict["fp16"] + if "dynamo_backend" in config_dict: # Convert the config to the new format. 
+ dynamo_backend = config_dict.pop("dynamo_backend") + config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend} + if "use_cpu" not in config_dict: + config_dict["use_cpu"] = False + if "debug" not in config_dict: + config_dict["debug"] = False + if "enable_cpu_affinity" not in config_dict: + config_dict["enable_cpu_affinity"] = False + return config_dict + + @classmethod + def from_json_file(cls, json_file=None): + json_file = default_json_config_file if json_file is None else json_file + with open(json_file, encoding="utf-8") as f: + config_dict = json.load(f) + config_dict = cls.process_config(config_dict) + extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys())) + if len(extra_keys) > 0: + raise ValueError( + f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`" + " version or fix (and potentially remove) these keys from your config file." + ) + + return cls(**config_dict) + + def to_json_file(self, json_file): + with open(json_file, "w", encoding="utf-8") as f: + content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + f.write(content) + + @classmethod + def from_yaml_file(cls, yaml_file=None): + yaml_file = default_yaml_config_file if yaml_file is None else yaml_file + with open(yaml_file, encoding="utf-8") as f: + config_dict = yaml.safe_load(f) + config_dict = cls.process_config(config_dict) + extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys())) + if len(extra_keys) > 0: + raise ValueError( + f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`" + " version or fix (and potentially remove) these keys from your config file." 
+ ) + return cls(**config_dict) + + def to_yaml_file(self, yaml_file): + with open(yaml_file, "w", encoding="utf-8") as f: + yaml.safe_dump(self.to_dict(), f) + + def __post_init__(self): + if isinstance(self.compute_environment, str): + self.compute_environment = ComputeEnvironment(self.compute_environment) + if isinstance(self.distributed_type, str): + if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + self.distributed_type = SageMakerDistributedType(self.distributed_type) + else: + self.distributed_type = DistributedType(self.distributed_type) + if getattr(self, "dynamo_config", None) is None: + self.dynamo_config = {} + + +@dataclass +class ClusterConfig(BaseConfig): + num_processes: int = -1 # For instance if we use SLURM and the user manually passes it in + machine_rank: int = 0 + num_machines: int = 1 + gpu_ids: Optional[str] = None + main_process_ip: Optional[str] = None + main_process_port: Optional[int] = None + rdzv_backend: Optional[str] = "static" + same_network: Optional[bool] = False + main_training_function: str = "main" + enable_cpu_affinity: bool = False + + # args for FP8 training + fp8_config: dict = None + # args for deepspeed_plugin + deepspeed_config: dict = None + # args for fsdp + fsdp_config: dict = None + # args for parallelism config + parallelism_config: dict = None + # args for megatron_lm + megatron_lm_config: dict = None + # args for ipex + ipex_config: dict = None + # args for mpirun + mpirun_config: dict = None + # args for TPU + downcast_bf16: bool = False + + # args for TPU pods + tpu_name: str = None + tpu_zone: str = None + tpu_use_cluster: bool = False + tpu_use_sudo: bool = False + command_file: str = None + commands: list[str] = None + tpu_vm: list[str] = None + tpu_env: list[str] = None + + # args for dynamo + dynamo_config: dict = None + + def __post_init__(self): + if self.deepspeed_config is None: + self.deepspeed_config = {} + if self.fsdp_config is None: + self.fsdp_config = {} + if 
self.megatron_lm_config is None: + self.megatron_lm_config = {} + if self.ipex_config is None: + self.ipex_config = {} + if self.mpirun_config is None: + self.mpirun_config = {} + if self.fp8_config is None: + self.fp8_config = {} + if self.parallelism_config is None: + self.parallelism_config = {} + return super().__post_init__() + + +@dataclass +class SageMakerConfig(BaseConfig): + ec2_instance_type: str + iam_role_name: str + image_uri: Optional[str] = None + profile: Optional[str] = None + region: str = "us-east-1" + num_machines: int = 1 + gpu_ids: str = "all" + base_job_name: str = f"accelerate-sagemaker-{num_machines}" + pytorch_version: str = SAGEMAKER_PYTORCH_VERSION + transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION + py_version: str = SAGEMAKER_PYTHON_VERSION + sagemaker_inputs_file: str = None + sagemaker_metrics_file: str = None + additional_args: dict = None + dynamo_config: dict = None + enable_cpu_affinity: bool = False diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..24ee971383c8fdda1491e2b5795446790755ac70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +from ...utils.dataclasses import ( + ComputeEnvironment, + DistributedType, + DynamoBackend, + FP8BackendType, + PrecisionType, + SageMakerDistributedType, +) +from ..menu import BulletMenu + + +DYNAMO_BACKENDS = [ + "EAGER", + "AOT_EAGER", + "INDUCTOR", + "AOT_TS_NVFUSER", + "NVPRIMS_NVFUSER", + "CUDAGRAPHS", + "OFI", + "FX2TRT", + "ONNXRT", + "TENSORRT", + "AOT_TORCHXLA_TRACE_ONCE", + "TORHCHXLA_TRACE_ONCE", + "IPEX", + "TVM", +] + + +def _ask_field(input_text, convert_value=None, default=None, error_message=None): + ask_again = True + while ask_again: + result = input(input_text) + try: + if default is not None and len(result) == 0: + return default + return convert_value(result) if convert_value is not None else result + except Exception: + if error_message is not None: + print(error_message) + + +def _ask_options(input_text, options=[], convert_value=None, default=0): + menu = BulletMenu(input_text, options) + result = menu.run(default_choice=default) + return convert_value(result) if convert_value is not None else result + + +def _convert_compute_environment(value): + value = int(value) + return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value]) + + +def _convert_distributed_mode(value): + value = int(value) + return DistributedType( + [ + "NO", + "MULTI_CPU", + "MULTI_XPU", + "MULTI_HPU", + "MULTI_GPU", + "MULTI_NPU", + "MULTI_MLU", + "MULTI_SDAA", + "MULTI_MUSA", + "XLA", + ][value] + ) + + +def _convert_dynamo_backend(value): + value = int(value) + return DynamoBackend(DYNAMO_BACKENDS[value]).value + + +def _convert_mixed_precision(value): + value = int(value) + return PrecisionType(["no", "fp16", "bf16", "fp8"][value]) + + +def _convert_sagemaker_distributed_mode(value): + value = int(value) + return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value]) + + +def _convert_fp8_backend(value): + value = int(value) + return FP8BackendType(["TE", "MSAMP"][value]) + + +def _convert_yes_no_to_bool(value): 
+ return {"yes": True, "no": False}[value.lower()] + + +class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter): + """ + A custom formatter that will remove the usage line from the help message for subcommands. + """ + + def _format_usage(self, usage, actions, groups, prefix): + usage = super()._format_usage(usage, actions, groups, prefix) + usage = usage.replace(" [] ", "") + return usage diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/default.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/default.py new file mode 100644 index 0000000000000000000000000000000000000000..f5f267f8c47cccc80ce9ef53f970c2266472c117 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/default.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pathlib import Path + +import torch + +from ...utils import ( + is_hpu_available, + is_mlu_available, + is_musa_available, + is_npu_available, + is_sdaa_available, + is_xpu_available, +) +from .config_args import ClusterConfig, default_json_config_file +from .config_utils import SubcommandHelpFormatter + + +description = "Create a default config file for Accelerate with only a few flags set." 
+ + +def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file): + """ + Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also + set CPU if it is a CPU-only machine. + + Args: + mixed_precision (`str`, *optional*, defaults to "no"): + Mixed Precision to use. Should be one of "no", "fp16", or "bf16" + save_location (`str`, *optional*, defaults to `default_json_config_file`): + Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default + location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting + the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`. + """ + path = Path(save_location) + path.parent.mkdir(parents=True, exist_ok=True) + if path.exists(): + print( + f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." + ) + return False + mixed_precision = mixed_precision.lower() + if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: + raise ValueError( + f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}" + ) + config = { + "compute_environment": "LOCAL_MACHINE", + "mixed_precision": mixed_precision, + } + if is_mlu_available(): + num_mlus = torch.mlu.device_count() + config["num_processes"] = num_mlus + config["use_cpu"] = False + if num_mlus > 1: + config["distributed_type"] = "MULTI_MLU" + else: + config["distributed_type"] = "NO" + if is_sdaa_available(): + num_sdaas = torch.sdaa.device_count() + config["num_processes"] = num_sdaas + config["use_cpu"] = False + if num_sdaas > 1: + config["distributed_type"] = "MULTI_SDAA" + else: + config["distributed_type"] = "NO" + elif is_musa_available(): + num_musas = torch.musa.device_count() + config["num_processes"] = num_musas + config["use_cpu"] = False + if num_musas > 1: + config["distributed_type"] = "MULTI_MUSA" + else: + config["distributed_type"] = "NO" + elif is_hpu_available(): + num_hpus = torch.hpu.device_count() + config["num_processes"] = num_hpus + config["use_cpu"] = False + if num_hpus > 1: + config["distributed_type"] = "MULTI_HPU" + else: + config["distributed_type"] = "NO" + elif torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + config["num_processes"] = num_gpus + config["use_cpu"] = False + if num_gpus > 1: + config["distributed_type"] = "MULTI_GPU" + else: + config["distributed_type"] = "NO" + elif is_xpu_available(): + num_xpus = torch.xpu.device_count() + config["num_processes"] = num_xpus + config["use_cpu"] = False + if num_xpus > 1: + config["distributed_type"] = "MULTI_XPU" + else: + config["distributed_type"] = "NO" + elif is_npu_available(): + num_npus = torch.npu.device_count() + config["num_processes"] = num_npus + config["use_cpu"] = False + if num_npus > 1: + config["distributed_type"] = "MULTI_NPU" + else: + config["distributed_type"] = "NO" + else: + num_xpus = 0 + config["use_cpu"] = True + config["num_processes"] = 1 + config["distributed_type"] = "NO" + config["debug"] = False + config["enable_cpu_affinity"] = False + config = 
ClusterConfig(**config) + config.to_json_file(path) + return path + + +def default_command_parser(parser, parents): + parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter) + parser.add_argument( + "--config_file", + default=default_json_config_file, + help=( + "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + dest="save_location", + ) + + parser.add_argument( + "--mixed_precision", + choices=["no", "fp16", "bf16"], + type=str, + help="Whether or not to use mixed precision training. " + "Choose between FP16 and BF16 (bfloat16) training. " + "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", + default="no", + ) + parser.set_defaults(func=default_config_command) + return parser + + +def default_config_command(args): + config_file = write_basic_config(args.mixed_precision, args.save_location) + if config_file: + print(f"accelerate configuration saved at {config_file}") diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py new file mode 100644 index 0000000000000000000000000000000000000000..5092ef31fc4715f901be6c1e7bfe80c0b140d767 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os + +from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES +from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType +from ...utils.imports import is_boto3_available +from .config_args import SageMakerConfig +from .config_utils import ( + DYNAMO_BACKENDS, + _ask_field, + _ask_options, + _convert_dynamo_backend, + _convert_mixed_precision, + _convert_sagemaker_distributed_mode, + _convert_yes_no_to_bool, +) + + +if is_boto3_available(): + import boto3 # noqa: F401 + + +def _create_iam_role_for_sagemaker(role_name): + iam_client = boto3.client("iam") + + sagemaker_trust_policy = { + "Version": "2012-10-17", + "Statement": [ + {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} + ], + } + try: + # create the role, associated with the chosen trust policy + iam_client.create_role( + RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) + ) + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sagemaker:*", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetAuthorizationToken", + "cloudwatch:PutMetricData", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "s3:CreateBucket", + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:GetObject", 
+ "s3:PutObject", + ], + "Resource": "*", + } + ], + } + # attach policy to role + iam_client.put_role_policy( + RoleName=role_name, + PolicyName=f"{role_name}_policy_permission", + PolicyDocument=json.dumps(policy_document, indent=2), + ) + except iam_client.exceptions.EntityAlreadyExistsException: + print(f"role {role_name} already exists. Using existing one") + + +def _get_iam_role_arn(role_name): + iam_client = boto3.client("iam") + return iam_client.get_role(RoleName=role_name)["Role"]["Arn"] + + +def get_sagemaker_input(): + credentials_configuration = _ask_options( + "How do you want to authorize?", + ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], + int, + ) + aws_profile = None + if credentials_configuration == 0: + aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default") + os.environ["AWS_PROFILE"] = aws_profile + else: + print( + "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," + "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" + ) + aws_access_key_id = _ask_field("AWS Access Key ID: ") + os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id + + aws_secret_access_key = _ask_field("AWS Secret Access Key: ") + os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key + + aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1") + os.environ["AWS_DEFAULT_REGION"] = aws_region + + role_management = _ask_options( + "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", + ["Provide IAM Role name", "Create new IAM role using credentials"], + int, + ) + if role_management == 0: + iam_role_name = _ask_field("Enter your IAM role name: ") + else: + iam_role_name = "accelerate_sagemaker_execution_role" + print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials') + _create_iam_role_for_sagemaker(iam_role_name) + + 
is_custom_docker_image = _ask_field( + "Do you want to use custom Docker image? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + docker_image = None + if is_custom_docker_image: + docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower()) + + is_sagemaker_inputs_enabled = _ask_field( + "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + sagemaker_inputs_file = None + if is_sagemaker_inputs_enabled: + sagemaker_inputs_file = _ask_field( + "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", + lambda x: str(x).lower(), + ) + + is_sagemaker_metrics_enabled = _ask_field( + "Do you want to enable SageMaker metrics? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + sagemaker_metrics_file = None + if is_sagemaker_metrics_enabled: + sagemaker_metrics_file = _ask_field( + "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", + lambda x: str(x).lower(), + ) + + distributed_type = _ask_options( + "What is the distributed mode?", + ["No distributed training", "Data parallelism"], + _convert_sagemaker_distributed_mode, + ) + dynamo_config = {} + use_dynamo = _ask_field( + "Do you wish to optimize your script with torch dynamo?[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_dynamo: + prefix = "dynamo_" + dynamo_config[prefix + "backend"] = _ask_options( + "Which dynamo backend would you like to use?", + [x.lower() for x in DYNAMO_BACKENDS], + _convert_dynamo_backend, + default=2, + ) + use_custom_options = _ask_field( + "Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if use_custom_options: + dynamo_config[prefix + "mode"] = _ask_options( + "Which mode do you want to use?", + TORCH_DYNAMO_MODES, + lambda x: TORCH_DYNAMO_MODES[int(x)], + default="default", + ) + dynamo_config[prefix + "use_fullgraph"] = _ask_field( + "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + dynamo_config[prefix + "use_dynamic"] = _ask_field( + "Do you want to enable dynamic shape tracing? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + dynamo_config[prefix + "use_regional_compilation"] = _ask_field( + "Do you want to enable regional compilation? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + ec2_instance_query = "Which EC2 instance type you want to use for your training?" + if distributed_type != SageMakerDistributedType.NO: + ec2_instance_type = _ask_options( + ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] + ) + else: + ec2_instance_query += "? [ml.p3.2xlarge]:" + ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge") + + debug = False + if distributed_type != SageMakerDistributedType.NO: + debug = _ask_field( + "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + num_machines = 1 + if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): + num_machines = _ask_field( + "How many machines do you want use? 
[1]: ", + int, + default=1, + ) + + mixed_precision = _ask_options( + "Do you wish to use FP16 or BF16 (mixed precision)?", + ["no", "fp16", "bf16", "fp8"], + _convert_mixed_precision, + ) + + if use_dynamo and mixed_precision == "no": + print( + "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." + ) + + return SageMakerConfig( + image_uri=docker_image, + compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, + distributed_type=distributed_type, + use_cpu=False, + dynamo_config=dynamo_config, + ec2_instance_type=ec2_instance_type, + profile=aws_profile, + region=aws_region, + iam_role_name=iam_role_name, + mixed_precision=mixed_precision, + num_machines=num_machines, + sagemaker_inputs_file=sagemaker_inputs_file, + sagemaker_metrics_file=sagemaker_metrics_file, + debug=debug, + ) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/config/update.py b/venv/lib/python3.10/site-packages/accelerate/commands/config/update.py new file mode 100644 index 0000000000000000000000000000000000000000..5f025594b04ada3e3a78687befc5c1bc1d236adf --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/config/update.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pathlib import Path + +from .config_args import default_config_file, load_config_from_file +from .config_utils import SubcommandHelpFormatter + + +description = "Update an existing config file with the latest defaults while maintaining the old configuration." + + +def update_config(args): + """ + Update an existing config file with the latest defaults while maintaining the old configuration. + """ + config_file = args.config_file + if config_file is None and Path(default_config_file).exists(): + config_file = default_config_file + elif not Path(config_file).exists(): + raise ValueError(f"The passed config file located at {config_file} doesn't exist.") + config = load_config_from_file(config_file) + + if config_file.endswith(".json"): + config.to_json_file(config_file) + else: + config.to_yaml_file(config_file) + return config_file + + +def update_command_parser(parser, parents): + parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter) + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to the config file to update. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." 
+ ), + ) + + parser.set_defaults(func=update_config_command) + return parser + + +def update_config_command(args): + config_file = update_config(args) + print(f"Sucessfully updated the configuration file at {config_file}.") diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/env.py b/venv/lib/python3.10/site-packages/accelerate/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd2170aea8ba48a08e31c8031c0447825ec4797 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/env.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import platform +import subprocess + +import numpy as np +import psutil +import torch + +from accelerate import __version__ as version +from accelerate.commands.config import default_config_file, load_config_from_file + +from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_sdaa_available, is_xpu_available + + +def env_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("env") + else: + parser = argparse.ArgumentParser("Accelerate env command") + + parser.add_argument( + "--config_file", default=None, help="The config file to use for the default values in the launching script." 
+ ) + + if subparsers is not None: + parser.set_defaults(func=env_command) + return parser + + +def env_command(args): + pt_version = torch.__version__ + pt_cuda_available = torch.cuda.is_available() + pt_xpu_available = is_xpu_available() + pt_mlu_available = is_mlu_available() + pt_sdaa_available = is_sdaa_available() + pt_musa_available = is_musa_available() + pt_npu_available = is_npu_available() + + accelerator = "N/A" + if pt_cuda_available: + accelerator = "CUDA" + elif pt_xpu_available: + accelerator = "XPU" + elif pt_mlu_available: + accelerator = "MLU" + elif pt_sdaa_available: + accelerator = "SDAA" + elif pt_musa_available: + accelerator = "MUSA" + elif pt_npu_available: + accelerator = "NPU" + + accelerate_config = "Not found" + # Get the default from the config file. + if args.config_file is not None or os.path.isfile(default_config_file): + accelerate_config = load_config_from_file(args.config_file).to_dict() + + # if we can run which, get it + command = None + bash_location = "Not found" + if os.name == "nt": + command = ["where", "accelerate"] + elif os.name == "posix": + command = ["which", "accelerate"] + if command is not None: + bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip() + info = { + "`Accelerate` version": version, + "Platform": platform.platform(), + "`accelerate` bash location": bash_location, + "Python version": platform.python_version(), + "Numpy version": np.__version__, + "PyTorch version": f"{pt_version}", + "PyTorch accelerator": accelerator, + "System RAM": f"{psutil.virtual_memory().total / 1024**3:.2f} GB", + } + if pt_cuda_available: + info["GPU type"] = torch.cuda.get_device_name() + elif pt_xpu_available: + info["XPU type"] = torch.xpu.get_device_name() + elif pt_mlu_available: + info["MLU type"] = torch.mlu.get_device_name() + elif pt_sdaa_available: + info["SDAA type"] = torch.sdaa.get_device_name() + elif pt_musa_available: + info["MUSA type"] = torch.musa.get_device_name() + 
elif pt_npu_available: + info["CANN version"] = torch.version.cann + + print("\nCopy-and-paste the text below in your GitHub issue\n") + print("\n".join([f"- {prop}: {val}" for prop, val in info.items()])) + + print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:") + accelerate_config_str = ( + "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()]) + if isinstance(accelerate_config, dict) + else f"\t{accelerate_config}" + ) + print(accelerate_config_str) + + info["`Accelerate` configs"] = accelerate_config + + return info + + +def main() -> int: + parser = env_command_parser() + args = parser.parse_args() + env_command(args) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/estimate.py b/venv/lib/python3.10/site-packages/accelerate/commands/estimate.py new file mode 100644 index 0000000000000000000000000000000000000000..77571777c6c8e78b7729e4b29e1a98cea6e95de7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/estimate.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import torch
from huggingface_hub import model_info
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError

from accelerate import init_empty_weights
from accelerate.commands.utils import CustomArgumentParser
from accelerate.utils import (
    calculate_maximum_sizes,
    convert_bytes,
    is_timm_available,
    is_transformers_available,
)


if is_transformers_available():
    import transformers
    from transformers import AutoConfig, AutoModel

if is_timm_available():
    import timm


def verify_on_hub(repo: str, token: str = None):
    """Verify that the model exists on the Hub.

    Returns the `huggingface_hub` model info object on success, the string
    `"gated"` when access is restricted, or `"repo"` when the repo is missing.
    """
    try:
        return model_info(repo, token=token)
    except (OSError, GatedRepoError):
        return "gated"
    except RepositoryNotFoundError:
        return "repo"


def check_has_model(error):
    """
    Checks what library spawned `error` when a model is not found.

    Returns `"timm"`, `"transformers"`, or `"unknown"` based on the error type
    and message produced while loading.
    """
    if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
        return "timm"
    elif (
        is_transformers_available()
        and isinstance(error, OSError)
        and "does not appear to have a file named" in error.args[0]
    ):
        return "transformers"
    else:
        return "unknown"


def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
    """
    Creates an empty model in full precision from its parent library on the `Hub` to calculate the overall memory
    consumption.

    Args:
        model_name (`str`):
            The model name on the Hub
        library_name (`str`):
            The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
            metadata on the Hub to determine the library.
        trust_remote_code (`bool`, `optional`, defaults to `False`):
            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
        access_token (`str`, `optional`, defaults to `None`):
            The access token to use to access private or gated models on the Hub. (for use on the Gradio app)

    Returns:
        `torch.nn.Module`: The torch model that has been initialized on the `meta` device.

    Raises:
        GatedRepoError: when the repo exists but the caller lacks access.
        RepositoryNotFoundError: when the repo does not exist.
        ValueError: when no library metadata is available or the library is unsupported.
    """
    # NOTE: kept in `repo_info` (not `model_info`) so the imported
    # `huggingface_hub.model_info` function is not shadowed.
    repo_info = verify_on_hub(model_name, access_token)
    # Simplified errors
    if repo_info == "gated":
        raise GatedRepoError(
            f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
        )
    elif repo_info == "repo":
        raise RepositoryNotFoundError(
            f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
            " make sure you are authenticated via `huggingface-cli login` and have access."
        )
    if library_name is None:
        library_name = getattr(repo_info, "library_name", False)
        if not library_name:
            raise ValueError(
                f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
            )
    if library_name == "transformers":
        if not is_transformers_available():
            raise ImportError(
                f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
            )
        print(f"Loading pretrained config for `{model_name}` from `transformers`...")
        if repo_info.config is None:
            raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")

        auto_map = repo_info.config.get("auto_map", False)
        config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
        with init_empty_weights():
            # remote code could specify a specific `AutoModel` class in the `auto_map`
            constructor = AutoModel
            if isinstance(auto_map, dict):
                value = None
                for key in auto_map.keys():
                    if key.startswith("AutoModelFor"):
                        value = key
                        break
                if value is not None:
                    constructor = getattr(transformers, value)
            # we need to pass the dtype, otherwise it is going to use the torch_dtype that is saved in the config
            model = constructor.from_config(config, torch_dtype=torch.float32, trust_remote_code=trust_remote_code)
    elif library_name == "timm":
        if not is_timm_available():
            raise ImportError(
                f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
            )
        print(f"Loading pretrained config for `{model_name}` from `timm`...")
        with init_empty_weights():
            model = timm.create_model(model_name, pretrained=False)
    else:
        raise ValueError(
            f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
        )
    return model


def create_ascii_table(headers: list, rows: list, title: str):
    "Creates a pretty table from a list of rows, minimal version of `tabulate`."
    sep_char, in_between = "│", "─"
    # Column width = widest cell in each column (header included).
    column_widths = []
    for i in range(len(headers)):
        column_values = [row[i] for row in rows] + [headers[i]]
        max_column_width = max(len(value) for value in column_values)
        column_widths.append(max_column_width)

    # Index off `headers` (not `rows[0]`) so an empty `rows` list cannot raise
    # an IndexError and the format count always matches the header count.
    formats = [f"%{column_widths[i]}s" for i in range(len(headers))]

    pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
    diff = 0

    def make_row(left_char, middle_char, right_char):
        # One horizontal border line; `diff` pads the last column for long titles.
        return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"

    separator = make_row("├", "┼", "┤")
    if len(title) > sum(column_widths):
        # Title wider than the table: widen the last column to fit it.
        diff = abs(len(title) - len(separator))
        column_widths[-1] += diff

    # Update with diff
    separator = make_row("├", "┼", "┤")
    initial_rows = [
        make_row("┌", in_between, "┐"),
        f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
        make_row("├", "┬", "┤"),
    ]
    table = "\n".join(initial_rows) + "\n"
    column_widths[-1] += diff
    centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
    table += f"{pattern % tuple(centered_line)}\n{separator}\n"
    for i, line in enumerate(rows):
        centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
        table += f"{pattern % tuple(centered_line)}\n"
    table += f"└{'┴'.join([in_between * n for n in column_widths])}┘"

    return table


def estimate_command_parser(subparsers=None):
    """Build the argument parser for `accelerate estimate-memory`.

    When `subparsers` is given, registers the `estimate-memory` subcommand on
    it; otherwise returns a standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("estimate-memory")
    else:
        parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")

    parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
    parser.add_argument(
        "--library_name",
        type=str,
        help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
        choices=["timm", "transformers"],
    )
    parser.add_argument(
        "--dtypes",
        type=str,
        nargs="+",
        default=["float32", "float16", "int8", "int4"],
        help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
        choices=["float32", "float16", "int8", "int4"],
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
        should only be used for repositories you trust and in which you have read the code, as it will execute
        code present on the Hub on your local machine.""",
        default=False,
    )

    if subparsers is not None:
        parser.set_defaults(func=estimate_command)
    return parser


def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
    """
    Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
    1.

    Args:
        bytes (`int`):
            The size of the model being trained.
        mixed_precision (`str`):
            The mixed precision that would be ran.
        msamp_config (`str`):
            The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
    """
    # -1 marks "not applicable"; callers render those entries as "N/A".
    memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
    fp32_size = bytes
    fp16_size = bytes // 2

    if mixed_precision == "float32":
        memory_sizes["model"] = fp32_size
        memory_sizes["gradients"] = fp32_size
        memory_sizes["optimizer"] = fp32_size * 2
        memory_sizes["step"] = fp32_size * 4
    elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
        # With native `TransformersEngine`, there is no memory savings with FP8
        # With mixed precision training, the model has weights stored
        # in FP16 and FP32
        memory_sizes["model"] = fp32_size
        # 1.5 from weight gradient + computation (GEMM)
        memory_sizes["gradients"] = fp32_size + fp16_size
        # 2x from optimizer states
        memory_sizes["optimizer"] = fp32_size * 2  # Optimizer states
        memory_sizes["step"] = memory_sizes["optimizer"]
    return memory_sizes


def gather_data(args):
    "Creates an empty model and gathers the data for the sizes"
    try:
        model = create_empty_model(
            args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
        )
    except (RuntimeError, OSError) as e:
        library = check_has_model(e)
        if library != "unknown":
            raise RuntimeError(
                f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
            )
        raise e

    total_size, largest_layer = calculate_maximum_sizes(model)

    data = []

    for dtype in args.dtypes:
        dtype_total_size = total_size
        dtype_largest_layer = largest_layer[0]
        # Training estimate is intentionally computed on the full-precision
        # size before the per-dtype scaling below.
        dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
        if dtype == "float16":
            dtype_total_size /= 2
            dtype_largest_layer /= 2
        elif dtype == "int8":
            dtype_total_size /= 4
            dtype_largest_layer /= 4
        elif dtype == "int4":
            dtype_total_size /= 8
            dtype_largest_layer /= 8
        data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
    return data


def estimate_command(args):
    """Run the memory estimate for `args.model_name` and print an ASCII table."""
    data = gather_data(args)
    for row in data:
        for i, item in enumerate(row):
            if isinstance(item, (int, float)):
                row[i] = convert_bytes(item)
            elif isinstance(item, dict):
                training_usage = max(item.values())
                row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"

    headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]

    title = f"Memory Usage for loading `{args.model_name}`"
    table = create_ascii_table(headers, data, title)
    print(table)


def main():
    """CLI entry point: parse args and run the estimate."""
    parser = estimate_command_parser()
    args = parser.parse_args()
    estimate_command(args)


if __name__ == "__main__":
    main()

# ----- diff residue: next file in dump is venv/lib/python3.10/site-packages/accelerate/commands/launch.py -----
#!/usr/bin/env python

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import importlib +import logging +import os +import subprocess +import sys +from pathlib import Path + +import psutil +import torch + +from accelerate.commands.config import default_config_file, load_config_from_file +from accelerate.commands.config.config_args import SageMakerConfig +from accelerate.commands.config.config_utils import DYNAMO_BACKENDS +from accelerate.commands.utils import CustomArgumentParser +from accelerate.state import get_int_from_env +from accelerate.utils import ( + ComputeEnvironment, + DistributedType, + PrepareForLaunch, + _filter_args, + check_cuda_p2p_ib_support, + convert_dict_to_env_variables, + is_bf16_available, + is_deepspeed_available, + is_hpu_available, + is_mlu_available, + is_musa_available, + is_npu_available, + is_rich_available, + is_sagemaker_available, + is_sdaa_available, + is_torch_xla_available, + is_xpu_available, + patch_environment, + prepare_deepspeed_cmd_env, + prepare_multi_gpu_env, + prepare_sagemager_args_inputs, + prepare_simple_launcher_cmd_env, + prepare_tpu, + str_to_bool, +) +from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES + + +if is_rich_available(): + from rich import get_console + from rich.logging import RichHandler + + FORMAT = "%(message)s" + logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) + + +logger = logging.getLogger(__name__) + + +options_to_group = { + "multi_gpu": "Distributed GPUs", + "tpu": "TPU", + "use_deepspeed": "DeepSpeed Arguments", + "use_fsdp": "FSDP Arguments", + "use_megatron_lm": 
"Megatron-LM Arguments", + "fp8_backend": "FP8 Arguments", +} + + +def clean_option(option): + "Finds all cases of - after the first two characters and changes them to _" + if "fp8_backend" in option: + option = "--fp8_backend" + if option.startswith("--"): + return option[2:].replace("-", "_") + + +class CustomHelpFormatter(argparse.HelpFormatter): + """ + This is a custom help formatter that will hide all arguments that are not used in the command line when the help is + called. This is useful for the case where the user is using a specific platform and only wants to see the arguments + for that platform. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.titles = [ + "Hardware Selection Arguments", + "Resource Selection Arguments", + "Training Paradigm Arguments", + "positional arguments", + "optional arguments", + ] + + def add_argument(self, action: argparse.Action): + if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: + args = sys.argv[2:] + else: + args = sys.argv[1:] + + if len(args) > 1: + args = list(map(clean_option, args)) + used_platforms = [arg for arg in args if arg in options_to_group.keys()] + used_titles = [options_to_group[o] for o in used_platforms] + if action.container.title not in self.titles + used_titles: + action.help = argparse.SUPPRESS + elif action.container.title == "Hardware Selection Arguments": + if set(action.option_strings).isdisjoint(set(args)): + action.help = argparse.SUPPRESS + else: + action.help = action.help + " (currently selected)" + elif action.container.title == "Training Paradigm Arguments": + if set(action.option_strings).isdisjoint(set(args)): + action.help = argparse.SUPPRESS + else: + action.help = action.help + " (currently selected)" + + action.option_strings = [s for s in action.option_strings if "-" not in s[2:]] + super().add_argument(action) + + def end_section(self): + if len(self._current_section.items) < 2: + self._current_section.items = [] + 
self._current_section.heading = "" + super().end_section() + + +def launch_command_parser(subparsers=None): + description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" + if subparsers is not None: + parser = subparsers.add_parser( + "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter + ) + else: + parser = CustomArgumentParser( + "Accelerate launch command", + description=description, + add_help=False, + allow_abbrev=False, + formatter_class=CustomHelpFormatter, + ) + + parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") + + parser.add_argument( + "--config_file", + default=None, + help="The config file to use for the default values in the launching script.", + ) + parser.add_argument( + "--quiet", + "-q", + action="store_true", + help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", + ) + # Hardware selection arguments + hardware_args = parser.add_argument_group( + "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." + ) + hardware_args.add_argument( + "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." + ) + hardware_args.add_argument( + "--multi_gpu", + default=False, + action="store_true", + help="Whether or not this should launch a distributed GPU training.", + ) + hardware_args.add_argument( + "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." + ) + # Resource selection arguments + resource_args = parser.add_argument_group( + "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." 
+ ) + resource_args.add_argument( + "--mixed_precision", + type=str, + choices=["no", "fp16", "bf16", "fp8"], + help="Whether or not to use mixed precision training. " + "Choose between FP16 and BF16 (bfloat16) training. " + "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", + ) + resource_args.add_argument( + "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." + ) + resource_args.add_argument( + "--num_machines", type=int, default=None, help="The total number of machines used in this training." + ) + resource_args.add_argument( + "--num_cpu_threads_per_process", + type=int, + default=None, + help="The number of CPU threads per process. Can be tuned for optimal performance.", + ) + resource_args.add_argument( + "--enable_cpu_affinity", + default=False, + action="store_true", + help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.", + ) + # Dynamo arguments + resource_args.add_argument( + "--dynamo_backend", + type=str, + choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], + help="Choose a backend to optimize your training with dynamo, see more at " + "https://github.com/pytorch/torchdynamo.", + ) + resource_args.add_argument( + "--dynamo_mode", + type=str, + default="default", + choices=TORCH_DYNAMO_MODES, + help="Choose a mode to optimize your training with dynamo.", + ) + resource_args.add_argument( + "--dynamo_use_fullgraph", + default=False, + action="store_true", + help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs", + ) + resource_args.add_argument( + "--dynamo_use_dynamic", + default=False, + action="store_true", + help="Whether to enable dynamic shape tracing.", + ) + resource_args.add_argument( + "--dynamo_use_regional_compilation", + default=False, + action="store_true", + help="Whether to enable regional compilation.", + ) + + # Training Paradigm arguments + 
paradigm_args = parser.add_argument_group( + "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used." + ) + paradigm_args.add_argument( + "--use_deepspeed", + default=False, + action="store_true", + help="Whether to use deepspeed.", + ) + paradigm_args.add_argument( + "--use_fsdp", + default=False, + action="store_true", + help="Whether to use fsdp.", + ) + paradigm_args.add_argument( + "--use_parallelism_config", + default=False, + action="store_true", + help="Whether to use the parallelism config to configure the N-d distributed training.", + ) + paradigm_args.add_argument( + "--use_megatron_lm", + default=False, + action="store_true", + help="Whether to use Megatron-LM.", + ) + + paradigm_args.add_argument( + "--use_xpu", + default=None, + action="store_true", + help="Whether to use IPEX plugin to speed up training on XPU specifically. This argument is deprecated and ignored, will be removed in Accelerate v1.20.", + ) + + # distributed GPU training arguments + distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") + distributed_args.add_argument( + "--gpu_ids", + default=None, + help="What GPUs (by id) should be used for training on this machine as a comma-separated list", + ) + distributed_args.add_argument( + "--same_network", + default=False, + action="store_true", + help="Whether all machines used for multinode training exist on the same local network.", + ) + distributed_args.add_argument( + "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." + ) + distributed_args.add_argument( + "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." 
+ ) + distributed_args.add_argument( + "--main_process_port", + type=int, + default=None, + help="The port to use to communicate with the machine of rank 0.", + ) + distributed_args.add_argument( + "-t", + "--tee", + default="0", + type=str, + help="Tee std streams into a log file and also to console.", + ) + distributed_args.add_argument( + "--log_dir", + type=str, + default=None, + help=( + "Base directory to use for log files when using torchrun/torch.distributed.run as launcher. " + "Use with --tee to redirect std streams info log files." + ), + ) + distributed_args.add_argument( + "--role", + type=str, + default="default", + help="User-defined role for the workers.", + ) + # Rendezvous related arguments + distributed_args.add_argument( + "--rdzv_backend", + type=str, + default="static", + help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", + ) + distributed_args.add_argument( + "--rdzv_conf", + type=str, + default="", + help="Additional rendezvous configuration (=,=,...).", + ) + distributed_args.add_argument( + "--max_restarts", + type=int, + default=0, + help="Maximum number of worker group restarts before failing.", + ) + distributed_args.add_argument( + "--monitor_interval", + type=float, + default=0.1, + help="Interval, in seconds, to monitor the state of workers.", + ) + parser.add_argument( + "-m", + "--module", + action="store_true", + help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", + ) + parser.add_argument( + "--no_python", + action="store_true", + help="Skip prepending the training script with 'python' - just execute it directly. 
Useful when the script is not a Python script.", + ) + + # TPU arguments + tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") + tpu_args.add_argument( + "--tpu_cluster", + action="store_true", + dest="tpu_use_cluster", + help="Whether to use a GCP TPU pod for training.", + ) + tpu_args.add_argument( + "--no_tpu_cluster", + action="store_false", + dest="tpu_use_cluster", + help="Should not be passed explicitly, this is for internal use only.", + ) + tpu_args.add_argument( + "--tpu_use_sudo", + action="store_true", + help="Whether to use `sudo` when running the TPU training script in each pod.", + ) + tpu_args.add_argument( + "--vm", + type=str, + action="append", + help=( + "List of single Compute VM instance names. " + "If not provided we assume usage of instance groups. For TPU pods." + ), + ) + tpu_args.add_argument( + "--env", + type=str, + action="append", + help="List of environment variables to set on the Compute VM instances. For TPU pods.", + ) + tpu_args.add_argument( + "--main_training_function", + type=str, + default=None, + help="The name of the main function to be executed in your script (only for TPU training).", + ) + tpu_args.add_argument( + "--downcast_bf16", + action="store_true", + help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.", + ) + + # DeepSpeed arguments + deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") + deepspeed_args.add_argument( + "--deepspeed_config_file", + default=None, + type=str, + help="DeepSpeed config file.", + ) + deepspeed_args.add_argument( + "--zero_stage", + default=None, + type=int, + help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). 
" + "If unspecified, will default to `2`.", + ) + deepspeed_args.add_argument( + "--offload_optimizer_device", + default=None, + type=str, + help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_param_device", + default=None, + type=str, + help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_optimizer_nvme_path", + default=None, + type=str, + help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_param_nvme_path", + default=None, + type=str, + help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--gradient_accumulation_steps", + default=None, + type=int, + help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `1`.", + ) + deepspeed_args.add_argument( + "--gradient_clipping", + default=None, + type=float, + help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `1.0`.", + ) + deepspeed_args.add_argument( + "--zero3_init_flag", + default=None, + type=str, + help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " + "Only applicable with DeepSpeed ZeRO Stage-3. 
If unspecified, will default to `true`.", + ) + deepspeed_args.add_argument( + "--zero3_save_16bit_model", + default=None, + type=str, + help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " + "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", + ) + deepspeed_args.add_argument( + "--deepspeed_hostfile", + default=None, + type=str, + help="DeepSpeed hostfile for configuring multi-node compute resources.", + ) + deepspeed_args.add_argument( + "--deepspeed_exclusion_filter", + default=None, + type=str, + help="DeepSpeed exclusion filter string when using mutli-node setup.", + ) + deepspeed_args.add_argument( + "--deepspeed_inclusion_filter", + default=None, + type=str, + help="DeepSpeed inclusion filter string when using mutli-node setup.", + ) + deepspeed_args.add_argument( + "--deepspeed_multinode_launcher", + default=None, + type=str, + help="DeepSpeed multi-node launcher to use, e.g. `pdsh`, `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). If unspecified, will default to `pdsh`.", + ) + deepspeed_args.add_argument( + "--deepspeed_moe_layer_cls_names", + default=None, + type=str, + help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..." + " (useful only when `use_deepspeed` flag is passed).", + ) + + # fsdp arguments + fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") + fsdp_args.add_argument( + "--fsdp_version", + type=str, + default="1", + choices=["1", "2"], + help="FSDP version to use. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_offload_params", + default="false", + type=str, + help="Decides Whether (true|false) to offload parameters and gradients to CPU. 
(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_min_num_params", + type=int, + default=1e8, + help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).", + ) + # We enable this for backwards compatibility, throw a warning if this is set in `FullyShardedDataParallelPlugin` + fsdp_args.add_argument( + "--fsdp_sharding_strategy", + type=str, + default="FULL_SHARD", + help="FSDP's sharding strategy. (useful only when `use_fsdp` flag is passed and `fsdp_version=1`).", + ) + fsdp_args.add_argument( + "--fsdp_reshard_after_forward", + type=str, + default="true", + help="FSDP's Reshard After Forward Strategy. (useful only when `use_fsdp` flag is passed). Supports either boolean (FSDP2) or `FULL_SHARD | SHARD_GRAD_OP | NO_RESHARD` (FSDP1).", + ) + fsdp_args.add_argument( + "--fsdp_auto_wrap_policy", + type=str, + default=None, + help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_transformer_layer_cls_to_wrap", + default=None, + type=str, + help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... " + "(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_backward_prefetch", + default=None, + type=str, + help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_state_dict_type", + default=None, + type=str, + help="FSDP's state dict type. 
(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_forward_prefetch", + default="false", + type=str, + help="If True, then FSDP explicitly prefetches the next upcoming " + "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_use_orig_params", + default="true", + type=str, + help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres." + " (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_cpu_ram_efficient_loading", + default="true", + type=str, + help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " + "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. " + "(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_sync_module_states", + default="true", + type=str, + help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." + " (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_activation_checkpointing", + default="false", + type=str, + help="Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).", + ) + + # megatron_lm args + megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") + megatron_lm_args.add_argument( + "--megatron_lm_tp_degree", + type=int, + default=1, + help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_pp_degree", + type=int, + default=1, + help="Megatron-LM's Pipeline Parallelism (PP) degree. 
(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_num_micro_batches", + type=int, + default=None, + help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_sequence_parallelism", + default=None, + type=str, + help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_recompute_activations", + default=None, + type=str, + help="Decides Whether (true|false) to enable Selective Activation Recomputation. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_use_distributed_optimizer", + default=None, + type=str, + help="Decides Whether (true|false) to use distributed optimizer " + "which shards optimizer state and gradients across Data Pralellel (DP) ranks. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_gradient_clipping", + default=1.0, + type=float, + help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). " + "(useful only when `use_megatron_lm` flag is passed).", + ) + + # FP8 arguments + fp8_args = parser.add_argument_group( + "FP8 Arguments", "Arguments related to FP8 training (requires `--mixed_precision=fp8`)" + ) + fp8_args.add_argument( + "--fp8_backend", + type=str, + choices=["te", "msamp"], + help="Choose a backend to train with FP8 (te: TransformerEngine, msamp: MS-AMP)", + ) + fp8_args.add_argument( + "--fp8_use_autocast_during_eval", + default=False, + action="store_true", + help="Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). 
Generally better metrics are found when this is not passed.", + ) + fp8_args.add_argument( + "--fp8_margin", + type=int, + default=0, + help="The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).", + ) + fp8_args.add_argument( + "--fp8_interval", + type=int, + default=1, + help="The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).", + ) + fp8_args.add_argument( + "--fp8_format", + type=str, + default="HYBRID", + choices=["HYBRID", "E4M3", "E5M2"], + help="The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).", + ) + fp8_args.add_argument( + "--fp8_amax_history_len", + type=int, + default=1024, + help="The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).", + ) + fp8_args.add_argument( + "--fp8_amax_compute_algo", + type=str, + default="most_recent", + choices=["max", "most_recent"], + help="The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).", + ) + fp8_args.add_argument( + "--fp8_override_linear_precision", + type=lambda x: tuple(map(str_to_bool, x.split(","))), + default=(False, False, False), + help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. 
Should be passed in a comma-separated string of booleans (useful only when `--fp8_backend=te` is passed).", + ) + fp8_args.add_argument( + "--fp8_opt_level", + type=str, + default="O2", + choices=["O1", "O2"], + help="What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed).", + ) + + # AWS arguments + aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") + aws_args.add_argument( + "--aws_access_key_id", + type=str, + default=None, + help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", + ) + aws_args.add_argument( + "--aws_secret_access_key", + type=str, + default=None, + help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", + ) + parser.add_argument( + "--debug", + action="store_true", + help="Whether to print out the torch.distributed stack trace when something fails.", + ) + parser.add_argument( + "training_script", + type=str, + help=( + "The full path to the script to be launched in parallel, followed by all the arguments for the training " + "script." + ), + ) + + # MPI arguments + mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") + mpirun_args.add_argument( + "--mpirun_hostfile", + type=str, + default=None, + help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. 
This will " + "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", + ) + mpirun_args.add_argument( + "--mpirun_ccl", + type=int, + default=1, + help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", + ) + + # ParallelismConfig arguments + parallelism_config_args = parser.add_argument_group( + "ParallelismConfig Arguments", + "Arguments related to the ParallelismConfig used for distributed training.", + ) + parallelism_config_args.add_argument( + "--parallelism_config_dp_replicate_size", + type=int, + default=1, + help="The number of processes for data parallel training. Defaults to 1 (no data parallelism).", + ) + + parallelism_config_args.add_argument( + "--parallelism_config_dp_shard_size", + type=int, + default=1, + help="The number of processes for FSDP sharding. Defaults to 1 (No FSDP sharding).", + ) + + parallelism_config_args.add_argument( + "--parallelism_config_tp_size", + type=int, + default=1, + help="The number of processes for tensor parallel training. Defaults to 1 (no tensor parallelism).", + ) + + parallelism_config_args.add_argument( + "--parallelism_config_cp_size", + type=int, + default=1, + help="The number of processese for context parallel training. Defaults to 1 (no context parallelism).", + ) + parallelism_config_args.add_argument( + "--parallelism_config_cp_comm_strategy", + type=str, + default="allgather", + help="The communication strategy for context parallel training. Defaults to 'allgather'. 
Other option is alltoall", + ) + + # Other arguments of the training scripts + parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") + + if subparsers is not None: + parser.set_defaults(func=launch_command) + return parser + + +def simple_launcher(args): + cmd, current_env = prepare_simple_launcher_cmd_env(args) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + + +def multi_gpu_launcher(args): + import torch.distributed.run as distrib_run + + current_env = prepare_multi_gpu_env(args) + if not check_cuda_p2p_ib_support(): + message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." + warn = False + if "NCCL_P2P_DISABLE" not in current_env: + current_env["NCCL_P2P_DISABLE"] = "1" + warn = True + if "NCCL_IB_DISABLE" not in current_env: + current_env["NCCL_IB_DISABLE"] = "1" + warn = True + if warn: + logger.warning(message) + + debug = getattr(args, "debug", False) + args = _filter_args( + args, + distrib_run.get_args_parser(), + ["--training_script", args.training_script, "--training_script_args", args.training_script_args], + ) + + with patch_environment(**current_env): + try: + distrib_run.run(args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def deepspeed_launcher(args): + import torch.distributed.run as distrib_run + + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") + else: + from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME + + cmd, 
current_env = prepare_deepspeed_cmd_env(args) + if not check_cuda_p2p_ib_support(): + message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." + warn = False + if "NCCL_P2P_DISABLE" not in current_env: + current_env["NCCL_P2P_DISABLE"] = "1" + warn = True + if "NCCL_IB_DISABLE" not in current_env: + current_env["NCCL_IB_DISABLE"] = "1" + warn = True + if warn: + logger.warning(message) + + if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f: + valid_env_items = convert_dict_to_env_variables(current_env) + if len(valid_env_items) > 1: + f.writelines(valid_env_items) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + else: + debug = getattr(args, "debug", False) + args = _filter_args( + args, + distrib_run.get_args_parser(), + ["--training_script", args.training_script, "--training_script_args", args.training_script_args], + ) + with patch_environment(**current_env): + try: + distrib_run.run(args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def tpu_launcher(args): + import torch_xla.distributed.xla_multiprocessing as xmp + + if args.no_python: + raise ValueError("--no_python cannot be used with TPU launcher") + + args, current_env = prepare_tpu(args, {}) + + if args.module: + mod_name = args.training_script + else: + # Import training_script as a module + script_path = Path(args.training_script) + sys.path.append(str(script_path.parent.resolve())) + mod_name = script_path.stem + + mod = 
importlib.import_module(mod_name) + if not hasattr(mod, args.main_training_function): + raise ValueError( + f"Your training script should have a function named {args.main_training_function}, or you should pass a " + "different value to `--main_training_function`." + ) + + # Patch sys.argv + sys.argv = [mod.__file__] + args.training_script_args + + main_function = getattr(mod, args.main_training_function) + with patch_environment(**current_env): + xmp.spawn(PrepareForLaunch(main_function), args=()) + + +def tpu_pod_launcher(args): + from torch_xla.distributed import xla_dist + + current_env = {} + args, current_env = prepare_tpu(args, current_env, True) + debug = getattr(args, "debug", False) + + training_script = args.training_script + training_script_args = args.training_script_args + new_args = _filter_args( + args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"] + ) + + if args.tpu_use_sudo: + new_cmd = ["sudo"] + else: + new_cmd = [] + + new_cmd += [ + "accelerate-launch", + "--tpu", + "--no_tpu_cluster", + "--num_machines", + "1", + "--mixed_precision", + "no", + "--dynamo_backend", + "no", + "--num_processes", + str(args.num_processes), + "--main_training_function", + str(args.main_training_function), + training_script, + ] + training_script_args + + new_args.positional = new_cmd + bad_flags = "" + for arg in vars(new_args): + if arg.startswith("docker_"): + value = getattr(new_args, arg) + if value != "" and value is not None: + bad_flags += f'{arg}="{value}"\n' + if bad_flags != "": + raise ValueError( + f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}" + ) + new_args.env = [f"{k}={v}" for k, v in current_env.items()] + new_args.env.append("ACCELERATE_IN_TPU_POD=1") + try: + xla_dist.resolve_and_execute(new_args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using 
--debug, `torch_xla.xla_dist` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): + if not is_sagemaker_available(): + raise ImportError( + "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" + ) + if args.module or args.no_python: + raise ValueError( + "SageMaker requires a python training script file and cannot be used with --module or --no_python" + ) + + from sagemaker.huggingface import HuggingFace + + args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args) + + huggingface_estimator = HuggingFace(**args) + + huggingface_estimator.fit(inputs=sagemaker_inputs) + print(f"You can find your model data at: {huggingface_estimator.model_data}") + + +def _validate_launch_command(args): + # Sanity checks + if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1: + raise ValueError( + "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." + ) + if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): + raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") + + if (not args.use_fsdp or args.fsdp_version == 1) and args.use_parallelism_config: + raise ValueError("You cannot use `--use_parallelism_config` without `--use_fsdp` and `--fsdp_version=2`. ") + + defaults = None + warned = [] + mp_from_config_flag = False + # Get the default from the config file. 
+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: + defaults = load_config_from_file(args.config_file) + if ( + not args.multi_gpu + and not args.tpu + and not args.tpu_use_cluster + and not args.use_deepspeed + and not args.use_fsdp + and not args.use_megatron_lm + ): + args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED + args.multi_gpu = ( + True + if defaults.distributed_type + in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + ) + else False + ) + args.tpu = defaults.distributed_type == DistributedType.XLA + args.use_fsdp = defaults.distributed_type == DistributedType.FSDP + args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM + args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False + args.use_parallelism_config = defaults.parallelism_config != {} + if args.gpu_ids is None: + if defaults.gpu_ids is not None: + args.gpu_ids = defaults.gpu_ids + else: + args.gpu_ids = "all" + + if args.multi_gpu and args.num_machines is None: + args.num_machines = defaults.num_machines + + if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1: + raise ValueError( + "Less than two GPU ids were configured and tried to run on on multiple GPUs. " + "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`." 
+ ) + if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: + # Update args with the defaults + for name, attr in defaults.__dict__.items(): + if isinstance(attr, dict): + # Copy defaults.somedict.somearg to args.somearg and + # defaults.fsdp_config.x to args.fsdp_x + for key, value in attr.items(): + if name == "fsdp_config" and not key.startswith("fsdp"): + key = "fsdp_" + key + elif name == "fp8_config" and not key.startswith("fp8"): + key = "fp8_" + key + if hasattr(args, "nondefault") and key not in args.nondefault: + setattr(args, key, value) + elif ( + name not in ["compute_environment", "mixed_precision", "distributed_type"] + and getattr(args, name, None) is None + ): + # Those args are handled separately + setattr(args, name, attr) + if not args.debug: + args.debug = defaults.debug + + if not args.mixed_precision: + if defaults.mixed_precision is None: + args.mixed_precision = "no" + else: + args.mixed_precision = defaults.mixed_precision + mp_from_config_flag = True + else: + native_amp = is_bf16_available(True) + if ( + args.mixed_precision == "bf16" + and not native_amp + and not (args.tpu and is_torch_xla_available(check_is_tpu=True)) + ): + raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") + + # Silently set the default here + if args.dynamo_backend is None: + args.dynamo_backend = "no" + if args.num_processes == -1: + raise ValueError("You need to manually pass in `--num_processes` using this config yaml.") + else: + if args.num_processes is None: + if is_xpu_available(): + args.num_processes = torch.xpu.device_count() + elif is_mlu_available(): + args.num_processes = torch.mlu.device_count() + elif is_sdaa_available(): + args.num_processes = torch.sdaa.device_count() + elif is_musa_available(): + args.num_processes = torch.musa.device_count() + elif is_npu_available(): + args.num_processes = torch.npu.device_count() + elif is_hpu_available(): + args.num_processes = torch.hpu.device_count() + 
else: + args.num_processes = torch.cuda.device_count() + warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`") + if args.debug is None: + args.debug = False + if ( + not args.multi_gpu + and args.num_processes > 1 + and ( + (is_xpu_available() and torch.xpu.device_count() > 1) + or (is_npu_available() and torch.npu.device_count() > 1) + or (is_hpu_available() and torch.hpu.device_count() > 1) + or (is_mlu_available() and torch.mlu.device_count() > 1) + or (is_sdaa_available() and torch.sdaa.device_count() > 1) + or (is_musa_available() and torch.musa.device_count() > 1) + or (torch.cuda.is_available() and torch.cuda.device_count() > 1) + ) + ): + warned.append( + "\t\tMore than one GPU was found, enabling multi-GPU training.\n" + "\t\tIf this was unintended please pass in `--num_processes=1`." + ) + args.multi_gpu = True + if args.num_machines is None: + warned.append("\t`--num_machines` was set to a value of `1`") + args.num_machines = 1 + if args.mixed_precision is None: + warned.append("\t`--mixed_precision` was set to a value of `'no'`") + args.mixed_precision = "no" + if not hasattr(args, "use_cpu"): + args.use_cpu = args.cpu + if args.dynamo_backend is None: + warned.append("\t`--dynamo_backend` was set to a value of `'no'`") + args.dynamo_backend = "no" + if args.debug: + logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.") + + is_aws_env_disabled = defaults is None or ( + defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER + ) + if is_aws_env_disabled and args.num_cpu_threads_per_process is None: + args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1) + if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0: + local_size = get_int_from_env( + ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], + max(int(args.num_processes / args.num_machines), 1), + ) + 
threads_per_process = int(psutil.cpu_count(logical=False) / local_size) + if threads_per_process > 1: + args.num_cpu_threads_per_process = threads_per_process + warned.append( + f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" + ) + + if args.use_xpu is not None: + logger.warning( + "use_xpu is deprecated and ignored, will be removed in Accelerate v1.20. " + "XPU is a PyTorch native citizen now, we don't need extra argument to enable it any more." + ) + + if any(warned): + message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" + message += "\n".join(warned) + message += ( + "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." + ) + logger.warning(message) + return args, defaults, mp_from_config_flag + + +def launch_command(args): + args, defaults, mp_from_config_flag = _validate_launch_command(args) + # Use the proper launcher + if args.use_deepspeed and not args.cpu: + args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] + if mp_from_config_flag: + args.deepspeed_fields_from_accelerate_config.append("mixed_precision") + args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) + deepspeed_launcher(args) + elif args.use_fsdp and not args.cpu: + multi_gpu_launcher(args) + elif args.use_megatron_lm and not args.cpu: + multi_gpu_launcher(args) + elif args.multi_gpu and not args.cpu: + multi_gpu_launcher(args) + elif args.tpu and not args.cpu: + if args.tpu_use_cluster: + tpu_pod_launcher(args) + else: + tpu_launcher(args) + elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + sagemaker_launcher(defaults, args) + else: + simple_launcher(args) + + +def main(): + parser = launch_command_parser() + args = parser.parse_args() + 
launch_command(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c851cc0b192ab8207d3fa68d7409868c84354c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from .selection_menu import BulletMenu diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa52a0c9eb05604d1b43b32ccdbd9b0d8090db49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08415ff55d005ed6065f00905b9fe56960cf295b Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d25527cbb940c3bb12c542b3fa05b0de798aea2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9e3d8d67969363989054c5843946f3d641f9a0a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae0d3b12edab54cfb200cb2140115fc727bb3ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5244989e14a6405e7a07a906f688c083e26c482c Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f0bb7b68025ae4fe0c2c76c095eb36b4e64f2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py @@ -0,0 +1,65 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet +""" + +import os +import sys +from contextlib import contextmanager + + +# Windows only +if os.name == "nt": + import ctypes + import msvcrt # noqa + + class CursorInfo(ctypes.Structure): + # _fields is a specific attr expected by ctypes + _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] + + +def hide_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = False + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25l") + sys.stdout.flush() + + +def show_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = True + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25h") + sys.stdout.flush() + + +@contextmanager +def hide(): + "Context manager to hide the terminal cursor" + try: + hide_cursor() + yield + finally: + show_cursor() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..de46f37ddcf4591167e3e01791391e4b1729034f --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py @@ -0,0 +1,59 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A variety of helper functions and constants when dealing with terminal menu choices, based on +https://github.com/bchao1/bullet +""" + +import enum +import shutil +import sys + + +TERMINAL_WIDTH, _ = shutil.get_terminal_size() + +CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} + + +class Direction(enum.Enum): + UP = 0 + DOWN = 1 + + +def forceWrite(content, end=""): + sys.stdout.write(str(content) + end) + sys.stdout.flush() + + +def writeColor(content, color, end=""): + forceWrite(f"\u001b[{color}m{content}\u001b[0m", end) + + +def reset_cursor(): + forceWrite("\r") + + +def move_cursor(num_lines: int, direction: str): + forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}") + + +def clear_line(): + forceWrite(" " * TERMINAL_WIDTH) + reset_cursor() + + +def linebreak(): + reset_cursor() + forceWrite("-" * TERMINAL_WIDTH) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py new file mode 100644 index 0000000000000000000000000000000000000000..f1270eaece9d4243e7282dcb31166feeeb9bdfc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py @@ -0,0 +1,84 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This file contains utilities for handling input from the user and registering specific keys to specific functions, +based on https://github.com/bchao1/bullet +""" + +from .keymap import KEYMAP, get_character + + +def mark(key: str): + """ + Mark the function with the key code so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += [key] + func.handle_key = handle + return func + + return decorator + + +def mark_multiple(*keys: list[str]): + """ + Mark the function with the key codes so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += keys + func.handle_key = handle + return func + + return decorator + + +class KeyHandler(type): + """ + Metaclass that adds the key handlers to the class + """ + + def __new__(cls, name, bases, attrs): + new_cls = super().__new__(cls, name, bases, attrs) + if not hasattr(new_cls, "key_handler"): + new_cls.key_handler = {} + new_cls.handle_input = KeyHandler.handle_input + + for value in attrs.values(): + handled_keys = getattr(value, "handle_key", []) + for key in handled_keys: + new_cls.key_handler[key] = value + return new_cls + + @staticmethod + def handle_input(cls): + "Finds and returns the selected character if it exists in the handler" + char = get_character() + if char != KEYMAP["undefined"]: + char = ord(char) + handler = cls.key_handler.get(char) + if handler: + cls.current_selection = char + return handler(cls) + else: + return None + + +def register(cls): + """Adds 
KeyHandler metaclass to the class""" + return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy()) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py new file mode 100644 index 0000000000000000000000000000000000000000..787db12860fe21c6786dda69c34fcccab114f2f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py @@ -0,0 +1,133 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""
+Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
+"""
+
+import os
+import string
+import sys
+
+
+# Offset (1 << 8 == 256) added to the raw arrow-key codes (65-68) so arrow keys
+# cannot collide with ordinary single-byte character codes.
+ARROW_KEY_FLAG = 1 << 8
+
+KEYMAP = {
+    "tab": ord("\t"),
+    "newline": ord("\r"),
+    "esc": 27,
+    "up": 65 + ARROW_KEY_FLAG,
+    "down": 66 + ARROW_KEY_FLAG,
+    "right": 67 + ARROW_KEY_FLAG,
+    "left": 68 + ARROW_KEY_FLAG,
+    "mod_int": 91,
+    "undefined": sys.maxsize,
+    "interrupt": 3,
+    "insert": 50,
+    "delete": 51,
+    "pg_up": 53,
+    "pg_down": 54,
+}
+
+# Arrow codes are contiguous (65..68), so "up" and "left" bound the range.
+KEYMAP["arrow_begin"] = KEYMAP["up"]
+KEYMAP["arrow_end"] = KEYMAP["left"]
+
+if sys.platform == "win32":
+    WIN_CH_BUFFER = []
+    # Maps the two-byte Windows scan-code sequences (0x00/0xE0 prefix + code) to
+    # the same arrow codes the POSIX branch produces (ARROW_KEY_FLAG stripped).
+    WIN_KEYMAP = {
+        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
+        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
+        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
+        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
+        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
+        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
+        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
+        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
+    }
+
+for i in range(10):
+    KEYMAP[str(i)] = ord(str(i))
+
+
+def get_raw_chars():
+    "Gets raw characters from inputs"
+    # NOTE(review): on a platform where os.name is neither "nt" nor "posix",
+    # `ch` would be unbound at the final return — assumed unreachable in practice.
+    if os.name == "nt":
+        import msvcrt
+
+        encoding = "mbcs"
+        # Flush the keyboard buffer
+        while msvcrt.kbhit():
+            msvcrt.getch()
+        if len(WIN_CH_BUFFER) == 0:
+            # Read the keystroke
+            ch = msvcrt.getch()
+
+            # If it is a prefix char, get second part
+            if ch in (b"\x00", b"\xe0"):
+                ch2 = ch + msvcrt.getch()
+                # Translate actual Win chars to bullet char types
+                try:
+                    chx = chr(WIN_KEYMAP[ch2])
+                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
+                    WIN_CH_BUFFER.append(chx)
+                    if ord(chx) in (
+                        KEYMAP["insert"] - 1 << 9,
+                        KEYMAP["delete"] - 1 << 9,
+                        KEYMAP["pg_up"] - 1 << 9,
+                        KEYMAP["pg_down"] - 1 << 9,
+                    ):
+                        # "~" completes the VT escape sequence for these keys
+                        WIN_CH_BUFFER.append(chr(126))
+                    ch = chr(KEYMAP["esc"])
+                except KeyError:
+                    ch = ch2[1]
+            else:
+                ch = ch.decode(encoding)
+        else:
+            ch = WIN_CH_BUFFER.pop(0)
+    elif os.name == "posix":
+        import termios
+        import tty
+
+        fd = sys.stdin.fileno()
+        old_settings = termios.tcgetattr(fd)
+        try:
+            # Raw mode: deliver keystrokes immediately, without echo or line buffering
+            tty.setraw(fd)
+            ch = sys.stdin.read(1)
+        finally:
+            # Always restore the terminal settings, even if the read raises
+            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+    return ch
+
+
+def get_character():
+    "Gets a character from the keyboard and returns the key code"
+    # NOTE(review): returns either a 1-char string or the int KEYMAP["undefined"]
+    # (sys.maxsize) — callers must handle both types.
+    char = get_raw_chars()
+    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
+        return char
+
+    elif ord(char) == KEYMAP["esc"]:
+        # Escape sequences: ESC [ <code> for arrows; anything else is passed through
+        combo = get_raw_chars()
+        if ord(combo) == KEYMAP["mod_int"]:
+            key = get_raw_chars()
+            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
+                return chr(ord(key) + ARROW_KEY_FLAG)
+            else:
+                return KEYMAP["undefined"]
+        else:
+            return get_raw_chars()
+
+    else:
+        if char in string.printable:
+            return char
+        else:
+            return KEYMAP["undefined"]
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee9a771a54ef666ee46b67ae6c75fb957d49efdd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
@@ -0,0 +1,144 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Main driver for the selection menu, based on https://github.com/bchao1/bullet
+"""
+
+import builtins
+import sys
+
+from ...utils.imports import _is_package_available
+from . import cursor, input
+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
+from .keymap import KEYMAP
+
+
+in_colab = False
+try:
+    in_colab = _is_package_available("google.colab")
+except ModuleNotFoundError:
+    pass
+
+
+@input.register
+class BulletMenu:
+    """
+    A CLI menu to select a choice from a list of choices using the keyboard.
+    """
+
+    # NOTE(review): `choices: list = []` is a shared mutable default — safe only as
+    # long as no caller mutates it; verify before relying on it.
+    def __init__(self, prompt: str = None, choices: list = []):
+        self.position = 0
+        self.choices = choices
+        self.prompt = prompt
+        if sys.platform == "win32":
+            self.arrow_char = "*"
+        else:
+            self.arrow_char = "➔ "
+
+    def write_choice(self, index, end: str = ""):
+        # Green (ANSI color 32) on terminals that support it; plain text on Windows
+        if sys.platform != "win32":
+            writeColor(self.choices[index], 32, end)
+        else:
+            forceWrite(self.choices[index], end)
+
+    def print_choice(self, index: int):
+        "Prints the choice at the given index"
+        if index == self.position:
+            forceWrite(f" {self.arrow_char} ")
+            self.write_choice(index)
+        else:
+            forceWrite(f"    {self.choices[index]}")
+        reset_cursor()
+
+    def move_direction(self, direction: Direction, num_spaces: int = 1):
+        "Should not be directly called, used to move a direction of either up or down"
+        old_position = self.position
+        if direction == Direction.DOWN:
+            if self.position + 1 >= len(self.choices):
+                return
+            self.position += num_spaces
+        else:
+            if self.position - 1 < 0:
+                return
+            self.position -= num_spaces
+        # Redraw the row we left, move the cursor, then redraw the new active row
+        clear_line()
+        self.print_choice(old_position)
+        move_cursor(num_spaces, direction.name)
+        self.print_choice(self.position)
+
+    @input.mark(KEYMAP["up"])
+    def move_up(self):
+        self.move_direction(Direction.UP)
+
+    @input.mark(KEYMAP["down"])
+    def move_down(self):
+        self.move_direction(Direction.DOWN)
+
+    @input.mark(KEYMAP["newline"])
+    def select(self):
+        move_cursor(len(self.choices) - self.position, "DOWN")
+        return self.position
+
+    @input.mark(KEYMAP["interrupt"])
+    def interrupt(self):
+        move_cursor(len(self.choices) - self.position, "DOWN")
+        raise KeyboardInterrupt
+
+    # Number keys 0-9 jump directly to that row (if it exists)
+    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
+    def select_row(self):
+        # NOTE(review): `current_selection` and `handle_input` are not defined in this
+        # class — presumably injected by the `@input.register`/`@input.mark` machinery
+        # in menu/input.py; confirm there.
+        index = int(chr(self.current_selection))
+        movement = index - self.position
+        if index == self.position:
+            return
+        if index < len(self.choices):
+            if self.position > index:
+                self.move_direction(Direction.UP, -movement)
+            elif self.position < index:
+                self.move_direction(Direction.DOWN, movement)
+            else:
+                return
+        else:
+            return
+
+    def run(self, default_choice: int = 0):
+        "Start the menu and return the selected choice"
+        if self.prompt:
+            linebreak()
+            forceWrite(self.prompt, "\n")
+            if in_colab:
+                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
+            else:
+                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
+        self.position = default_choice
+        for i in range(len(self.choices)):
+            self.print_choice(i)
+            forceWrite("\n")
+        move_cursor(len(self.choices) - self.position, "UP")
+        with cursor.hide():
+            while True:
+                if in_colab:
+                    # Colab has no raw terminal input: fall back to typed indices
+                    try:
+                        choice = int(builtins.input())
+                    except ValueError:
+                        choice = default_choice
+                else:
+                    choice = self.handle_input()
+                if choice is not None:
+                    # Erase the menu and print only the chosen entry
+                    reset_cursor()
+                    for _ in range(len(self.choices) + 1):
+                        move_cursor(1, "UP")
+                        clear_line()
+                    self.write_choice(choice, "\n")
+                    return choice
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/merge.py b/venv/lib/python3.10/site-packages/accelerate/commands/merge.py
new file mode 100644
index 0000000000000000000000000000000000000000..475b53b5bbb71b959057126f8667d7f61eb9d0e1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/commands/merge.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.utils import merge_fsdp_weights
+
+
+description = """Utility to merge the weights from multiple FSDP checkpoints into a single combined checkpoint. Should be used if
+`SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}`.
+
+This is a CPU-bound process and requires enough RAM to load the entire model state dict."""
+
+
+def merge_command(args):
+    # Third argument is `safe_serialization`: the inverse of --unsafe_serialization
+    merge_fsdp_weights(
+        args.checkpoint_directory, args.output_path, not args.unsafe_serialization, args.remove_checkpoint_dir
+    )
+
+
+def merge_command_parser(subparsers=None):
+    # Used both as an `accelerate` subcommand ("merge-weights") and standalone via main()
+    if subparsers is not None:
+        parser = subparsers.add_parser("merge-weights", description=description)
+    else:
+        parser = CustomArgumentParser(description=description)
+
+    parser.add_argument("checkpoint_directory", type=str, help="A directory containing sharded weights saved by FSDP.")
+    parser.add_argument(
+        "output_path",
+        type=str,
+        help="The path to save the merged weights. Defaults to the current directory.",
+    )
+    parser.add_argument(
+        "--unsafe_serialization",
+        action="store_true",
+        default=False,
+        help="Whether to save the merged weights as `.bin` rather than `.safetensors` (not recommended).",
+    )
+    parser.add_argument(
+        "--remove_checkpoint_dir",
+        action="store_true",
+        help="Whether to remove the checkpoint directory after merging.",
+        default=False,
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=merge_command)
+    return parser
+
+
+def main():
+    parser = merge_command_parser()
+    args = parser.parse_args()
+    merge_command(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/test.py b/venv/lib/python3.10/site-packages/accelerate/commands/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0d2f7bcf14727aa13e3438f4cd6e6f140f5bb2f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/commands/test.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
+
+
+def test_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("test")
+    else:
+        parser = argparse.ArgumentParser("Accelerate test command")
+
+    parser.add_argument(
+        "--config_file",
+        default=None,
+        help=(
+            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+            "with 'huggingface'."
+        ),
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=test_command)
+    return parser
+
+
+def test_command(args):
+    # Runs the bundled sanity-check script through `accelerate-launch` as a subprocess
+    script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
+
+    if args.config_file is None:
+        test_args = [script_name]
+    else:
+        test_args = f"--config_file={args.config_file} {script_name}".split()
+
+    cmd = ["accelerate-launch"] + test_args
+    result = execute_subprocess_async(cmd)
+    if result.returncode == 0:
+        print("Test is a success! You are ready for your distributed training!")
+
+
+def main():
+    parser = test_command_parser()
+    args = parser.parse_args()
+    test_command(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/to_fsdp2.py b/venv/lib/python3.10/site-packages/accelerate/commands/to_fsdp2.py
new file mode 100644
index 0000000000000000000000000000000000000000..443407cd983dcb31711b75c2a6337f9a7af24584
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/commands/to_fsdp2.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+import logging
+from pathlib import Path
+
+import yaml
+
+from accelerate.commands.utils import CustomArgumentParser
+
+
+class ConversionStatus(enum.Enum):
+    # Sentinels used in ARGUMENT_KEY_MAPPING for FSDP1 keys that have no FSDP2 equivalent
+    NOT_YET_IMPLEMENTED = 0
+    REMOVED = -1
+
+
+ARGUMENT_KEY_MAPPING = {
+    # New keys in FSDP2
+    "fsdp_version": "fsdp_version",
+    "fsdp_reshard_after_forward": "fsdp_reshard_after_forward",
+    # https://github.com/pytorch/torchtitan/blob/main/docs/fsdp.md
+    # https://huggingface.co/docs/accelerate/en/usage_guides/fsdp
+    "fsdp_auto_wrap_policy": "fsdp_auto_wrap_policy",
+    "fsdp_backward_prefetch": ConversionStatus.REMOVED,
+    "fsdp_forward_prefetch": ConversionStatus.NOT_YET_IMPLEMENTED,
+    "fsdp_cpu_ram_efficient_loading": "fsdp_cpu_ram_efficient_loading",
+    "fsdp_offload_params": "fsdp_offload_params",
+    "fsdp_sharding_strategy": "fsdp_reshard_after_forward",
+    "fsdp_state_dict_type": "fsdp_state_dict_type",
+    "fsdp_sync_module_states": ConversionStatus.REMOVED,
+    "fsdp_transformer_layer_cls_to_wrap": "fsdp_transformer_layer_cls_to_wrap",
+    "fsdp_min_num_params": "fsdp_min_num_params",
+    "fsdp_use_orig_params": ConversionStatus.REMOVED,
+    "fsdp_activation_checkpointing": "fsdp_activation_checkpointing",
+}
+
+ARGUMENT_VALUE_MAPPING = {
+    # FSDP1 sharding strategies collapse to the boolean `fsdp_reshard_after_forward`
+    "fsdp_sharding_strategy": {
+        "FULL_SHARD": True,
+        "SHARD_GRAD_OP": False,
+        "HYBRID_SHARD": True,
+        "HYBRID_SHARD_ZERO2": False,
+        "NO_SHARD": False,
+    },
+    "fsdp_reshard_after_forward": {  # Needed to convert newly created configs using FSDP1 to FSDP2
+        "FULL_SHARD": True,
+        "SHARD_GRAD_OP": False,
+        "HYBRID_SHARD": True,
+        "HYBRID_SHARD_ZERO2": False,
+        "NO_SHARD": False,
+    },
+}
+
+logger = logging.getLogger(__name__)
+
+
+def _validate_to_fsdp2_args(args):
+    """Validate CLI arguments: the input config must exist and the output must be writable safely.
+
+    Raises:
+        FileNotFoundError: if `--config_file` does not exist.
+        ValueError: if neither `--overwrite` nor `--output_file` is given.
+        FileExistsError: if `--output_file` exists and `--overwrite` is not set.
+    """
+    if not Path(args.config_file).exists():
+        raise FileNotFoundError(f"Config file {args.config_file} not found")
+
+    if not args.overwrite and args.output_file is None:
+        raise ValueError("If --overwrite is not set, --output_file must be provided")
+
+    if not args.overwrite and Path(args.output_file).exists():
+        raise FileExistsError(f"Output file {args.output_file} already exists and --overwrite is not set")
+
+
+def convert_config_to_fsdp2(config: dict) -> dict:
+    """Convert the `fsdp_config` section of an Accelerate config from FSDP1 to FSDP2.
+
+    Keys are renamed per `ARGUMENT_KEY_MAPPING`; values whose meaning changed
+    (e.g. `fsdp_sharding_strategy` -> `fsdp_reshard_after_forward`) are translated via
+    `ARGUMENT_VALUE_MAPPING`. Keys removed or not yet implemented in FSDP2 are dropped
+    with a warning; unknown keys are kept as-is. Returns `config` (also mutated in
+    place) with `fsdp_version` set to 2, or unchanged if there is no FSDP section or
+    it already declares version 2.
+    """
+    fsdp_config = config.get("fsdp_config", {})
+
+    if not fsdp_config:
+        logger.info("No FSDP config found in the config file, skipping conversion...")
+        return config
+
+    new_fsdp_config = {}
+
+    if fsdp_config.get("fsdp_version", 1) == 2:
+        logger.warning("Config already specifies FSDP2, skipping conversion...")
+        logger.warning(
+            "If the config doesn't use new argument names, change `fsdp_version` to `1` and rerun the command."
+        )
+        return config
+
+    for key, value in fsdp_config.items():
+        conversion_status = ARGUMENT_KEY_MAPPING.get(key, None)
+        # Fix: the status checks below were previously unreachable — a catch-all branch
+        # copied REMOVED/NOT_YET_IMPLEMENTED keys straight into the FSDP2 config instead
+        # of dropping them.
+        if conversion_status == ConversionStatus.REMOVED:
+            logger.warning(f"Argument {key} has been removed in FSDP2, skipping this key...")
+            continue
+
+        if conversion_status == ConversionStatus.NOT_YET_IMPLEMENTED:
+            logger.warning(f"Argument {key} is not yet implemented in FSDP2, skipping this key...")
+            continue
+
+        if conversion_status is None:
+            # Unknown key: keep it verbatim so user-defined extras survive the conversion
+            logger.warning(f"Argument {key} is not being converted, skipping this key...")
+            new_fsdp_config[key] = value
+        else:
+            if key in ARGUMENT_VALUE_MAPPING:
+                value = ARGUMENT_VALUE_MAPPING[key].get(value, value)
+            new_fsdp_config[ARGUMENT_KEY_MAPPING[key]] = value
+
+    new_fsdp_config["fsdp_version"] = 2
+    config["fsdp_config"] = new_fsdp_config
+    return config
+
+
+def to_fsdp2_command_parser(subparsers=None):
+    """Build the `to-fsdp2` argument parser (as a subcommand when `subparsers` is given)."""
+    description = "Convert an Accelerate config from FSDP1 to FSDP2"
+
+    if subparsers is not None:
+        parser = subparsers.add_parser("to-fsdp2", description=description)
+    else:
+        parser = CustomArgumentParser(description=description)
+
+    parser.add_argument("--config_file", type=str, help="The config file to convert to FSDP2", required=True)
+    parser.add_argument(
+        "--overwrite",
+        action="store_true",
+        help="Overwrite the config file if it exists",
+        default=False,
+    )
+    parser.add_argument(
+        "--output_file",
+        type=str,
+        help="The path to the output file to write the converted config to. If not provided, the input file will be overwritten (if --overwrite is set)",
+        default=None,
+    )
+    if subparsers is not None:
+        parser.set_defaults(func=to_fsdp2_command)
+
+    return parser
+
+
+def load_config(config_file: str) -> dict:
+    """Load a YAML config file; raise `ValueError` if it parses to an empty document."""
+    with open(config_file) as f:
+        config = yaml.safe_load(f)
+    if not config:
+        raise ValueError("Config file is empty")
+
+    return config
+
+
+def to_fsdp2_command(args):
+    """Entry point: validate args, convert the config, and write it back out as YAML."""
+    _validate_to_fsdp2_args(args)
+    config = load_config(args.config_file)
+
+    if args.overwrite and args.output_file is None:
+        args.output_file = args.config_file
+
+    new_config = convert_config_to_fsdp2(config)
+
+    with open(args.output_file, "w") as f:
+        yaml.dump(new_config, f)
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/tpu.py b/venv/lib/python3.10/site-packages/accelerate/commands/tpu.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc0f07bf8697bfdb6484d3bf817f2e18b1313b00
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/commands/tpu.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import subprocess
+
+from packaging.version import Version, parse
+
+from accelerate.commands.config.config_args import default_config_file, load_config_from_file
+
+
+_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
+
+
+def tpu_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("tpu-config", description=_description)
+    else:
+        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
+    # Core arguments
+    config_args = parser.add_argument_group(
+        "Config Arguments", "Arguments that can be configured through `accelerate config`."
+    )
+    config_args.add_argument(
+        "--config_file",
+        type=str,
+        default=None,
+        help="Path to the config file to use for accelerate.",
+    )
+    config_args.add_argument(
+        "--tpu_name",
+        default=None,
+        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
+    )
+    config_args.add_argument(
+        "--tpu_zone",
+        default=None,
+        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
+    )
+    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
+    pod_args.add_argument(
+        "--use_alpha",
+        action="store_true",
+        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
+    )
+    pod_args.add_argument(
+        "--command_file",
+        default=None,
+        help="The path to the file containing the commands to run on the pod on startup.",
+    )
+    pod_args.add_argument(
+        "--command",
+        action="append",
+        nargs="+",
+        help="A command to run on the pod. Can be passed multiple times.",
+    )
+    pod_args.add_argument(
+        "--install_accelerate",
+        action="store_true",
+        help="Whether to install accelerate on the pod. Defaults to False.",
+    )
+    pod_args.add_argument(
+        "--accelerate_version",
+        default="latest",
+        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
+    )
+    pod_args.add_argument(
+        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=tpu_command_launcher)
+    return parser
+
+
+def tpu_command_launcher(args):
+    defaults = None
+
+    # Get the default from the config file if it exists.
+    if args.config_file is not None or os.path.isfile(default_config_file):
+        defaults = load_config_from_file(args.config_file)
+        # CLI arguments take precedence; config values only fill unset ones
+        if not args.command_file and defaults.command_file is not None and not args.command:
+            args.command_file = defaults.command_file
+        if not args.command and defaults.commands is not None:
+            args.command = defaults.commands
+        if not args.tpu_name:
+            args.tpu_name = defaults.tpu_name
+        if not args.tpu_zone:
+            args.tpu_zone = defaults.tpu_zone
+    # Translate the version selector into the actual `pip install` argument
+    if args.accelerate_version == "dev":
+        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
+    elif args.accelerate_version == "latest":
+        args.accelerate_version = "accelerate -U"
+    elif isinstance(parse(args.accelerate_version), Version):
+        args.accelerate_version = f"accelerate=={args.accelerate_version}"
+
+    if not args.command_file and not args.command:
+        raise ValueError("You must specify either a command file or a command to run on the pod.")
+
+    if args.command_file:
+        with open(args.command_file) as f:
+            args.command = [f.read().splitlines()]
+
+    # To turn list of lists into list of strings
+    if isinstance(args.command[0], list):
+        args.command = [line for cmd in args.command for line in cmd]
+    # Default to the shared folder and install accelerate
+    new_cmd = ["cd /usr/share"]
+    if args.install_accelerate:
+        new_cmd += [f"pip install {args.accelerate_version}"]
+    new_cmd += args.command
+    # Joined with ";" so every command runs in a single remote shell invocation
+    args.command = "; ".join(new_cmd)
+
+    # Then send it to gcloud
+    # Eventually try to use google-api-core to do this instead of subprocess
+    cmd = ["gcloud"]
+    if args.use_alpha:
+        cmd += ["alpha"]
+    cmd += [
+        "compute",
+        "tpus",
+        "tpu-vm",
+        "ssh",
+        args.tpu_name,
+        "--zone",
+        args.tpu_zone,
+        "--command",
+        args.command,
+        "--worker",
+        "all",
+    ]
+    if args.debug:
+        print(f"Running {' '.join(cmd)}")
+        return
+    subprocess.run(cmd)
+    print("Successfully setup pod.")
+
+
+def main():
+    parser = tpu_command_parser()
+    args = parser.parse_args()
+
+    tpu_command_launcher(args)
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/utils.py b/venv/lib/python3.10/site-packages/accelerate/commands/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..326f37d7f93de2417e4171e5ffe91193fb97225c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/commands/utils.py
@@ -0,0 +1,123 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+
+class _StoreAction(argparse.Action):
+    """
+    Custom action that allows for `-` or `_` to be passed in for an argument.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # For every underscore-style option, also register the dash-style alias
+        new_option_strings = []
+        for option_string in self.option_strings:
+            new_option_strings.append(option_string)
+            if "_" in option_string[2:]:
+                # Add `-` version to the option string
+                new_option_strings.append(option_string.replace("_", "-"))
+        self.option_strings = new_option_strings
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, values)
+        # Track which destinations were explicitly set on the command line
+        if not hasattr(namespace, "nondefault"):
+            namespace.nondefault = set()
+        namespace.nondefault.add(self.dest)
+
+
+class _StoreConstAction(_StoreAction):
+    """
+    Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
+    """
+
+    def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
+        super().__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=0,
+            const=const,
+            default=default,
+            required=required,
+            help=help,
+        )
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        # Stores the constant (ignoring `values`), keeping the nondefault bookkeeping
+        super().__call__(parser, namespace, self.const, option_string)
+
+
+class _StoreTrueAction(_StoreConstAction):
+    """
+    Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
+    """
+
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        default=None,
+        required=False,
+        help=None,
+    ):
+        super().__init__(
+            option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
+        )
+
+
+class CustomArgumentGroup(argparse._ArgumentGroup):
+    """
+    Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+    when applicable.
+    """
+
+    def _add_action(self, action):
+        # Re-wrap stock argparse actions in the aliasing variants above
+        args = vars(action)
+        if isinstance(action, argparse._StoreTrueAction):
+            action = _StoreTrueAction(
+                args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
+            )
+        elif isinstance(action, argparse._StoreConstAction):
+            action = _StoreConstAction(
+                args["option_strings"],
+                args["dest"],
+                args["const"],
+                args["default"],
+                args["required"],
+                args["help"],
+            )
+        elif isinstance(action, argparse._StoreAction):
+            action = _StoreAction(**args)
+        action = super()._add_action(action)
+        return action
+
+
+class CustomArgumentParser(argparse.ArgumentParser):
+    """
+    Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+    when applicable.
+    """
+
+    def add_argument(self, *args, **kwargs):
+        if "action" in kwargs:
+            # Translate action -> class
+            if kwargs["action"] == "store_true":
+                kwargs["action"] = _StoreTrueAction
+        else:
+            kwargs["action"] = _StoreAction
+        super().add_argument(*args, **kwargs)
+
+    def add_argument_group(self, *args, **kwargs):
+        group = CustomArgumentGroup(self, *args, **kwargs)
+        self._action_groups.append(group)
+        return group
diff --git a/venv/lib/python3.10/site-packages/accelerate/data_loader.py b/venv/lib/python3.10/site-packages/accelerate/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bb538908ddd10406396c8352dfffb051bca81ac
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/data_loader.py
@@ -0,0 +1,1451 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+import math
+from contextlib import suppress
+from typing import Callable, Optional, Union
+
+import torch
+from packaging import version
+from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
+
+from .logging import get_logger
+from .state import DistributedType, GradientState, PartialState, is_torch_xla_available
+from .utils import (
+    RNGType,
+    broadcast,
+    broadcast_object_list,
+    compare_versions,
+    concatenate,
+    find_batch_size,
+    get_data_structure,
+    initialize_tensors,
+    is_datasets_available,
+    is_torch_version,
+    is_torchdata_stateful_dataloader_available,
+    send_to_device,
+    slice_tensors,
+    synchronize_rng_states,
+)
+
+
+logger = get_logger(__name__)
+
+# kwargs of the DataLoader in min version 2.0
+_PYTORCH_DATALOADER_KWARGS = {
+    "batch_size": 1,
+    "shuffle": False,
+    "sampler": None,
+    "batch_sampler": None,
+    "num_workers": 0,
+    "collate_fn": None,
+    "pin_memory": False,
+    "drop_last": False,
+    "timeout": 0,
+    "worker_init_fn": None,
+    "multiprocessing_context": None,
+    "generator": None,
+    "prefetch_factor": 2,
+    "persistent_workers": False,
+    "pin_memory_device": "",
+}
+
+# kwargs added after by version
+_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {"2.6.0": {"in_order": True}}
+
+# Extend the defaults with kwargs only available in the installed torch version
+for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
+    if is_torch_version(">=", v):
+        _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
+
+
+class SeedableRandomSampler(RandomSampler):
+    """
+    Same as a random sampler, except that in `__iter__` a seed can be used.
+
+    Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
+    and be fully reproducable on multiple iterations.
+
+    If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
+    (stored in `self.epoch`).
+    """
+
+    def __init__(self, *args, **kwargs):
+        # `data_seed` is popped before delegating, as RandomSampler does not accept it
+        data_seed = kwargs.pop("data_seed", None)
+        super().__init__(*args, **kwargs)
+
+        self.initial_seed = data_seed if data_seed is not None else torch.random.initial_seed()
+        self.epoch = 0
+
+    def __iter__(self):
+        if self.generator is None:
+            self.generator = torch.Generator(
+                device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"
+            )
+            self.generator.manual_seed(self.initial_seed)
+
+        # Allow `self.epoch` to modify the seed of the generator
+        # (same seed on every process for a given epoch => identical shuffles)
+        seed = self.epoch + self.initial_seed
+        # print("Setting seed at epoch", self.epoch, seed)
+        self.generator.manual_seed(seed)
+        yield from super().__iter__()
+        # Auto-advance so the next full pass reshuffles differently
+        self.set_epoch(self.epoch + 1)
+
+    def set_epoch(self, epoch: int):
+        "Sets the current iteration of the sampler."
+        self.epoch = epoch
+
+
+class BatchSamplerShard(BatchSampler):
+    """
+    Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
+    always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
+    Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
+    at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
+
+    Args:
+        batch_sampler (`torch.utils.data.sampler.BatchSampler`):
+            The batch sampler to split in several shards.
+        num_processes (`int`, *optional*, defaults to 1):
+            The number of processes running concurrently.
+        process_index (`int`, *optional*, defaults to 0):
+            The index of the current process.
+ split_batches (`bool`, *optional*, defaults to `False`): + Whether the shards should be created by splitting a batch to give a piece of it on each process, or by + yielding different full batches on each process. + + On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in: + + - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if + this argument is set to `False`. + - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]` + then `[6, 7]` if this argument is set to `True`. + even_batches (`bool`, *optional*, defaults to `True`): + Whether or not to loop back at the beginning of the sampler when the number of samples is not a round + multiple of (original batch size / number of processes). + + + + `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` + equal to `False` + + """ + + def __init__( + self, + batch_sampler: BatchSampler, + num_processes: int = 1, + process_index: int = 0, + split_batches: bool = False, + even_batches: bool = True, + ): + if split_batches and batch_sampler.batch_size % num_processes != 0: + raise ValueError( + f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) " + f"needs to be a round multiple of the number of processes ({num_processes})." + ) + self.batch_sampler = batch_sampler + self.num_processes = num_processes + self.process_index = process_index + self.split_batches = split_batches + self.even_batches = even_batches + self.batch_size = getattr(batch_sampler, "batch_size", None) + self.drop_last = getattr(batch_sampler, "drop_last", False) + if self.batch_size is None and self.even_batches: + raise ValueError( + "You need to use `even_batches=False` when the batch sampler has no batch size. If you " + "are not calling this method directly, set `accelerator.even_batches=False` instead." 
            )

    @property
    def total_length(self):
        # Length of the underlying (un-sharded) batch sampler.
        return len(self.batch_sampler)

    def __len__(self):
        if self.split_batches:
            # Split batches does not change the length of the batch sampler
            return len(self.batch_sampler)
        if len(self.batch_sampler) % self.num_processes == 0:
            # If the length is a round multiple of the number of processes, it's easy.
            return len(self.batch_sampler) // self.num_processes
        length = len(self.batch_sampler) // self.num_processes
        if self.drop_last:
            # Same if we drop the remainder.
            return length
        elif self.even_batches:
            # When we even batches we always get +1
            return length + 1
        else:
            # Otherwise it depends on the process index.
            return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length

    def __iter__(self):
        return self._iter_with_split() if self.split_batches else self._iter_with_no_split()

    def _iter_with_split(self):
        initial_data = []
        # Each process receives a contiguous slice of this size from every full batch.
        batch_length = self.batch_sampler.batch_size // self.num_processes
        for idx, batch in enumerate(self.batch_sampler):
            if idx == 0:
                # Keep the first batch around in case we need to pad the last, short batch.
                initial_data = batch
            if len(batch) == self.batch_size:
                # If the batch is full, we yield the part of it this process is responsible of.
                yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]

        # If drop_last is True or the last batch was full, iteration is over, otherwise...
        if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
            if not self.even_batches:
                # Yield only the part of the short batch that actually belongs to this process, if any.
                if len(batch) > batch_length * self.process_index:
                    yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
            else:
                # For degenerate cases where the dataset has less than num_process * batch_size samples
                while len(initial_data) < self.batch_size:
                    initial_data += initial_data
                batch = batch + initial_data
                yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]

    def _iter_with_no_split(self):
        initial_data = []
        batch_to_yield = []
        for idx, batch in enumerate(self.batch_sampler):
            # We gather the initial indices in case we need to circle back at the end.
            if not self.drop_last and idx < self.num_processes:
                initial_data += batch
            # We identify the batch to yield but wait until we are sure every process gets a full batch before actually
            # yielding it.
            if idx % self.num_processes == self.process_index:
                batch_to_yield = batch
            if idx % self.num_processes == self.num_processes - 1 and (
                self.batch_size is None or len(batch) == self.batch_size
            ):
                yield batch_to_yield
                batch_to_yield = []

        # If drop_last is True, iteration is over, otherwise...
        if not self.drop_last and len(initial_data) > 0:
            if not self.even_batches:
                if len(batch_to_yield) > 0:
                    yield batch_to_yield
            else:
                # ...
we yield the complete batch we had saved before if it has the proper length + if len(batch_to_yield) == self.batch_size: + yield batch_to_yield + + # For degenerate cases where the dataset has less than num_process * batch_size samples + while len(initial_data) < self.num_processes * self.batch_size: + initial_data += initial_data + + # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next + if len(batch) == self.batch_size: + batch = [] + idx += 1 + + # Make sure we yield a multiple of self.num_processes batches + cycle_index = 0 + while idx % self.num_processes != 0 or len(batch) > 0: + end_index = cycle_index + self.batch_size - len(batch) + batch += initial_data[cycle_index:end_index] + if idx % self.num_processes == self.process_index: + yield batch + cycle_index = end_index + batch = [] + idx += 1 + + +class IterableDatasetShard(IterableDataset): + """ + Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will + always yield a number of samples that is a round multiple of the actual batch size (depending of the value of + `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the + `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would + be too small or loop with indices from the beginning. + + Args: + dataset (`torch.utils.data.dataset.IterableDataset`): + The batch sampler to split in several shards. + batch_size (`int`, *optional*, defaults to 1): + The size of the batches per shard (if `split_batches=False`) or the size of the batches (if + `split_batches=True`). + drop_last (`bool`, *optional*, defaults to `False`): + Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the + beginning. + num_processes (`int`, *optional*, defaults to 1): + The number of processes running concurrently. 
+ process_index (`int`, *optional*, defaults to 0): + The index of the current process. + split_batches (`bool`, *optional*, defaults to `False`): + Whether the shards should be created by splitting a batch to give a piece of it on each process, or by + yielding different full batches on each process. + + On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: + + - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this + argument is set to `False`. + - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if + this argument is set to `True`. + """ + + def __init__( + self, + dataset: IterableDataset, + batch_size: int = 1, + drop_last: bool = False, + num_processes: int = 1, + process_index: int = 0, + split_batches: bool = False, + ): + if split_batches and batch_size > 1 and batch_size % num_processes != 0: + raise ValueError( + f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " + f"needs to be a round multiple of the number of processes ({num_processes})." 
+ ) + self.dataset = dataset + self.batch_size = batch_size + self.drop_last = drop_last + self.num_processes = num_processes + self.process_index = process_index + self.split_batches = split_batches + + def set_epoch(self, epoch): + self.epoch = epoch + if hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + def __len__(self): + # We will just raise the downstream error if the underlying dataset is not sized + if self.drop_last: + return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size + else: + return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size + + def __iter__(self): + if ( + not hasattr(self.dataset, "set_epoch") + and hasattr(self.dataset, "generator") + and isinstance(self.dataset.generator, torch.Generator) + ): + self.dataset.generator.manual_seed(self.epoch) + real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) + process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size + process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) + + first_batch = None + current_batch = [] + for element in self.dataset: + current_batch.append(element) + # Wait to have a full batch before yielding elements. + if len(current_batch) == real_batch_size: + for i in process_slice: + yield current_batch[i] + if first_batch is None: + first_batch = current_batch.copy() + current_batch = [] + + # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. 
        if not self.drop_last and len(current_batch) > 0:
            if first_batch is None:
                first_batch = current_batch.copy()
            # Pad the trailing partial batch with elements recycled from the first batch.
            while len(current_batch) < real_batch_size:
                current_batch += first_batch
            for i in process_slice:
                yield current_batch[i]


class DataLoaderStateMixin:
    """
    Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the
    end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other
    useful information that might be needed.

    **Available attributes:**

        - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
        - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
            batch size

    Inheritors of this class should ensure that the class creates a `GradientState()` instance, stored in
    `self.gradient_state`.

    """

    def __init_subclass__(cls, **kwargs):
        # Class-level defaults; `reset` re-establishes them per instance.
        cls.end_of_dataloader = False
        cls.remainder = -1

    def reset(self):
        self.end_of_dataloader = False
        self.remainder = -1

    def begin(self):
        "Prepares the gradient state for the current dataloader"
        self.reset()
        # Best effort: `self.dataset` may not be sized, in which case `remainder` stays at -1.
        with suppress(Exception):
            if not self._drop_last:
                length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
                self.remainder = length % self.total_batch_size
        self.gradient_state._add_dataloader(self)

    def end(self):
        "Cleans up the gradient state after exiting the dataloader"
        self.gradient_state._remove_dataloader(self)


class DataLoaderAdapter:
    """
    A class which wraps around a PyTorch `DataLoader` (or variants of it) to be used with the `Accelerator`. For
    compatibility reasons, this class inherits from the class it wraps around, so it can be used as a drop-in.
+ """ + + def __init__(self, dataset, use_stateful_dataloader=False, batch_sampler=None, **kwargs): + self.use_stateful_dataloader = use_stateful_dataloader + if is_torchdata_stateful_dataloader_available(): + from torchdata.stateful_dataloader import StatefulDataLoader + + if use_stateful_dataloader and not is_torchdata_stateful_dataloader_available(): + raise ImportError( + "StatefulDataLoader is not available. Please install torchdata version 0.8.0 or higher to use it." + ) + if use_stateful_dataloader: + torchdata_version = version.parse(importlib.metadata.version("torchdata")) + if ( + "in_order" in kwargs + and compare_versions(torchdata_version, "<", "0.11") + and is_torch_version(">=", "2.6.0") + ): + kwargs.pop("in_order") + self.base_dataloader = StatefulDataLoader(dataset, batch_sampler=batch_sampler, **kwargs) + else: + self.base_dataloader = DataLoader(dataset, batch_sampler=batch_sampler, **kwargs) + + if hasattr(self.base_dataloader, "state_dict"): + self.dl_state_dict = self.base_dataloader.state_dict() + + def __getattr__(self, name): + # Avoid infinite recursion if we try to access a nonexistent base_dataloader attribute. + if name == "base_dataloader": + raise AttributeError() + # Delegate attribute access to the internal dataloader + return getattr(self.base_dataloader, name) + + def state_dict(self): + return self.dl_state_dict + + def load_state_dict(self, state_dict): + self.base_dataloader.load_state_dict(state_dict) + + @property + def __class__(self): + """ + In order to maintain backwards compatability with other code, we need to ensure `isinstance(obj, DataLoader)` + returs true. This is because some downstream code assumes that the `DataLoader` is the base class of the + object. + """ + return self.base_dataloader.__class__ + + def __len__(self): + return len(self.base_dataloader) + + def adjust_state_dict_for_prefetch(self): + """ + Adjusts the state dict for prefetching. 
Natively, this will adjust all of the iters yielded keys in + `self.dl_state_dict` by a factor of `num_processes - 1`, however if a custom correction is needed, this can be + overridden. + + This should modify `self.dl_state_dict` directly + """ + # The state dict will be off by a factor of `n-1` batch too many during DDP, + # so we need to adjust it here + if PartialState().distributed_type != DistributedType.NO: + factor = PartialState().num_processes - 1 + if self.dl_state_dict["_sampler_iter_yielded"] > 0: + self.dl_state_dict["_sampler_iter_yielded"] -= factor + if self.dl_state_dict["_num_yielded"] > 0: + self.dl_state_dict["_num_yielded"] -= factor + if self.dl_state_dict["_index_sampler_state"] is not None: + if ( + "samples_yielded" in self.dl_state_dict["_index_sampler_state"] + and self.dl_state_dict["_index_sampler_state"]["samples_yielded"] > 0 + ): + self.dl_state_dict["_index_sampler_state"]["samples_yielded"] -= self.batch_size * factor + + def _update_state_dict(self): + # The state_dict of the underlying base_dataloader may be ahead of what is currently being yielded. + # E.g. the implementation of DataLoaderShard involves having an underlying iterator 1 element ahead of + # what it wants to yield. + # + # _update_state_dict is called to snapshot the state_dict that would properly recover the DataLoaderAdapter. + if hasattr(self.base_dataloader, "state_dict"): + self.dl_state_dict = self.base_dataloader.state_dict() + # Potentially modify the state_dict to adjust for prefetching + self.adjust_state_dict_for_prefetch() + # Then tag if we are at the end of the dataloader + self.dl_state_dict["_iterator_finished"] = self.end_of_dataloader + + +class DataLoaderShard(DataLoaderAdapter, DataLoaderStateMixin): + """ + Subclass of `DataLoaderAdapter` that will deal with device placement and current distributed setup. + + Args: + dataset (`torch.utils.data.dataset.Dataset`): + The dataset to use to build this dataloader. 
+ device (`torch.device`, *optional*): + If passed, the device to put all batches on. + rng_types (list of `str` or [`~utils.RNGType`]): + The list of random number generators to synchronize at the beginning of each iteration. Should be one or + several of: + + - `"torch"`: the base torch random number generator + - `"cuda"`: the CUDA random number generator (GPU only) + - `"xla"`: the XLA random number generator (TPU only) + - `"generator"`: an optional `torch.Generator` + synchronized_generator (`torch.Generator`, *optional*): + A random number generator to keep synchronized across processes. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning. + use_stateful_dataloader (`bool`, *optional*, defaults to `False`): + Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. + **kwargs (additional keyword arguments, *optional*): + All other keyword arguments to pass to the regular `DataLoader` initialization. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. + Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
+ """ + + def __init__( + self, + dataset, + device=None, + rng_types=None, + synchronized_generator=None, + skip_batches=0, + use_stateful_dataloader=False, + _drop_last: bool = False, + _non_blocking: bool = False, + torch_device_mesh=None, + **kwargs, + ): + super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) + self.device = device + self.rng_types = rng_types + self.synchronized_generator = synchronized_generator + self.skip_batches = skip_batches + self.gradient_state = GradientState() + self._drop_last = _drop_last + self._non_blocking = _non_blocking + self.iteration = 0 + + def __iter__(self): + if self.rng_types is not None: + synchronize_rng_states(self.rng_types, self.synchronized_generator) + self.begin() + + self.set_epoch(self.iteration) + dataloader_iter = self.base_dataloader.__iter__() + # We iterate one batch ahead to check when we are at the end + try: + current_batch = next(dataloader_iter) + except StopIteration: + self.end() + return + + batch_index = 0 + while True: + try: + # But we still move it to the device so it is done before `StopIteration` is reached + if self.device is not None: + current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking) + self._update_state_dict() + next_batch = next(dataloader_iter) + if batch_index >= self.skip_batches: + yield current_batch + batch_index += 1 + current_batch = next_batch + except StopIteration: + self.end_of_dataloader = True + self._update_state_dict() + if batch_index >= self.skip_batches: + yield current_batch + break + + self.iteration += 1 + self.end() + + def __reduce__(self): + """ + Define the `__reduce__` method to ensure a `DataLoaderShard` can be pickled and unpickled. This needs to be + explicitly defined since default pickling behavior is broken by `DataLoaderAdapter` messing with its + `__class__` member. 
+ """ + args = super().__reduce__() + return (DataLoaderShard, *args[1:]) + + def set_epoch(self, epoch: int): + # In case it is manually passed in, the user can set it to what they like + if self.iteration != epoch: + self.iteration = epoch + if hasattr(self.batch_sampler, "set_epoch"): + self.batch_sampler.set_epoch(epoch) + if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): + self.batch_sampler.sampler.set_epoch(epoch) + if ( + hasattr(self.batch_sampler, "batch_sampler") + and hasattr(self.batch_sampler.batch_sampler, "sampler") + and hasattr(self.batch_sampler.batch_sampler.sampler, "set_epoch") + ): + self.batch_sampler.batch_sampler.sampler.set_epoch(epoch) + # We support if a custom `Dataset` implementation has `set_epoch` + # or in general HF datasets `Datasets` + elif hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + @property + def total_batch_size(self): + batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler + return ( + batch_sampler.batch_size + if getattr(batch_sampler, "split_batches", False) + else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1)) + ) + + @property + def total_dataset_length(self): + if hasattr(self.dataset, "total_length"): + return self.dataset.total_length + else: + return len(self.dataset) + + def get_sampler(self): + return get_sampler(self) + + def set_sampler(self, sampler): + sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) + if sampler_is_batch_sampler: + self.sampler.sampler = sampler + else: + self.batch_sampler.sampler = sampler + if hasattr(self.batch_sampler, "batch_sampler"): + self.batch_sampler.batch_sampler.sampler = sampler + + +if is_torch_xla_available(): + import torch_xla.distributed.parallel_loader as xpl + + class MpDeviceLoaderWrapper(xpl.MpDeviceLoader): + """ + Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. 
+ + XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to + prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main + thread only. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. + Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. + """ + + def __init__(self, dataloader: DataLoaderShard, device: torch.device): + super().__init__(dataloader, device) + self._rng_types = self._loader.rng_types + self._loader.rng_types = None + self.device = device + + def __iter__(self): + if self._rng_types is not None: + synchronize_rng_states(self._rng_types, self._loader.synchronized_generator) + + return super().__iter__() + + def set_epoch(self, epoch: int): + if hasattr(self.dataloader, "set_epoch"): + self.dataloader.set_epoch(epoch) + + @property + def total_batch_size(self): + return self._loader.total_batch_size + + @property + def total_dataset_length(self): + return self._loader.total_dataset_length + + @property + def batch_sampler(self): + return self._loader.batch_sampler + + @property + def dataloader(self): + return self._loader + + +class DataLoaderDispatcher(DataLoaderAdapter, DataLoaderStateMixin): + """ + Subclass of `DataLoaderAdapter` that will iterate and preprocess on process 0 only, then dispatch on each process + their part of the batch. + + Args: + split_batches (`bool`, *optional*, defaults to `False`): + Whether the resulting `DataLoader` should split the batches of the original data loader across devices or + yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of + `num_processes` batches at each iteration). 
Another way to see this is that the observed batch size will be + the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial + `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch + size of the `dataloader` is a round multiple of `batch_size`. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning of an iteration. + use_stateful_dataloader (`bool`, *optional*, defaults to `False`): + Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. + Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. + """ + + def __init__( + self, + dataset, + split_batches: bool = False, + skip_batches=0, + use_stateful_dataloader=False, + _drop_last: bool = False, + _non_blocking: bool = False, + slice_fn=None, + torch_device_mesh=None, + **kwargs, + ): + shuffle = False + from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe + + # We need to save the shuffling state of the DataPipe + if isinstance(dataset, ShufflerIterDataPipe): + shuffle = dataset._shuffle_enabled + super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) + self.split_batches = split_batches + if shuffle: + torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) + + self.gradient_state = GradientState() + self.state = PartialState() + self._drop_last = _drop_last + self._non_blocking = _non_blocking + self.skip_batches = skip_batches + self.torch_device_mesh = torch_device_mesh + + self.slice_fn = slice_tensors if slice_fn is None else slice_fn + 
        self.iteration = 0

        # if a device mesh is provided extract each dimension (dp, fsdp, tp)
        # device mesh may hold any number of dimensions, however,
        # below code is for targeted support for dp, fsdp and tp

        # device mesh will be used only if there is tp involved
        # or any multi-dimensional parallelism involving tp
        # (dp, tp) (fsdp, tp) (dp, fsdp, tp)
        # otherwise the default behaviour not using device mesh should be sufficient
        # since multi dimensional parallelism devoid of tp would anyway need
        # different batches for each process irrespective of dp or fsdp
        self.submesh_tp = None
        self.submesh_dp = None
        self.submesh_fsdp = None
        if self.torch_device_mesh and "tp" in self.torch_device_mesh.mesh_dim_names:
            self.submesh_tp = self.torch_device_mesh["tp"]
            if "dp" in self.torch_device_mesh.mesh_dim_names:
                self.submesh_dp = self.torch_device_mesh["dp"]
            if "fsdp" in self.torch_device_mesh.mesh_dim_names:
                self.submesh_fsdp = self.torch_device_mesh["fsdp"]
        if self.submesh_tp and (self.submesh_dp or self.submesh_fsdp):
            raise ValueError("TP + (DP/FSDP) is not yet supported in dispatch mode")

    def _fetch_batches(self, iterator):
        # Fetches the next batch(es) on process 0 and builds the `batch_info` metadata that is
        # broadcast to the other processes further below in `__iter__`.
        batches, batch = None, None
        # On process 0, we gather the batch to dispatch.
        if self.state.process_index == 0:
            # Procedure to support TP only is simpler
            # since we want to dispatch the same batch of samples across all ranks
            # this removes complexity of handling multiple tp rank groups when TP + DP
            # combination is involved.

            try:
                # for TP case avoid using split_batches
                # since it would mean that the dataloader should be spilling out
                # duplicates of batches.
                if self.split_batches:
                    # One batch of the main iterator is dispatched and split.
                    if self.submesh_tp:
                        logger.warning(
                            "Use of split_batches for TP would need the dataloader to produce duplicate batches,"
                            "otherwise, use dispatch_batches=True instead."
+ ) + self._update_state_dict() + batch = next(iterator) + else: + # num_processes batches of the main iterator are concatenated then dispatched and split. + # We add the batches one by one so we have the remainder available when drop_last=False. + batches = [] + if self.submesh_tp: + # when tp, extract single batch and then replicate + self._update_state_dict() + batch = next(iterator) + batches = [batch] * self.state.num_processes + else: + for _ in range(self.state.num_processes): + self._update_state_dict() + batches.append(next(iterator)) + try: + batch = concatenate(batches, dim=0) + except RuntimeError as e: + raise RuntimeError( + "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`." + "either pass `dispatch_batches=False` and have each process fetch its own batch " + " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and " + "slice it into `num_processes` batches for each process." + ) from e + # In both cases, we need to get the structure of the batch that we will broadcast on other + # processes to initialize the tensors with the right shape. + # data_structure, stop_iteration + batch_info = [get_data_structure(batch), False] + except StopIteration: + batch_info = [None, True] + else: + batch_info = [None, self._stop_iteration] + # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. + broadcast_object_list(batch_info) + self._stop_iteration = batch_info[1] + if self._stop_iteration: + # If drop_last is False and split_batches is False, we may have a remainder to take care of. 
+ if not self.split_batches and not self._drop_last: + if self.state.process_index == 0 and len(batches) > 0: + batch = concatenate(batches, dim=0) + batch_info = [get_data_structure(batch), False] + else: + batch_info = [None, True] + broadcast_object_list(batch_info) + return batch, batch_info + + def __iter__(self): + self.begin() + self.set_epoch(self.iteration) + main_iterator = None + if is_torch_version(">=", "2.0.1"): + # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts + # shared seed to all dist processes. Thus, we need to create iterator for all dist processes. + # But, we only iterate through the DataLoader on process 0. + main_iterator = self.base_dataloader.__iter__() + elif self.state.process_index == 0: + main_iterator = self.base_dataloader.__iter__() + stop_iteration = False + self._stop_iteration = False + first_batch = None + next_batch, next_batch_info = self._fetch_batches(main_iterator) + batch_index = 0 + while not stop_iteration: + batch, batch_info = next_batch, next_batch_info + + if self.state.process_index != 0: + # Initialize tensors on other processes than process 0. + batch = initialize_tensors(batch_info[0]) + batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking) + # Broadcast the batch before splitting it. + batch = broadcast(batch, from_process=0) + + if not self._drop_last and first_batch is None: + # We keep at least num processes elements of the first batch to be able to complete the last batch + first_batch = self.slice_fn( + batch, + slice(0, self.state.num_processes), + process_index=self.state.process_index, + num_processes=self.state.num_processes, + ) + + if batch is None: + raise ValueError( + f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration." 
+ ) + + observed_batch_size = find_batch_size(batch) + batch_size = observed_batch_size // self.state.num_processes + + stop_iteration = self._stop_iteration + if not stop_iteration: + # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in + # the dataloader since the number of batches is a round multiple of the number of processes. + next_batch, next_batch_info = self._fetch_batches(main_iterator) + # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. + if self._stop_iteration and next_batch_info[0] is None: + stop_iteration = True + + if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: + # If the last batch is not complete, let's add the first batch to it. + batch = concatenate([batch, first_batch], dim=0) + # Batch size computation above is wrong, it's off by 1 so we fix it. + batch_size += 1 + + data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) + batch = self.slice_fn( + batch, + data_slice, + process_index=self.state.process_index, + num_processes=self.state.num_processes, + ) + + if stop_iteration: + self.end_of_dataloader = True + self._update_state_dict() + self.remainder = observed_batch_size + if batch_index >= self.skip_batches: + yield batch + batch_index += 1 + self.iteration += 1 + self.end() + + def set_epoch(self, epoch: int): + # In case it is manually passed in, the user can set it to what they like + if self.iteration != epoch: + self.iteration = epoch + if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): + self.batch_sampler.sampler.set_epoch(epoch) + elif hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + def __len__(self): + whole_length = len(self.base_dataloader) + if self.split_batches: + return whole_length + elif self._drop_last: + return whole_length // self.state.num_processes + 
else: + return math.ceil(whole_length / self.state.num_processes) + + def __reduce__(self): + """ + Define the `__reduce__` method to ensure a `DataLoaderDispatcher` can be pickled and unpickled. This needs to + be explicitly defined since default pickling behavior is broken by `DataLoaderAdapter` messing with its + `__class__` member. + """ + args = super().__reduce__() + return (DataLoaderDispatcher, *args[1:]) + + @property + def total_batch_size(self): + return ( + self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) + ) + + @property + def total_dataset_length(self): + return len(self.dataset) + + def get_sampler(self): + return get_sampler(self) + + def set_sampler(self, sampler): + sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) + if sampler_is_batch_sampler: + self.sampler.sampler = sampler + else: + self.batch_sampler.sampler = sampler + if hasattr(self.batch_sampler, "batch_sampler"): + self.batch_sampler.batch_sampler.sampler = sampler + + +def get_sampler(dataloader): + """ + Get the sampler associated to the dataloader + + Args: + dataloader (`torch.utils.data.dataloader.DataLoader`): + The data loader to split across several devices. 
+ Returns: + `torch.utils.data.Sampler`: The sampler associated to the dataloader + """ + sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + if sampler_is_batch_sampler: + sampler = getattr(dataloader.sampler, "sampler", None) + else: + sampler = getattr(dataloader.batch_sampler, "sampler", None) + return sampler + + +def prepare_data_loader( + dataloader: DataLoader, + device: Optional[torch.device] = None, + num_processes: Optional[int] = None, + process_index: Optional[int] = None, + split_batches: bool = False, + put_on_device: bool = False, + rng_types: Optional[list[Union[str, RNGType]]] = None, + dispatch_batches: Optional[bool] = None, + even_batches: bool = True, + slice_fn_for_dispatch: Optional[Callable] = None, + use_seedable_sampler: bool = False, + data_seed: Optional[int] = None, + non_blocking: bool = False, + use_stateful_dataloader: bool = False, + torch_device_mesh=None, +) -> DataLoader: + """ + Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. + + Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration + at the first batch that would be too small / not present on all processes or loop with indices from the beginning. + + Args: + dataloader (`torch.utils.data.dataloader.DataLoader`): + The data loader to split across several devices. + device (`torch.device`): + The target device for the returned `DataLoader`. + num_processes (`int`, *optional*): + The number of processes running concurrently. Will default to the value given by [`~state.PartialState`]. + process_index (`int`, *optional*): + The index of the current process. Will default to the value given by [`~state.PartialState`]. 
+ split_batches (`bool`, *optional*, defaults to `False`): + Whether the resulting `DataLoader` should split the batches of the original data loader across devices or + yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of + `num_processes` batches at each iteration). + + Another way to see this is that the observed batch size will be the same as the initial `dataloader` if + this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` + otherwise. + + Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of + `batch_size`. + put_on_device (`bool`, *optional*, defaults to `False`): + Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or + dictionaries of tensors). + rng_types (list of `str` or [`~utils.RNGType`]): + The list of random number generators to synchronize at the beginning of each iteration. Should be one or + several of: + + - `"torch"`: the base torch random number generator + - `"cuda"`: the CUDA random number generator (GPU only) + - `"xla"`: the XLA random number generator (TPU only) + - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your + dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. + + dispatch_batches (`bool`, *optional*): + If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches + are split and broadcast to each process. Will default to `True` when the underlying dataset is an + `IterableDataset`, `False` otherwise. + even_batches (`bool`, *optional*, defaults to `True`): + If set to `True`, in cases where the total batch size across all processes does not exactly divide the + dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among + all workers. 
+ slice_fn_for_dispatch (`Callable`, *optional*`): + If passed, this function will be used to slice tensors across `num_processes`. Will default to + [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be + ignored otherwise. + use_seedable_sampler (`bool`, *optional*, defaults to `False`): + Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better + reproducability. Comes at a cost of potentially different performances due to different shuffling + algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every + `self.set_epoch` + data_seed (`int`, *optional*, defaults to `None`): + The seed to use for the underlying generator when using `use_seedable_sampler`. If `None`, the generator + will use the current default seed from torch. + non_blocking (`bool`, *optional*, defaults to `False`): + If set to `True`, dataloader will utilize non-blocking host-to-device transfers. If the dataloader has + `pin_memory` set to `True`, this will help to increase overlap between data transfer and computations. + use_stateful_dataloader (`bool`, *optional*, defaults to `False`): + "If set to true, the dataloader prepared by the Accelerator will be backed by " + "[torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). + This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed." + torch_device_mesh (`torch.distributed.DeviceMesh`, *optional*, defaults to `None`): + PyTorch device mesh. + + + Returns: + `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches + + + + `BatchSampler`s with varying batch sizes are not enabled by default. 
To enable this behaviour, set `even_batches` + equal to `False` + + + """ + if dispatch_batches is None: + if not put_on_device: + dispatch_batches = False + else: + dispatch_batches = isinstance(dataloader.dataset, IterableDataset) + + if dispatch_batches and not put_on_device: + raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.") + # Grab defaults from PartialState + state = PartialState() + if num_processes is None: + num_processes = state.num_processes + + if process_index is None: + process_index = state.process_index + + if torch_device_mesh: + if state.distributed_type == DistributedType.DEEPSPEED: + # In DeepSpeed, the optimizer sharing level in DP is determined by the config file. + # Only considers "dp" and "tp". + # Given a device mesh (dp, tp) = (2, 3): + # - From the data parallel perspective, ranks should be structured as: 0 0 0 1 1 1 + # - Processes with the same DP rank will receive the same batch. + submesh_tp_size = 1 + if "tp" in torch_device_mesh.mesh_dim_names: + submesh_tp_size = torch_device_mesh["tp"].size() + process_index = process_index // submesh_tp_size + num_processes = num_processes // submesh_tp_size + else: + # when device mesh is used, specifically with TP + # then there is need to update process_index and num_processes + # to bring in the effect of generating same batch across TP ranks + # and different batch across FSDP and DP ranks. 
+ # Example: + # if device mesh is (dp,fsdp,tp) = (2, 2, 3) + # ranks would range from 0...11 + # from data angle ranks should look like 0 0 0 1 1 1 2 2 2 3 3 3 + # processes with same ranks/ids would receive the same batch + # for CP the same as TP applies + submesh_fsdp_size = 1 + submesh_dp_size = 1 + submesh_tp_size = 1 + submesh_cp_size = 1 + if "tp" in torch_device_mesh.mesh_dim_names: + submesh_tp_size = torch_device_mesh["tp"].size() + if "cp" in torch_device_mesh.mesh_dim_names: + submesh_cp_size = torch_device_mesh["cp"].size() + if "dp_replicate" in torch_device_mesh.mesh_dim_names: + submesh_dp_size = torch_device_mesh["dp_replicate"].size() + if "dp_shard" in torch_device_mesh.mesh_dim_names: + submesh_fsdp_size = torch_device_mesh["dp_shard"].size() + process_index = process_index // (submesh_tp_size * submesh_cp_size) + num_processes = submesh_fsdp_size * submesh_dp_size + + # Sanity check + if split_batches: + if dataloader.batch_size is not None: + batch_size_for_check = dataloader.batch_size + else: + # For custom batch_sampler + if hasattr(dataloader.batch_sampler, "batch_size"): + batch_size_for_check = dataloader.batch_sampler.batch_size + else: + raise ValueError( + "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed " + "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. " + "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` " + f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set." + ) + + if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0: + raise ValueError( + f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) " + f"needs to be a round multiple of the number of processes ({num_processes})." 
+ ) + + new_dataset = dataloader.dataset + # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it + new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None + sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + synchronized_generator = None + + sampler = get_sampler(dataloader) + if isinstance(sampler, RandomSampler) and use_seedable_sampler: + # When iterating through the dataloader during distributed processes + # we want to ensure that on each process we are iterating through the same + # samples in the same order if a seed is set. This requires a tweak + # to the `torch.utils.data.RandomSampler` class (if used). + sampler = SeedableRandomSampler( + data_source=sampler.data_source, + replacement=sampler.replacement, + num_samples=sampler._num_samples, + generator=getattr( + sampler, + "generator", + torch.Generator(device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"), + ), + data_seed=data_seed, + ) + + if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA: + # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled. 
+ generator = torch.Generator( + device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu" + ) + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + generator.manual_seed(seed) + dataloader.generator = generator + dataloader.sampler.generator = generator + # No change if no multiprocess + if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches: + if is_datasets_available(): + from datasets import IterableDataset as DatasetsIterableDataset + if ( + is_datasets_available() + and isinstance(new_dataset, DatasetsIterableDataset) + and not split_batches + and new_dataset.n_shards > num_processes + ): + new_dataset = new_dataset.shard(num_shards=num_processes, index=process_index) + elif isinstance(new_dataset, IterableDataset): + if getattr(dataloader.dataset, "generator", None) is not None: + synchronized_generator = dataloader.dataset.generator + new_dataset = IterableDatasetShard( + new_dataset, + batch_size=dataloader.batch_size, + drop_last=dataloader.drop_last, + num_processes=num_processes, + process_index=process_index, + split_batches=split_batches, + ) + else: + if not use_seedable_sampler and hasattr(sampler, "generator"): + if sampler.generator is None: + sampler.generator = torch.Generator( + device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu" + ) + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + sampler.generator.manual_seed(seed) + synchronized_generator = sampler.generator + batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler + new_batch_sampler = BatchSamplerShard( + batch_sampler, + num_processes=num_processes, + process_index=process_index, + split_batches=split_batches, + even_batches=even_batches, + ) + + # We ignore all of those since they are all dealt with by our new_batch_sampler + ignore_kwargs = [ + "batch_size", + "shuffle", + "sampler", + "batch_sampler", + 
"drop_last", + ] + + if rng_types is not None and synchronized_generator is None and "generator" in rng_types: + rng_types.remove("generator") + + kwargs = { + k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) + for k in _PYTORCH_DATALOADER_KWARGS + if k not in ignore_kwargs + } + + # Need to provide batch_size as batch_sampler is None for Iterable dataset + if new_batch_sampler is None: + kwargs["drop_last"] = dataloader.drop_last + kwargs["batch_size"] = ( + dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size + ) + if dispatch_batches: + kwargs.pop("generator") + dataloader = DataLoaderDispatcher( + new_dataset, + split_batches=split_batches, + batch_sampler=new_batch_sampler, + _drop_last=dataloader.drop_last, + _non_blocking=non_blocking, + slice_fn=slice_fn_for_dispatch, + use_stateful_dataloader=use_stateful_dataloader, + torch_device_mesh=torch_device_mesh, + **kwargs, + ) + elif sampler_is_batch_sampler: + dataloader = DataLoaderShard( + new_dataset, + device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, + sampler=new_batch_sampler, + batch_size=dataloader.batch_size, + rng_types=rng_types, + _drop_last=dataloader.drop_last, + _non_blocking=non_blocking, + synchronized_generator=synchronized_generator, + use_stateful_dataloader=use_stateful_dataloader, + **kwargs, + ) + else: + dataloader = DataLoaderShard( + new_dataset, + device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, + batch_sampler=new_batch_sampler, + rng_types=rng_types, + synchronized_generator=synchronized_generator, + _drop_last=dataloader.drop_last, + _non_blocking=non_blocking, + use_stateful_dataloader=use_stateful_dataloader, + **kwargs, + ) + + if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler: + dataloader.set_sampler(sampler) + if state.distributed_type == DistributedType.XLA: + return MpDeviceLoaderWrapper(dataloader, device) 
+ return dataloader + + +class SkipBatchSampler(BatchSampler): + """ + A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`. + Should not be used if the original dataloader is a `StatefulDataLoader`. + """ + + def __init__(self, batch_sampler, skip_batches=0): + self.batch_sampler = batch_sampler + self.skip_batches = skip_batches + + def __iter__(self): + for index, samples in enumerate(self.batch_sampler): + if index >= self.skip_batches: + yield samples + + @property + def total_length(self): + return len(self.batch_sampler) + + def __len__(self): + return len(self.batch_sampler) - self.skip_batches + + +class SkipDataLoader(DataLoaderAdapter, DataLoaderStateMixin): + """ + Subclass of a PyTorch `DataLoader` that will skip the first batches. Generally it's preferable to use + `skip_first_batches`/`torchdata.StatefulDataLoader` instead of this class. + + Args: + dataset (`torch.utils.data.dataset.Dataset`): + The dataset to use to build this dataloader. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning. + kwargs: + All other keyword arguments to pass to the regular `DataLoader` initialization. + """ + + def __init__(self, dataset, skip_batches=0, use_stateful_dataloader=False, **kwargs): + super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) + self.skip_batches = skip_batches + self.gradient_state = GradientState() + + def __iter__(self): + self.begin() + for index, batch in enumerate(self.base_dataloader.__iter__()): + if index >= self.skip_batches: + self._update_state_dict() + yield batch + self.end() + + def __len__(self): + return len(self.base_dataloader) - self.skip_batches + + def __reduce__(self): + """ + Define the `__reduce__` method to ensure a `SkipDataLoader` can be pickled and unpickled. 
This needs to be + explicitly defined since default pickling behavior is broken by `DataLoaderAdapter` messing with its + `__class__` member. + """ + args = super().__reduce__() + return (SkipDataLoader, *args[1:]) + + +def skip_first_batches(dataloader, num_batches=0): + """ + Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. Should not be used if + the original dataloader is a `StatefulDataLoader`. + """ + state = PartialState() + if state.distributed_type == DistributedType.XLA: + device = dataloader.device + dataloader = dataloader.dataloader + + dataset = dataloader.dataset + sampler_is_batch_sampler = False + if isinstance(dataset, IterableDataset): + new_batch_sampler = None + else: + sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler + new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches) + + # We ignore all of those since they are all dealt with by our new_batch_sampler + ignore_kwargs = [ + "batch_size", + "shuffle", + "sampler", + "batch_sampler", + "drop_last", + ] + + kwargs = { + k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) + for k in _PYTORCH_DATALOADER_KWARGS + if k not in ignore_kwargs + } + + # Need to provide batch_size as batch_sampler is None for Iterable dataset + if new_batch_sampler is None: + kwargs["drop_last"] = dataloader.drop_last + kwargs["batch_size"] = dataloader.batch_size + + if isinstance(dataloader, DataLoaderDispatcher): + if new_batch_sampler is None: + # Need to manually skip batches in the dataloader + kwargs["skip_batches"] = num_batches + dataloader = DataLoaderDispatcher( + dataset, + split_batches=dataloader.split_batches, + batch_sampler=new_batch_sampler, + _drop_last=dataloader._drop_last, + **kwargs, + ) + elif isinstance(dataloader, DataLoaderShard): + if new_batch_sampler is None: + # Need to manually skip batches in the 
dataloader + kwargs["skip_batches"] = num_batches + elif sampler_is_batch_sampler: + kwargs["sampler"] = new_batch_sampler + kwargs["batch_size"] = dataloader.batch_size + else: + kwargs["batch_sampler"] = new_batch_sampler + dataloader = DataLoaderShard( + dataset, + device=dataloader.device, + rng_types=dataloader.rng_types, + synchronized_generator=dataloader.synchronized_generator, + **kwargs, + ) + else: + if new_batch_sampler is None: + # Need to manually skip batches in the dataloader + dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs) + else: + dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs) + + if state.distributed_type == DistributedType.XLA: + dataloader = MpDeviceLoaderWrapper(dataloader, device) + + return dataloader diff --git a/venv/lib/python3.10/site-packages/accelerate/hooks.py b/venv/lib/python3.10/site-packages/accelerate/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..8bdbb447209333e19291882b555b54f4213edb5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/hooks.py @@ -0,0 +1,776 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import functools +from collections.abc import Mapping +from typing import Optional, Union + +import torch +import torch.nn as nn + +from .state import PartialState +from .utils import ( + PrefixedDataset, + find_device, + named_module_tensors, + send_to_device, + set_module_tensor_to_device, +) +from .utils.imports import ( + is_mlu_available, + is_musa_available, + is_npu_available, +) +from .utils.memory import clear_device_cache +from .utils.modeling import get_non_persistent_buffers +from .utils.other import recursive_getattr + + +_accelerate_added_attributes = ["to", "cuda", "npu", "xpu", "mlu", "sdaa", "musa"] + + +class ModelHook: + """ + A hook that contains callbacks to be executed just before and after the forward method of a model. The difference + with PyTorch existing hooks is that they get passed along the kwargs. + + Class attribute: + - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under + the `torch.no_grad()` context manager. + """ + + no_grad = False + + def init_hook(self, module): + """ + To be executed when the hook is attached to the module. + + Args: + module (`torch.nn.Module`): The module attached to this hook. + """ + return module + + def pre_forward(self, module, *args, **kwargs): + """ + To be executed just before the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass will be executed just after this event. + args (`Tuple[Any]`): The positional arguments passed to the module. + kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module. + + Returns: + `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`. + """ + return args, kwargs + + def post_forward(self, module, output): + """ + To be executed just after the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass been executed just before this event. 
+ output (`Any`): The output of the module. + + Returns: + `Any`: The processed `output`. + """ + return output + + def detach_hook(self, module): + """ + To be executed when the hook is detached from a module. + + Args: + module (`torch.nn.Module`): The module detached from this hook. + """ + return module + + +class SequentialHook(ModelHook): + """ + A hook that can contain several hooks and iterates through them at each event. + """ + + def __init__(self, *hooks): + self.hooks = hooks + + def init_hook(self, module): + for hook in self.hooks: + module = hook.init_hook(module) + return module + + def pre_forward(self, module, *args, **kwargs): + for hook in self.hooks: + args, kwargs = hook.pre_forward(module, *args, **kwargs) + return args, kwargs + + def post_forward(self, module, output): + for hook in self.hooks: + output = hook.post_forward(module, output) + return output + + def detach_hook(self, module): + for hook in self.hooks: + module = hook.detach_hook(module) + return module + + +def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): + """ + Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove + this behavior and restore the original `forward` method, use `remove_hook_from_module`. + + + + If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks + together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. + + + + Args: + module (`torch.nn.Module`): + The module to attach a hook to. + hook (`ModelHook`): + The hook to attach. + append (`bool`, *optional*, defaults to `False`): + Whether the hook should be chained with an existing one (if module already contains a hook) or not. + + Returns: + `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can + be discarded). 
+ """ + if append and (getattr(module, "_hf_hook", None) is not None): + old_hook = module._hf_hook + remove_hook_from_module(module) + hook = SequentialHook(old_hook, hook) + + if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): + # If we already put some hook on this module, we replace it with the new one. + old_forward = module._old_forward + else: + old_forward = module.forward + module._old_forward = old_forward + + module = hook.init_hook(module) + module._hf_hook = hook + + def new_forward(module, *args, **kwargs): + args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) + if module._hf_hook.no_grad: + with torch.no_grad(): + output = module._old_forward(*args, **kwargs) + else: + output = module._old_forward(*args, **kwargs) + return module._hf_hook.post_forward(module, output) + + # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. + # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 + if "GraphModuleImpl" in str(type(module)): + module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) + else: + module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) + + return module + + +def remove_hook_from_module(module: nn.Module, recurse=False): + """ + Removes any hook attached to a module via `add_hook_to_module`. + + Args: + module (`torch.nn.Module`): The module to attach a hook to. + recurse (`bool`, **optional**): Whether to remove the hooks recursively + + Returns: + `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can + be discarded). + """ + + if hasattr(module, "_hf_hook"): + module._hf_hook.detach_hook(module) + delattr(module, "_hf_hook") + + if hasattr(module, "_old_forward"): + # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. 
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 + if "GraphModuleImpl" in str(type(module)): + module.__class__.forward = module._old_forward + else: + module.forward = module._old_forward + delattr(module, "_old_forward") + + # Remove accelerate added warning hooks from dispatch_model + for attr in _accelerate_added_attributes: + module.__dict__.pop(attr, None) + + if recurse: + for child in module.children(): + remove_hook_from_module(child, recurse) + + return module + + +class AlignDevicesHook(ModelHook): + """ + A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the + associated module, potentially offloading the weights after the forward pass. + + Args: + execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + io_same_device (`bool`, *optional*, defaults to `False`): + Whether or not the output should be placed on the same device as the input was. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + place_submodules (`bool`, *optional*, defaults to `False`): + Whether to place the submodules on `execution_device` during the `init_hook` event. 
+ """ + + def __init__( + self, + execution_device: Optional[Union[int, str, torch.device]] = None, + offload: bool = False, + io_same_device: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + place_submodules: bool = False, + skip_keys: Optional[Union[str, list[str]]] = None, + tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None, + ): + self.execution_device = execution_device + self.offload = offload + self.io_same_device = io_same_device + self.weights_map = weights_map + self.offload_buffers = offload_buffers + self.place_submodules = place_submodules + self.skip_keys = skip_keys + + # Will contain the input device when `io_same_device=True`. + self.input_device = None + self.param_original_devices = {} + self.buffer_original_devices = {} + self.tied_params_names = set() + + # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory + # for tied weights already loaded on the target execution device. + self.tied_params_map = tied_params_map + + def __repr__(self): + return ( + f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " + f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " + f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" + ) + + def init_hook(self, module): + # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero. 
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"): + self.tied_params_map = None + + if not self.offload and self.execution_device is not None: + for name, _ in named_module_tensors(module, recurse=self.place_submodules): + set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) + elif self.offload: + self.original_devices = { + name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) + } + if self.weights_map is None: + self.weights_map = { + name: param.to("cpu") + for name, param in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ) + } + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True + ): + # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer, + # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. + # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str] + # to add on the fly pointers to `tied_params_map` in the pre_forward call. 
+ if ( + self.tied_params_map is not None + and recursive_getattr(module, name).data_ptr() in self.tied_params_map + ): + self.tied_params_names.add(name) + + set_module_tensor_to_device(module, name, "meta") + + if not self.offload_buffers and self.execution_device is not None: + for name, _ in module.named_buffers(recurse=self.place_submodules): + set_module_tensor_to_device( + module, name, self.execution_device, tied_params_map=self.tied_params_map + ) + elif self.offload_buffers and self.execution_device is not None: + for name in get_non_persistent_buffers(module, recurse=self.place_submodules): + set_module_tensor_to_device( + module, name, self.execution_device, tied_params_map=self.tied_params_map + ) + + return module + + def pre_forward(self, module, *args, **kwargs): + if self.io_same_device: + self.input_device = find_device([args, kwargs]) + if self.offload: + self.tied_pointers_to_remove = set() + + for name, _ in named_module_tensors( + module, + include_buffers=self.offload_buffers, + recurse=self.place_submodules, + remove_non_persistent=True, + ): + fp16_statistics = None + value = self.weights_map[name] + if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): + if value.dtype == torch.int8: + fp16_statistics = self.weights_map[name.replace("weight", "SCB")] + + # In case we are using offloading with tied weights, we need to keep track of the offloaded weights + # that are loaded on device at this point, as we will need to remove them as well from the dictionary + # self.tied_params_map in order to allow to free memory. 
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map: + self.tied_params_map[value.data_ptr()] = {} + + if ( + value is not None + and self.tied_params_map is not None + and value.data_ptr() in self.tied_params_map + and self.execution_device not in self.tied_params_map[value.data_ptr()] + ): + self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device)) + + set_module_tensor_to_device( + module, + name, + self.execution_device, + value=value, + fp16_statistics=fp16_statistics, + tied_params_map=self.tied_params_map, + ) + + return send_to_device(args, self.execution_device), send_to_device( + kwargs, self.execution_device, skip_keys=self.skip_keys + ) + + def post_forward(self, module, output): + if self.offload: + for name, _ in named_module_tensors( + module, + include_buffers=self.offload_buffers, + recurse=self.place_submodules, + remove_non_persistent=True, + ): + set_module_tensor_to_device(module, name, "meta") + if type(module).__name__ == "Linear8bitLt": + module.state.SCB = None + module.state.CxB = None + + # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from + # this dictionary to allow the garbage collector to do its job. 
+ for value_pointer, device in self.tied_pointers_to_remove: + if isinstance(device, int): + if is_npu_available(): + device = f"npu:{device}" + elif is_mlu_available(): + device = f"mlu:{device}" + elif is_musa_available(): + device = f"musa:{device}" + if device in self.tied_params_map[value_pointer]: + del self.tied_params_map[value_pointer][device] + self.tied_pointers_to_remove = set() + if self.io_same_device and self.input_device is not None: + output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) + + return output + + def detach_hook(self, module): + if self.offload: + for name, device in self.original_devices.items(): + if device != torch.device("meta"): + set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) + return module + + +def attach_execution_device_hook( + module: torch.nn.Module, + execution_device: Union[int, str, torch.device], + skip_keys: Optional[Union[str, list[str]]] = None, + preload_module_classes: Optional[list[str]] = None, + tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right + execution device + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`int`, `str` or `torch.device`): + The device on which inputs and model weights should be placed before the forward pass. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. 
This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): + A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution + device, this parameter is useful to reuse the first available pointer of a shared weight for all others, + instead of duplicating memory. + """ + if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0: + add_hook_to_module( + module, + AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map), + ) + + # Break the recursion if we get to a preload module. + if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes: + return + + for child in module.children(): + attach_execution_device_hook( + child, + execution_device, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + tied_params_map=tied_params_map, + ) + + +def attach_align_device_hook( + module: torch.nn.Module, + execution_device: Optional[torch.device] = None, + offload: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + module_name: str = "", + skip_keys: Optional[Union[str, list[str]]] = None, + preload_module_classes: Optional[list[str]] = None, + tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or + buffers. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. 
+ execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): + A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution + device, this parameter is useful to reuse the first available pointer of a shared weight for all others, + instead of duplicating memory. + """ + # Attach the hook on this module if it has any direct tensor. + directs = named_module_tensors(module) + full_offload = ( + offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes + ) + + if len(list(directs)) > 0 or full_offload: + if weights_map is not None: + prefix = f"{module_name}." 
if len(module_name) > 0 else "" + prefixed_weights_map = PrefixedDataset(weights_map, prefix) + else: + prefixed_weights_map = None + hook = AlignDevicesHook( + execution_device=execution_device, + offload=offload, + weights_map=prefixed_weights_map, + offload_buffers=offload_buffers, + place_submodules=full_offload, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook, append=True) + + # We stop the recursion in case we hit the full offload. + if full_offload: + return + + # Recurse on all children of the module. + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + + +def remove_hook_from_submodules(module: nn.Module): + """ + Recursively removes all hooks attached on the submodules of a given model. + + Args: + module (`torch.nn.Module`): The module on which to remove all hooks. + """ + remove_hook_from_module(module) + for child in module.children(): + remove_hook_from_submodules(child) + + +def attach_align_device_hook_on_blocks( + module: nn.Module, + execution_device: Optional[Union[torch.device, dict[str, torch.device]]] = None, + offload: Union[bool, dict[str, bool]] = False, + weights_map: Mapping = None, + offload_buffers: bool = False, + module_name: str = "", + skip_keys: Optional[Union[str, list[str]]] = None, + preload_module_classes: Optional[list[str]] = None, + tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None, +): + """ + Attaches `AlignDevicesHook` to all blocks of a given model as needed. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. 
+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. It can be one device + for the whole module, or a dictionary mapping module name to device. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole + module, or a dictionary mapping module name to boolean. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): + A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution + device, this parameter is useful to reuse the first available pointer of a shared weight for all others, + instead of duplicating memory. + """ + # If one device and one offload, we've got one hook. 
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict): + if not offload: + hook = AlignDevicesHook( + execution_device=execution_device, + io_same_device=True, + skip_keys=skip_keys, + place_submodules=True, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + else: + attach_align_device_hook( + module, + execution_device=execution_device, + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + return + + if not isinstance(execution_device, Mapping): + execution_device = {key: execution_device for key in offload.keys()} + if not isinstance(offload, Mapping): + offload = {key: offload for key in execution_device.keys()} + + if module_name in execution_device and module_name in offload and not offload[module_name]: + hook = AlignDevicesHook( + execution_device=execution_device[module_name], + offload_buffers=offload_buffers, + io_same_device=(module_name == ""), + place_submodules=True, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + attach_execution_device_hook( + module, execution_device[module_name], skip_keys=skip_keys, tied_params_map=tied_params_map + ) + elif module_name in execution_device and module_name in offload: + attach_align_device_hook( + module, + execution_device=execution_device[module_name], + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + tied_params_map=tied_params_map, + ) + if not hasattr(module, "_hf_hook"): + hook = AlignDevicesHook( + execution_device=execution_device[module_name], + io_same_device=(module_name == ""), + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + attach_execution_device_hook( + module, + execution_device[module_name], + 
preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + elif module_name == "": + hook = AlignDevicesHook( + execution_device=execution_device.get(""), + io_same_device=True, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook_on_blocks( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + + +class CpuOffload(ModelHook): + """ + Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after + the forward, the user needs to call the `init_hook` method again for this. + + Args: + execution_device(`str`, `int` or `torch.device`, *optional*): + The device on which the model should be executed. Will default to the MPS device if it's available, then + GPU 0 if there is a GPU, and finally to the CPU. + prev_module_hook (`UserCpuOffloadHook`, *optional*): + The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If + passed, its offload method will be called just before the forward of the model to which this hook is + attached. 
+ """ + + def __init__( + self, + execution_device: Optional[Union[str, int, torch.device]] = None, + prev_module_hook: Optional["UserCpuOffloadHook"] = None, + ): + self.prev_module_hook = prev_module_hook + + self.execution_device = execution_device if execution_device is not None else PartialState().default_device + + def init_hook(self, module): + return module.to("cpu") + + def pre_forward(self, module, *args, **kwargs): + if self.prev_module_hook is not None and isinstance(self.prev_module_hook, UserCpuOffloadHook): + prev_module = self.prev_module_hook.model + prev_device = next(prev_module.parameters()).device + + # Only offload the previous module if it is not already on CPU. + if prev_device != torch.device("cpu"): + self.prev_module_hook.offload() + clear_device_cache() + + # If the current device is already the self.execution_device, we can skip the transfer. + current_device = next(module.parameters()).device + if current_device == self.execution_device: + return args, kwargs + + module.to(self.execution_device) + return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) + + +class UserCpuOffloadHook: + """ + A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook + or remove it entirely. + """ + + def __init__(self, model, hook): + self.model = model + self.hook = hook + + def offload(self): + self.hook.init_hook(self.model) + + def remove(self): + remove_hook_from_module(self.model) + + +class LayerwiseCastingHook(ModelHook): + r""" + A hook that casts the weights of a module to a high precision dtype for computation, and to a low precision dtype + for storage. This process may lead to quality loss in the output, but can significantly reduce the memory + footprint. 
+ """ + + _is_stateful = False + + def __init__(self, storage_dtype: torch.dtype, compute_dtype: torch.dtype, non_blocking: bool) -> None: + self.storage_dtype = storage_dtype + self.compute_dtype = compute_dtype + self.non_blocking = non_blocking + + def init_hook(self, module: torch.nn.Module): + module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking) + return module + + def pre_forward(self, module: torch.nn.Module, *args, **kwargs): + module.to(dtype=self.compute_dtype, non_blocking=self.non_blocking) + return args, kwargs + + def post_forward(self, module: torch.nn.Module, output): + module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking) + return output diff --git a/venv/lib/python3.10/site-packages/accelerate/inference.py b/venv/lib/python3.10/site-packages/accelerate/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..d1659718b0b3020955f04060aa6f0cc7ef66d7ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/inference.py @@ -0,0 +1,184 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import math +from types import MethodType +from typing import Any, Optional, Union + +from .state import PartialState +from .utils import ( + calculate_maximum_sizes, + convert_bytes, + copy_tensor_to_devices, + ignorant_find_batch_size, + infer_auto_device_map, + is_pippy_available, + pad_input_tensors, + send_to_device, +) + + +def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None): + """ + Calculates the device map for `model` with an offset for PiPPy + """ + if num_processes == 1: + return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False) + if max_memory is None: + model_size, shared = calculate_maximum_sizes(model) + + # Split into `n` chunks for each GPU + memory = (model_size + shared[0]) / num_processes + memory = convert_bytes(memory) + value, ending = memory.split(" ") + + # Add a chunk to deal with potential extra shared memory instances + memory = math.ceil(float(value)) * 1.1 + memory = f"{memory} {ending}" + max_memory = {i: memory for i in range(num_processes)} + device_map = infer_auto_device_map( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + clean_result=False, + ) + return device_map + + +def find_pippy_batch_size(args, kwargs): + found_batch_size = None + if args is not None: + for arg in args: + found_batch_size = ignorant_find_batch_size(arg) + if found_batch_size is not None: + break + if kwargs is not None and found_batch_size is None: + for kwarg in kwargs.values(): + found_batch_size = ignorant_find_batch_size(kwarg) + if found_batch_size is not None: + break + return found_batch_size + + +def build_pipeline(model, split_points, args, kwargs, num_chunks): + """ + Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing + in needed `args` and `kwargs` as the model needs on the CPU. 
+ + Users can pass in custom `num_chunks` as an optional hyper-parameter. By default will use + `AcceleratorState.num_processes` + """ + # Note: We import here to reduce import time from general modules, and isolate outside dependencies + from torch.distributed.pipelining import ScheduleGPipe, SplitPoint, pipeline + + # We need to annotate the split points in the model for PiPPy + state = PartialState() + split_spec = {split_point: SplitPoint.BEGINNING for split_point in split_points} + pipe = pipeline( + model, + mb_args=args, + mb_kwargs=kwargs, + split_spec=split_spec, + ) + stage = pipe.build_stage(state.local_process_index, device=state.device) + schedule = ScheduleGPipe(stage, num_chunks) + + return schedule + + +def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs): + state = PartialState() + output = None + + if state.num_processes == 1: + output = forward(*args, **kwargs) + elif state.is_local_main_process: + found_batch_size = find_pippy_batch_size(args, kwargs) + if found_batch_size is None: + raise ValueError("Could not find batch size from args or kwargs") + else: + if found_batch_size != num_chunks: + args = pad_input_tensors(args, found_batch_size, num_chunks) + kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) + forward(*args, **kwargs) + elif state.is_last_process: + output = forward() + else: + forward() + if gather_output: + # Each node will get a copy of the full output which is only on the last GPU + output = copy_tensor_to_devices(output) + return output + + +def prepare_pippy( + model, + split_points: Optional[Union[str, list[str]]] = "auto", + no_split_module_classes: Optional[list[str]] = None, + example_args: Optional[tuple[Any]] = (), + example_kwargs: Optional[dict[str, Any]] = None, + num_chunks: Optional[int] = None, + gather_output: Optional[bool] = False, +): + """ + Wraps `model` for pipeline parallel inference. 
+ + Args: + model (`torch.nn.Module`): + A model we want to split for pipeline-parallel inference + split_points (`str` or `List[str]`, defaults to 'auto'): + How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced + split given any model. Should be a list of layer names in the model to split by otherwise. + no_split_module_classes (`List[str]`): + A list of class names for layers we don't want to be split. + example_args (tuple of model inputs): + The expected inputs for the model that uses order-based inputs for a *single process*. Recommended to use + this method if possible. + example_kwargs (dict of model inputs) + The expected inputs for the model that uses dictionary-based inputs for a *single process*. This is a + *highly* limiting structure that requires the same keys be present at *all* inference calls. Not + recommended unless the prior condition is true for all cases. + num_chunks (`int`, defaults to the number of available GPUs): + The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but + this can be tuned and played with. In general one should have num_chunks >= num_gpus. + gather_output (`bool`, defaults to `False`): + If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs. 
+ """ + if not is_pippy_available(): + raise ImportError("Using `torch.distributed.pipelining` requires PyTorch 2.4.0 or later.") + state = PartialState() + example_args = send_to_device(example_args, "cpu") + example_kwargs = send_to_device(example_kwargs, "cpu") + if num_chunks is None: + num_chunks = state.num_processes + if split_points == "auto": + device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes) + split_points = [] + for i in range(1, num_chunks): + split_points.append(next(k for k, v in device_map.items() if v == i)) + model.hf_split_points = split_points + stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks) + model._original_forward = model.forward + model._original_call = model.__call__ + model.pippy_stage = stage + model.hf_split_points = split_points + + def forward(*args, **kwargs): + return pippy_forward(stage.step, num_chunks, gather_output, *args, **kwargs) + + # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` + # Note: creates an infinite recursion loop with `generate` + model_forward = MethodType(forward, model) + forward.__wrapped__ = model_forward + model.forward = forward + return model diff --git a/venv/lib/python3.10/site-packages/accelerate/launchers.py b/venv/lib/python3.10/site-packages/accelerate/launchers.py new file mode 100644 index 0000000000000000000000000000000000000000..532c0ad4450bb22870efa5a3d46cedbfbca5a28b --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/launchers.py @@ -0,0 +1,306 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import tempfile + +import torch + +from .state import AcceleratorState, PartialState +from .utils import ( + PrecisionType, + PrepareForLaunch, + are_libraries_initialized, + check_cuda_p2p_ib_support, + get_gpu_info, + is_mps_available, + is_torch_version, + patch_environment, +) +from .utils.constants import ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION + + +def test_launch(): + "Verify a `PartialState` can be initialized." + _ = PartialState() + + +def notebook_launcher( + function, + args=(), + num_processes=None, + mixed_precision="no", + use_port="29500", + master_addr="127.0.0.1", + node_rank=0, + num_nodes=1, + rdzv_backend="static", + rdzv_endpoint="", + rdzv_conf=None, + rdzv_id="none", + max_restarts=0, + monitor_interval=0.1, + log_line_prefix_template=None, +): + """ + Launches a training function, using several processes or multiple nodes if it's possible in the current environment + (TPU with multiple cores for instance). + + + + To use this function absolutely zero calls to a device must be made in the notebook session before calling. If any + have been made, you will need to restart the notebook and make sure no cells use any device capability. + + Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none + of those calls have been made. + + + + Args: + function (`Callable`): + The training function to execute. If it accepts arguments, the first argument should be the index of the + process run. 
+ args (`Tuple`): + Tuple of arguments to pass to the function (it will receive `*args`). + num_processes (`int`, *optional*): + The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to + the number of devices available otherwise. + mixed_precision (`str`, *optional*, defaults to `"no"`): + If `fp16` or `bf16`, will use mixed precision training on multi-device. + use_port (`str`, *optional*, defaults to `"29500"`): + The port to use to communicate between processes when launching a multi-device training. + master_addr (`str`, *optional*, defaults to `"127.0.0.1"`): + The address to use for communication between processes. + node_rank (`int`, *optional*, defaults to 0): + The rank of the current node. + num_nodes (`int`, *optional*, defaults to 1): + The number of nodes to use for training. + rdzv_backend (`str`, *optional*, defaults to `"static"`): + The rendezvous method to use, such as 'static' (the default) or 'c10d' + rdzv_endpoint (`str`, *optional*, defaults to `""`): + The endpoint of the rdzv sync. storage. + rdzv_conf (`Dict`, *optional*, defaults to `None`): + Additional rendezvous configuration. + rdzv_id (`str`, *optional*, defaults to `"none"`): + The unique run id of the job. + max_restarts (`int`, *optional*, defaults to 0): + The maximum amount of restarts that elastic agent will conduct on workers before failure. + monitor_interval (`float`, *optional*, defaults to 0.1): + The interval in seconds that is used by the elastic_agent as a period of monitoring workers. + log_line_prefix_template (`str`, *optional*, defaults to `None`): + The prefix template for elastic launch logging. Available from PyTorch 2.2.0. + + Example: + + ```python + # Assume this is defined in a Jupyter Notebook on an instance with two devices + from accelerate import notebook_launcher + + + def train(*args): + # Your training function here + ... 
+ + + notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16") + ``` + """ + # Are we in a google colab or a Kaggle Kernel? + in_colab = False + in_kaggle = False + if any(key.startswith("KAGGLE") for key in os.environ.keys()): + in_kaggle = True + elif "IPython" in sys.modules: + in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) + + try: + mixed_precision = PrecisionType(mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + if (in_colab or in_kaggle) and ( + (os.environ.get("TPU_NAME", None) is not None) or (os.environ.get("PJRT_DEVICE", "") == "TPU") + ): + # TPU launch + import torch_xla.distributed.xla_multiprocessing as xmp + + if len(AcceleratorState._shared_state) > 0: + raise ValueError( + "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " + "your training function. Restart your notebook and make sure no cells initializes an " + "`Accelerator`." + ) + + launcher = PrepareForLaunch(function, distributed_type="XLA") + print("Launching a training on TPU cores.") + xmp.spawn(launcher, args=args, start_method="fork") + elif in_colab and get_gpu_info()[1] < 2: + # No need for a distributed launch otherwise as it's either CPU or one GPU. + if torch.cuda.is_available(): + print("Launching training on one GPU.") + else: + print("Launching training on one CPU.") + function(*args) + else: + if num_processes is None: + raise ValueError( + "You have to specify the number of devices you would like to use, add `num_processes=...` to your call." 
+ ) + if node_rank >= num_nodes: + raise ValueError("The node_rank must be less than the number of nodes.") + if num_processes > 1: + # Multi-device launch + from torch.distributed.launcher.api import LaunchConfig, elastic_launch + from torch.multiprocessing import start_processes + from torch.multiprocessing.spawn import ProcessRaisedException + + if len(AcceleratorState._shared_state) > 0: + raise ValueError( + "To launch a multi-device training from your notebook, the `Accelerator` should only be initialized " + "inside your training function. Restart your notebook and make sure no cells initializes an " + "`Accelerator`." + ) + # Check for specific libraries known to initialize device that users constantly use + problematic_imports = are_libraries_initialized("bitsandbytes") + if len(problematic_imports) > 0: + err = ( + "Could not start distributed process. Libraries known to initialize device upon import have been " + "imported already. Please keep these imports inside your training function to try and help with this:" + ) + for lib_name in problematic_imports: + err += f"\n\t* `{lib_name}`" + raise RuntimeError(err) + + patched_env = dict( + nproc=num_processes, + node_rank=node_rank, + world_size=num_nodes * num_processes, + master_addr=master_addr, + master_port=use_port, + mixed_precision=mixed_precision, + ) + + # Check for CUDA P2P and IB issues + if not check_cuda_p2p_ib_support(): + patched_env["nccl_p2p_disable"] = "1" + patched_env["nccl_ib_disable"] = "1" + + # torch.distributed will expect a few environment variable to be here. We set the ones common to each + # process here (the other ones will be set be the launcher). 
+ with patch_environment(**patched_env): + # First dummy launch + device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" + distributed_type = "MULTI_XPU" if device_type == "xpu" else "MULTI_GPU" + if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true": + launcher = PrepareForLaunch(test_launch, distributed_type=distributed_type) + try: + start_processes(launcher, args=(), nprocs=num_processes, start_method="fork") + except ProcessRaisedException as e: + err = "An issue was found when verifying a stable environment for the notebook launcher." + if f"Cannot re-initialize {device_type.upper()} in forked subprocess" in e.args[0]: + raise RuntimeError( + f"{err}" + "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " + "Please review your imports and test them when running the `notebook_launcher()` to identify " + f"which one is problematic and causing {device_type.upper()} to be initialized." 
+ ) from e + else: + raise RuntimeError(f"{err} The following error was raised: {e}") from e + # Now the actual launch + launcher = PrepareForLaunch(function, distributed_type=distributed_type) + print(f"Launching training on {num_processes} {device_type.upper()}s.") + try: + if rdzv_conf is None: + rdzv_conf = {} + if rdzv_backend == "static": + rdzv_conf["rank"] = node_rank + if not rdzv_endpoint: + rdzv_endpoint = f"{master_addr}:{use_port}" + launch_config_kwargs = dict( + min_nodes=num_nodes, + max_nodes=num_nodes, + nproc_per_node=num_processes, + run_id=rdzv_id, + rdzv_endpoint=rdzv_endpoint, + rdzv_backend=rdzv_backend, + rdzv_configs=rdzv_conf, + max_restarts=max_restarts, + monitor_interval=monitor_interval, + start_method="fork", + ) + if is_torch_version(">=", ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION): + launch_config_kwargs["log_line_prefix_template"] = log_line_prefix_template + elastic_launch(config=LaunchConfig(**launch_config_kwargs), entrypoint=function)(*args) + except ProcessRaisedException as e: + if f"Cannot re-initialize {device_type.upper()} in forked subprocess" in e.args[0]: + raise RuntimeError( + f"{device_type.upper()} has been initialized before the `notebook_launcher` could create a forked subprocess. " + "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " + "Please review your imports and test them when running the `notebook_launcher()` to identify " + f"which one is problematic and causing {device_type.upper()} to be initialized." + ) from e + else: + raise RuntimeError(f"An issue was found when launching the training: {e}") from e + + else: + # No need for a distributed launch otherwise as it's either CPU, GPU, XPU or MPS. 
+ if is_mps_available(): + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + print("Launching training on MPS.") + elif torch.cuda.is_available(): + print("Launching training on one GPU.") + elif torch.xpu.is_available(): + print("Launching training on one XPU.") + else: + print("Launching training on CPU.") + function(*args) + + +def debug_launcher(function, args=(), num_processes=2): + """ + Launches a training function using several processes on CPU for debugging purposes. + + + + This function is provided for internal testing and debugging, but it's not intended for real trainings. It will + only use the CPU. + + + + Args: + function (`Callable`): + The training function to execute. + args (`Tuple`): + Tuple of arguments to pass to the function (it will receive `*args`). + num_processes (`int`, *optional*, defaults to 2): + The number of processes to use for training. + """ + from torch.multiprocessing import start_processes + + with tempfile.NamedTemporaryFile() as tmp_file: + # torch.distributed will expect a few environment variable to be here. We set the ones common to each + # process here (the other ones will be set be the launcher). + with patch_environment( + world_size=num_processes, + master_addr="127.0.0.1", + master_port="29500", + accelerate_mixed_precision="no", + accelerate_debug_rdv_file=tmp_file.name, + accelerate_use_cpu="yes", + ): + launcher = PrepareForLaunch(function, debug=True) + start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") diff --git a/venv/lib/python3.10/site-packages/accelerate/local_sgd.py b/venv/lib/python3.10/site-packages/accelerate/local_sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..40b198d46aa24610d8eaf46424474d7c1c9881a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/local_sgd.py @@ -0,0 +1,106 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. 
import torch

from accelerate import Accelerator, DistributedType


class LocalSGD:
    """
    A helper context manager implementing local SGD on top of `Accelerator`: each device runs a given number of
    optimizer updates independently, and model weights are averaged across devices every K synchronization steps.

    It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular,
    this is a simple implementation that cannot support scenarios such as model parallelism.

    Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
    back to at least:

    Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
    arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)

    We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).

    Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
    Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
    """

    def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
        """
        Constructor.

        Args:
            accelerator (`Accelerator`):
                Accelerator object.
            model (`torch.nn.Module`):
                The model whose parameters we need to average.
            local_sgd_steps (`int`):
                The number of local SGD steps run before model parameters are synchronized.
            enabled (`bool`, *optional*, defaults to `True`):
                Local SGD is disabled if this parameter is set to `False`.
        """
        supported_types = (
            DistributedType.NO,
            DistributedType.MULTI_CPU,
            DistributedType.MULTI_GPU,
            DistributedType.MULTI_XPU,
            DistributedType.MULTI_MLU,
            DistributedType.MULTI_HPU,
            DistributedType.MULTI_SDAA,
            DistributedType.MULTI_MUSA,
            DistributedType.MULTI_NPU,
        )
        if accelerator.distributed_type not in supported_types:
            raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
        # A single process has nobody to synchronize with, so treat NO as disabled.
        self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
        self.num_steps = 0
        if self.enabled:
            self.accelerator = accelerator
            self.model = model
            self.local_sgd_steps = local_sgd_steps

    def __enter__(self):
        if self.enabled:
            # Suspend DDP's per-step gradient all-reduce while we run local updates.
            self.model_sync_obj = self.model.no_sync()
            self.model_sync_obj.__enter__()
        return self

    def __exit__(self, type, value, tb):
        if self.enabled:
            # Make sure every process leaves with identical, averaged weights.
            self._sync_and_avg_model_params()
            self.model_sync_obj.__exit__(type, value, tb)

    def step(self):
        """Makes a "step": advances the counter and synchronizes model parameters if necessary."""
        self.num_steps += 1
        if not self.enabled:
            return
        if self.num_steps % self.local_sgd_steps == 0:
            self._sync_and_avg_model_params()

    def _sync_and_avg_model_params(self):
        """Synchronize + average model parameters across all processes."""
        self.accelerator.wait_for_everyone()
        with self.accelerator.autocast():
            for param in self.model.parameters():
                param.data = self.accelerator.reduce(param.data, reduction="mean")
+ """ + + @staticmethod + def _should_log(main_process_only): + "Check if log should be performed" + state = PartialState() + return not main_process_only or (main_process_only and state.is_main_process) + + def log(self, level, msg, *args, **kwargs): + """ + Delegates logger call after checking if we should log. + + Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes + or only the main executed one. Default is `True` if not passed + + Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to + read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not + break with the previous behavior. + + `in_order` is ignored if `main_process_only` is passed. + """ + if PartialState._shared_state == {}: + raise RuntimeError( + "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." + ) + main_process_only = kwargs.pop("main_process_only", True) + in_order = kwargs.pop("in_order", False) + # set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice + kwargs.setdefault("stacklevel", 2) + + if self.isEnabledFor(level): + if self._should_log(main_process_only): + msg, kwargs = self.process(msg, kwargs) + self.logger.log(level, msg, *args, **kwargs) + + elif in_order: + state = PartialState() + for i in range(state.num_processes): + if i == state.process_index: + msg, kwargs = self.process(msg, kwargs) + self.logger.log(level, msg, *args, **kwargs) + state.wait_for_everyone() + + @functools.lru_cache(None) + def warning_once(self, *args, **kwargs): + """ + This method is identical to `logger.warning()`, but will emit the warning with the same message only once + + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the + cache. 
def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.

    If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all
    processes and in order, also pass `in_order=True`

    Args:
        name (`str`):
            The name for the logger, such as `__file__`
        log_level (`str`, *optional*):
            The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable if
            it is set; otherwise the logger's (and root logger's) level is left unchanged.

    Example:

    ```python
    >>> from accelerate.logging import get_logger
    >>> from accelerate import Accelerator

    >>> logger = get_logger(__name__)

    >>> accelerator = Accelerator()
    >>> logger.info("My log", main_process_only=False)
    >>> logger.debug("My log", main_process_only=True)

    >>> logger = get_logger(__name__, log_level="DEBUG")
    >>> logger.info("My log")
    >>> logger.debug("My second log")

    >>> array = ["a", "b", "c", "d"]
    >>> letter_at_rank = array[accelerator.process_index]
    >>> logger.info(letter_at_rank, in_order=True)
    ```
    """
    # Fix: the previous docstring claimed the `LOG_LEVEL` env var and an `INFO` default;
    # the code actually reads `ACCELERATE_LOG_LEVEL` and leaves levels untouched otherwise.
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        level = log_level.upper()
        logger.setLevel(level)
        # Raise the root logger too so records propagate through root handlers.
        logger.root.setLevel(level)
    return MultiProcessAdapter(logger, {})
import warnings


# Deprecated shim: `memory_utils` was reorganized into `utils.memory`. Importing this
# module only emits a FutureWarning pointing users at the supported import path.
# Fix: the message previously referenced a nonexistent symbol `find_executable_batchsize`;
# the real helper is `find_executable_batch_size` (as the snippet in the message shows).
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main "
    "`__init__`: `from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
def move_to_device(state, device):
    """
    Recursively move every tensor contained in `state` onto `device`.

    Tensors are moved with `Tensor.to`; dicts are rebuilt with their concrete type; lists and
    tuples keep their concrete type via `honor_type` (which preserves namedtuples); any other
    object is returned untouched.
    """
    # The three container/tensor checks are mutually exclusive, so order does not matter.
    if isinstance(state, torch.Tensor):
        return state.to(device)
    if isinstance(state, dict):
        moved = {key: move_to_device(value, device) for key, value in state.items()}
        return type(state)(moved)
    if isinstance(state, (list, tuple)):
        return honor_type(state, (move_to_device(item, device) for item in state))
    return state
+ """ + + def __init__(self, optimizer, device_placement=True, scaler=None): + self.optimizer = optimizer + self.scaler = scaler + self.accelerator_state = AcceleratorState() + self.gradient_state = GradientState() + self.device_placement = device_placement + self._is_overflow = False + + if self.scaler is not None: + self._accelerate_step_called = False + self._optimizer_original_step_method = self.optimizer.step + self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) + + # Handle device placement + if device_placement: + state_dict = self.optimizer.state_dict() + if self.accelerator_state.distributed_type == DistributedType.XLA: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + else: + state_dict = move_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + @property + def state(self): + return self.optimizer.state + + @state.setter + def state(self, state): + self.optimizer.state = state + + @property + def param_groups(self): + return self.optimizer.param_groups + + @param_groups.setter + def param_groups(self, param_groups): + self.optimizer.param_groups = param_groups + + @property + def defaults(self): + return self.optimizer.defaults + + @defaults.setter + def defaults(self, defaults): + self.optimizer.defaults = defaults + + def add_param_group(self, param_group): + self.optimizer.add_param_group(param_group) + + def load_state_dict(self, state_dict): + if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + def state_dict(self): + return self.optimizer.state_dict() + + def zero_grad(self, set_to_none=None): + if self.gradient_state.sync_gradients: + accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters + if accept_arg: + if set_to_none is None: + set_to_none = True + 
    def step(self, closure=None):
        # Steps the wrapped optimizer, but only when gradients are meant to be synchronized
        # (i.e. not in the middle of gradient accumulation), handling AMP gradient scaling
        # and manual XLA gradient reduction along the way.
        if is_lomo_available():
            from lomo_optim import AdaLomo, Lomo

        # On XLA, gradients are all-reduced manually here before stepping.
        if (
            not self.gradient_state.is_xla_gradients_synced
            and self.accelerator_state.distributed_type == DistributedType.XLA
        ):
            gradients = xm._fetch_gradients(self.optimizer)
            xm.all_reduce("sum", gradients, scale=1.0 / xr.world_size())
            self.gradient_state.is_xla_gradients_synced = True

        if is_lomo_available():
            # `step` should be a no-op for LOMO optimizers.
            if isinstance(self.optimizer, (Lomo, AdaLomo)):
                return

        if self.gradient_state.sync_gradients:
            if self.scaler is not None:
                # Route the scaler through our patched step so we can tell whether it
                # actually called `optimizer.step` or skipped it because of overflow.
                self.optimizer.step = self._optimizer_patched_step_method

                self.scaler.step(self.optimizer, closure)
                self.scaler.update()

                if not self._accelerate_step_called:
                    # If the optimizer step was skipped, gradient overflow was detected.
                    self._is_overflow = True
                else:
                    self._is_overflow = False
                # Reset the step method to the original one
                self.optimizer.step = self._optimizer_original_step_method
                # Reset the indicator
                self._accelerate_step_called = False
            else:
                self.optimizer.step(closure)
            if self.accelerator_state.distributed_type == DistributedType.XLA:
                # A fresh all-reduce will be needed before the next step.
                self.gradient_state.is_xla_gradients_synced = False

    def _switch_parameters(self, parameters_map):
        # Replace parameters in every param group according to `parameters_map`;
        # parameters not present in the map are kept as-is.
        for param_group in self.optimizer.param_groups:
            param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped."""
        return self._is_overflow

    def __getstate__(self):
        # The patched/original step methods are bound to a live optimizer and cannot
        # be pickled; they are re-created in `__setstate__`.
        _ignored_keys = [
            "_accelerate_step_called",
            "_optimizer_original_step_method",
            "_optimizer_patched_step_method",
        ]
        return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}

    def __setstate__(self, state):
        self.__dict__.update(state)
        if self.scaler is not None:
            # Re-install the step patching dropped in `__getstate__`.
            self._accelerate_step_called = False
            self._optimizer_original_step_method = self.optimizer.step
            self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)


def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
    # Wraps `method` so that calling it flags `_accelerate_step_called`, letting
    # `AcceleratedOptimizer.step` detect whether GradScaler skipped the step.
    def patched_step(*args, **kwargs):
        accelerated_optimizer._accelerate_step_called = True
        return method(*args, **kwargs)

    return patched_step
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import warnings +from dataclasses import dataclass +from typing import TYPE_CHECKING, Optional, Union + +from torch.distributed.device_mesh import init_device_mesh + +from accelerate.utils.dataclasses import TorchContextParallelConfig, TorchTensorParallelConfig + + +if TYPE_CHECKING: + from accelerate import Accelerator + + +@dataclass +class ParallelismConfig: + """ + A dataclass to configure parallelisms applied to the model. Inspired by torchtitan's `ParallelDims` + https://github.com/pytorch/torchtitan/blob/main/torchtitan/distributed/parallel_dims.py + + Args: + dp_replicate_size (`int`, defaults to `1`): + The size of the data parallel group. If `dp_replicate_size` is set to 1, the data parallel replication + group will not be used. + dp_shard_size (`int`, defaults to `1`): + The size of the model shard group. If `dp_replicate_size > 1` and `tp_size > 1`, `dp_shard_size` must also + be greater than 1, as composing DDP + TP is currently not supported. + tp_size (`int`, defaults to `1`): + The size of the tensor parallel group. If `tp_size` is set to `1`, the tensor parallel group will not be + used. + cp_size (`int`, defaults to `1`): + The size of the context parallel group. Currently not supported, but reserved for future use and enabled + for downstream libraries. + tp_handler (`~utils.TorchTensorParallelConfig`, defaults to `None`): + The handler for the tensor parallel group. 
+ + You may obtain different distributed data parallel paradigms by configuring `dp_replicate_size` and `dp_shard_size` + together: + - `dp_replicate_size == 1` and `dp_shard_size > 1`, we obtain Fully Sharded Data Parallel (FSDP). + - `dp_replicate_size > 1` and `dp_shard_size > 1`, we obtain Hybrid Sharded Data Parallel (HSDP). + - `dp_replicate_size > 1` and `dp_shard_size == 1` is an invalid configuration, to use pure DP, use + `DistributedDataParallelKwargs` instead. + + """ + + dp_replicate_size: int = None + dp_shard_size: int = None + tp_size: int = None + cp_size: int = None + + # we use Union because we might support other x parallel plugins (i.e. deepspeed, etc) + tp_handler: Union[None, TorchTensorParallelConfig] = None + cp_handler: Union[None, TorchContextParallelConfig] = None + + device_mesh = None + + def __repr__(self): + return ( + "ParallelismConfig(\n " + f"\tdp_replicate_size={self.dp_replicate_size},\n" + f"\tdp_shard_size={self.dp_shard_size},\n" + f"\ttp_size={self.tp_size},\n" + f"\tcp_size={self.cp_size},\n" + f"\ttotal_size={self.total_size}\n" + f"\ttp_handler={self.tp_handler},\n" + f"\tcp_handler={self.cp_handler})\n" + ) + + @property + def dp_dim_names(self): + """Names of enabled dimensions across which data parallelism is applied.""" + dims = [] + if self.dp_replicate_enabled: + dims += ["dp_replicate"] + if self.dp_shard_enabled: + dims += ["dp_shard"] + return dims + + @property + def non_dp_dim_names(self): + """Names of enabled dimensions which will receive the same batch (non-data parallel dimensions).""" + dims = [] + if self.tp_enabled: + dims += ["tp"] + if self.cp_enabled: + dims += ["cp"] + return dims + + @property + def dp_shard_cp_dim_names(self): + """Names of enabled dimensions which will be flattened into a joint mesh across which is model sharded in FSDP.""" + dims = [] + if self.dp_shard_enabled: + dims += ["dp_shard"] + if self.cp_enabled: + dims += ["cp"] + return dims + + @property + def dp_cp_dim_names(self): 
+ """Names of enabled dimensions across which loss should be averaged""" + dims = [] + if self.dp_replicate_enabled: + dims += ["dp_replicate"] + if self.dp_shard_enabled: + dims += ["dp_shard"] + if self.cp_enabled: + dims += ["cp"] + return dims + + @property + def fsdp_dim_names(self): + """Names of enabled dimensions across which FSDP is applied, including data parallel replication.""" + dims = [] + if self.dp_replicate_enabled: + dims += ["dp_replicate"] + dims += ["dp_shard_cp"] + return dims + + @property + def total_size(self): + """The total size of the parallelism configuration, which is the product of all sizes.""" + return self.dp_replicate_size * self.dp_shard_size * self.tp_size * self.cp_size + + @property + def non_data_parallel_size(self): + """The size of the non-data parallel dimensions, which is the product of tensor and context parallel sizes.""" + return self.tp_size * self.cp_size + + @property + def data_parallel_size(self): + """The size of the data parallel dimensions, which is the product of data parallel replication and""" + return self.dp_replicate_size * self.dp_shard_size + + @property + def dp_replicate_enabled(self): + """True if data parallel replication is enabled, i.e. `dp_replicate_size > 1`.""" + return self.dp_replicate_size > 1 + + @property + def dp_shard_enabled(self): + """True if data parallel sharding is enabled, i.e. `dp_shard_size > 1`.""" + return self.dp_shard_size > 1 + + @property + def tp_enabled(self): + """True if tensor parallelism is enabled, i.e. `tp_size > 1`.""" + return self.tp_size > 1 + + @property + def cp_enabled(self): + """True if context parallelism is enabled, i.e. `cp_size > 1`.""" + return self.cp_size > 1 + + @property + def active_mesh_dims(self): + """Names of all active mesh dimensions.""" + return self.dp_dim_names + self.non_dp_dim_names + + def build_device_mesh(self, device_type: str): + """Builds a device mesh for the given device type based on the parallelism configuration. 
+ This method will also create required joint meshes (e.g. `dp_shard_cp`, `dp_cp`, `dp`). + + Args: + device_type (`str`): The type of device for which to build the mesh, e + """ + mesh = self._get_mesh() + if len(mesh) == 0: + return None + mesh_dim_names, mesh_shape = mesh + device_mesh = init_device_mesh( + device_type, + mesh_shape, + mesh_dim_names=mesh_dim_names, + ) + if self.dp_dim_names: + device_mesh[self.dp_dim_names]._flatten("dp") + if self.dp_shard_cp_dim_names: + device_mesh[self.dp_shard_cp_dim_names]._flatten("dp_shard_cp") + if self.dp_cp_dim_names: + device_mesh[self.dp_cp_dim_names]._flatten("dp_cp") + + return device_mesh + + def get_device_mesh(self, device_type: Optional[str] = None): + if self.device_mesh is None: + if device_type is not None: + self.device_mesh = self.build_device_mesh(device_type) + else: + raise ("You need to pass a device_type e.g cuda to build the device mesh") + else: + if device_type is not None: + if self.device_mesh.device_type != device_type: + raise ValueError( + f"The device_mesh is already created with device type {self.device_mesh.device_type}. However, you are trying to get a device mesh with device_type {device_type}. 
Please check if you correctly initialized your device_mesh" + ) + return self.device_mesh + + def _get_mesh(self) -> tuple[tuple[int, ...], tuple[str, ...]]: + """Generate mesh shape and dimension names for torch.distributed.init_device_mesh().""" + + # Build mesh dimensions dictionary + mesh_dims = {parallelism: self._sizes[parallelism] for parallelism in self.active_mesh_dims} + + # Apply canonical ordering + mesh_order = ["dp_replicate", "dp_shard", "cp", "tp"] + sorted_items = sorted( + mesh_dims.items(), + key=lambda x: (mesh_order.index(x[0])), + ) + return tuple(zip(*sorted_items)) + + def __post_init__(self): + # Basic size validation + if self.dp_replicate_size is None: + self.dp_replicate_size = int(os.environ.get("PARALLELISM_CONFIG_DP_REPLICATE_SIZE", "1")) + if self.dp_shard_size is None: + self.dp_shard_size = int(os.environ.get("PARALLELISM_CONFIG_DP_SHARD_SIZE", "1")) + if self.tp_size is None: + self.tp_size = int(os.environ.get("PARALLELISM_CONFIG_TP_SIZE", "1")) + if self.cp_size is None: + self.cp_size = int(os.environ.get("PARALLELISM_CONFIG_CP_SIZE", "1")) + + if self.tp_size > 1: + if self.tp_handler is None: + self.tp_handler = TorchTensorParallelConfig() + + if self.cp_size > 1: + if self.cp_handler is None: + self.cp_handler = TorchContextParallelConfig() + + if self.dp_replicate_size < 1: + raise ValueError(f"dp_replicate_size must be at least 1, but got {self.dp_replicate_size}") + if self.dp_shard_size < 1: + raise ValueError(f"dp_shard_size must be at least 1, but got {self.dp_shard_size}") + if self.tp_size < 1: + raise ValueError(f"tp_size must be at least 1, but got {self.tp_size}") + if self.cp_size < 1: + raise ValueError(f"cp_size must be at least 1, but got {self.cp_size}") + + if (self.tp_size > 1 or self.cp_size > 1) and self.dp_replicate_size > 1 and self.dp_shard_size == 1: + raise ValueError( + "Tensor/Context parallelism (tp/cp_size > 1) cannot be used with pure data parallelism (dp_replicate_size > 1 and dp_shard_size == 
1). " + "Please set dp_shard_size > 1 and dp_replicate_size == 1 to compose FSDP + TP/CP for 2D parallel, " + "or set dp_replicate_size == 1 and dp_shard_size > 1 to compose HSDP + TP/CP for 3D parallel." + ) + self._sizes = { + "dp_replicate": self.dp_replicate_size, + "dp_shard": self.dp_shard_size, + "tp": self.tp_size, + "cp": self.cp_size, + } + + def _set_size(self, parallelism: str, size: int): + assert parallelism in self._sizes.keys(), f"Parallelism must be one of {self._sizes.keys()}" + self._sizes[parallelism] = size + setattr(self, f"{parallelism}_size", size) + + def _validate_accelerator(self, accelerator: "Accelerator"): + _warnings = set() + if not accelerator.multi_device and self.total_size == 1: + # No distributed setup, valid parallelism config + return + + # We need this to ensure DDP works + if self.total_size == 1: + self._set_size("dp_replicate", accelerator.num_processes) + + if self.total_size != accelerator.num_processes: + raise ValueError( + f"ParallelismConfig total_size ({self.total_size}) does not match " + f"num_processes ({accelerator.num_processes}). Please adjust dp_replicate_size/ " + f"dp_shard_size/tp_size/cp_size." + ) + + if self.total_size > 1 and not (accelerator.is_fsdp2 or accelerator.multi_device): + raise ValueError( + f"ParallelismConfig is only compatible DistributedType.FSDP (version 2) or DistributedType.Multi{{Device}}, but got {accelerator.distributed_type}." + ) + + for parallelism, size in self._sizes.items(): + if size == 1 and getattr(self, f"{parallelism}_handler", None) is not None: + _warnings.add( + f"ParallelismConfig.{parallelism}_handler is set, but {parallelism}_size is set to 1. This handler will be ignored." 
+ ) + + if _warnings and accelerator.is_main_process: + warnings.warn( + "ParallelismConfig has the following warnings:\n" + "\n".join(_warnings), + UserWarning, + ) diff --git a/venv/lib/python3.10/site-packages/accelerate/scheduler.py b/venv/lib/python3.10/site-packages/accelerate/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa8a13f238afd7b908ee8e8cb8e0620f48d4ff8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/scheduler.py @@ -0,0 +1,98 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation + +import warnings + +from .state import AcceleratorState, GradientState + + +warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler") + + +class AcceleratedScheduler: + """ + A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful + to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed + precision training) + + When performing gradient accumulation scheduler lengths should not be changed accordingly, Accelerate will always + step the scheduler to account for it. + + Args: + scheduler (`torch.optim.lr_scheduler._LRScheduler`): + The scheduler to wrap. 
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        # Always store a list, even when a single optimizer is passed.
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            # Mid gradient accumulation: no optimizer step happened on this call.
            if self.gradient_state.adjust_scheduler:
                # Still advance the internal counter so schedulers relying on
                # `_step_count` (e.g. warmup schedules) stay consistent.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                # A gradient overflow skipped the optimizer step -> keep the current LR.
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    # Never step past `total_steps`, which OneCycleLR raises on.
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import logging
import os
import threading
import warnings
import weakref
from contextlib import contextmanager
from functools import partial
from typing import Any, Callable

import torch

from .utils import (
    DistributedType,
    DynamoBackend,
    GradientAccumulationPlugin,
    check_cuda_fp8_capability,
    check_cuda_p2p_ib_support,
    deepspeed_required,
    get_cpu_distributed_information,
    get_int_from_env,
    is_ccl_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
    is_habana_gaudi1,
    is_hpu_available,
    is_ipex_available,
    is_mlu_available,
    is_mps_available,
    is_musa_available,
    is_npu_available,
    is_sdaa_available,
    is_torch_xla_available,
    is_xccl_available,
    is_xpu_available,
    parse_choice_from_env,
    parse_flag_from_env,
    set_numa_affinity,
)
from .utils.dataclasses import SageMakerDistributedType


# Backend-specific imports: each `import torch_*` registers the corresponding device
# backend with PyTorch as a side effect, so they must run at module load time.
if is_torch_xla_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.runtime as xr

if is_mlu_available(check_device=False):
    import torch_mlu  # noqa: F401

if is_sdaa_available(check_device=False):
    import torch_sdaa  # noqa: F401

if is_musa_available(check_device=False):
    import torch_musa  # noqa: F401

if is_npu_available(check_device=False):
    import torch_npu  # noqa: F401


logger = logging.getLogger(__name__)


def is_initialized() -> bool:
    """
    Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
    but works as a module method.
    """
    return AcceleratorState._shared_state != {}


# No-op function returned by the `on_*` decorators on processes that should not run the decorated function.
def do_nothing(*args, **kwargs):
    return None


class ThreadLocalSharedDict(threading.local):
    """
    Descriptor that holds a dict shared between instances of a class in the same thread.

    Note: Descriptors have slightly different semantics than just a dict field on its own.
    `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
    underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
    the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
    object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).

    See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html

    This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).

    See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
    """

    def __init__(self, thread_local: bool = False):
        # `threading.local` base gives each thread its own independent `_storage`.
        self._storage = {}

    def __get__(self, obj, objtype=None):
        return self._storage

    def __set__(self, obj, value):
        self._storage = value


# Prefer global shared dictionary, except when using TPU.
SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict


# Inspired by Alex Martelli's 'Borg'.
class PartialState:
    """
    Singleton class that has information about the current training environment and functions to help with process
    control. Designed to be used when only process control and device execution states are needed. Does *not* need to
    be initialized from `Accelerator`.

    Args:
        cpu (`bool`, *optional*):
            Whether or not to force the script to execute on CPU.
            Will ignore any accelerators available if set to `True` and force the execution on the CPU.
        kwargs (additional keyword arguments, *optional*):
            Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
            found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.

    **Available attributes:**

        - **device** (`torch.device`) -- The device to use.
        - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
          in use.
        - **local_process_index** (`int`) -- The index of the current process on the current server.
        - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
          of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8'.)
        - **num_processes** (`int`) -- The number of processes currently launched in parallel.
        - **process_index** (`int`) -- The index of the current process.
        - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
        - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
        - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
        - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.

    Example:
    ```python
    from accelerate.utils import InitProcessGroupKwargs

    # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
    kwargs = InitProcessGroupKwargs(...).to_kwargs()
    state = PartialState(**kwargs)
    ```
    """

    # Borg-style shared state: every instance aliases its `__dict__` to this dict.
    _shared_state = SharedDict()
    # Attributes expected to live in `_shared_state`; used by `__getattr__` for clearer errors.
    _known_attrs = [
        "_cpu",
        "_mixed_precision",
        "_shared_state",
        "backend",
        "debug",
        "device",
        "distributed_type",
        "fork_launched",
        "local_process_index",
        "num_processes",
        "process_index",
    ]

    def __init__(self, cpu: bool = False, **kwargs):
        # Alias instance state to the class-level shared dict (singleton behavior).
        self.__dict__ = self._shared_state
        if not self.initialized:
            self._cpu = cpu
            self.backend = None
            env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
            self.device = torch.device(env_device) if env_device is not None else None
            self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
            use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
            dist_information = None
            if use_sagemaker_dp is None:
                use_sagemaker_dp = (
                    os.environ.get("ACCELERATE_USE_SAGEMAKER", "false").lower() == "true"
                    and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
                )

            # Sets up self.backend + imports
            original_backend = kwargs.pop("backend", None)
            backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, original_backend)
            # NOTE(review): "avaliable" typo below is in a runtime message; left untouched here.
            if original_backend is not None and backend != original_backend:
                raise ValueError(f"Your assigned backend {original_backend} is not avaliable, please use {backend}")
            self.backend = backend
            self.distributed_type = distributed_type
            use_deepspeed = False
            if not cpu and self.backend != "xla":
                if int(os.environ.get("LOCAL_RANK", -1)) != -1:
                    # Deal with spawning deepspeed
                    if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false").lower() == "true":
                        if not is_deepspeed_available():
                            raise ImportError(
                                "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
                            )
                        from deepspeed import comm as dist

                        if not dist.is_initialized():
                            # SDAA needs its device pinned before the DeepSpeed group is created.
                            if self.backend == "tccl":
                                local_rank = os.environ.get("LOCAL_RANK", -1)
                                torch.sdaa.set_device(f"sdaa:{local_rank}")
                            dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
                        # We need to flag `use_deepspeed` to be True to override `distributed_type` later
                        use_deepspeed = True
                    # Deal with all other backends but XPU and CPU, that gets handled special later
                    elif (
                        self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
                        and not torch.distributed.is_initialized()
                    ):
                        if self.backend == "tccl":
                            local_rank = os.environ.get("LOCAL_RANK", -1)
                            torch.sdaa.set_device(f"sdaa:{local_rank}")
                        # FSDP with CPU offload or full state dicts moves tensors to CPU, which
                        # needs a gloo group alongside NCCL for CUDA tensors.
                        if (
                            self.backend == "nccl"
                            and os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
                            and (
                                os.environ.get("FSDP_OFFLOAD_PARAMS", "false").lower() == "true"
                                or os.environ.get("FSDP_STATE_DICT_TYPE", "SHARDED_STATE_DICT") == "FULL_STATE_DICT"
                            )
                        ):
                            self.backend = "cuda:nccl,cpu:gloo"
                        torch.distributed.init_process_group(backend=self.backend, **kwargs)

                # XPU and CPU require special env configs to be set
                if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
                    dist_information = get_cpu_distributed_information()
                    os.environ["RANK"] = str(dist_information.rank)
                    os.environ["WORLD_SIZE"] = str(dist_information.world_size)
                    os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
                    os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
                    if not os.environ.get("MASTER_PORT", None):
                        os.environ["MASTER_PORT"] = "29500"
                    # Multinode (local world != global world) requires MASTER_ADDR, except with MPI
                    # which discovers peers itself.
                    if (
                        not os.environ.get("MASTER_ADDR", None)
                        and dist_information.local_world_size != dist_information.world_size
                        and self.backend != "mpi"
                    ):
                        raise ValueError(
                            "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
                            "please try exporting rank 0's hostname as `MASTER_ADDR`"
                        )
                    kwargs["rank"] = dist_information.rank
                    kwargs["world_size"] = dist_information.world_size

                    if (
                        self.distributed_type == DistributedType.MULTI_CPU
                        and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0
                    ):
                        import psutil

                        # Divide the physical cores evenly between the local processes.
                        num_cpu_threads_per_process = int(
                            psutil.cpu_count(logical=False) / dist_information.local_world_size
                        )
                        if num_cpu_threads_per_process == 0:
                            num_cpu_threads_per_process = 1
                        torch.set_num_threads(num_cpu_threads_per_process)
                        warnings.warn(
                            f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
                            " performance."
                        )

                    if not torch.distributed.is_initialized():
                        torch.distributed.init_process_group(backend=self.backend, **kwargs)

            # No backend == no distributed training
            if self.backend is None:
                self.distributed_type = DistributedType.NO
                self.num_processes = 1
                self.process_index = 0
                self.local_process_index = 0
            elif self.backend == "xla":
                # XLA needs device setting first for `set_replication`
                self.set_device()
                xm.set_replication(self.device, xm.get_xla_supported_devices())
                self.num_processes = xr.world_size()
                self.process_index = xr.global_ordinal()
                if is_torch_xla_available(check_is_tpu=True):
                    self.local_process_index = xm.get_local_ordinal()
                else:
                    self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
            else:
                self.num_processes = torch.distributed.get_world_size()
                self.process_index = torch.distributed.get_rank()
                self.local_process_index = (
                    int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank
                )
                self.set_device()
            # Now we can change to DeepSpeed (overrides the backend-derived distributed type set above)
            if use_deepspeed:
                self.distributed_type = DistributedType.DEEPSPEED

            # Set CPU affinity if enabled
            if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False):
                set_numa_affinity(self.local_process_index)

            # Check for old RTX 4000's that can't use P2P or IB and are on old drivers
            if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
                if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
                    raise NotImplementedError(
                        "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. "
                        'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which '
                        "will do this automatically."
                    )

        # Important: this should be the *only* code that runs outside the `self.initialized` check above.
        self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)

    def __repr__(self) -> str:
        return (
            f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
            f"Num processes: {self.num_processes}\n"
            f"Process index: {self.process_index}\n"
            f"Local process index: {self.local_process_index}\n"
            f"Device: {self.device}\n"
        )

    @staticmethod
    def _reset_state():
        "Resets `_shared_state`, is used internally and should not be called"
        # Clear in place — rebinding would replace the shared-dict descriptor (see ThreadLocalSharedDict).
        PartialState._shared_state.clear()

    @property
    def initialized(self) -> bool:
        "Returns whether the `PartialState` has been initialized"
        return self._shared_state != {}

    @property
    def use_distributed(self):
        """
        Whether the Accelerator is configured for distributed training
        """
        return self.distributed_type != DistributedType.NO and self.num_processes > 1

    @property
    def is_last_process(self) -> bool:
        "Returns whether the current process is the last one"
        return self.process_index == self.num_processes - 1

    @property
    def is_main_process(self) -> bool:
        "Returns whether the current process is the main process"
        # Megatron-LM treats the *last* rank as the main process.
        return (
            self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
        )

    @property
    def is_local_main_process(self) -> bool:
        "Returns whether the current process is the main process on the local node"
        return (
            self.local_process_index == 0
            if self.distributed_type != DistributedType.MEGATRON_LM
            else self.is_last_process
        )

    def wait_for_everyone(self):
        """
        Will
        stop the execution of the current process until every other process has reached that point (so this does
        nothing when the script is only run in one process). Useful to do before saving a model.

        Example:

        ```python
        >>> # Assuming two GPU processes
        >>> import time
        >>> from accelerate.state import PartialState

        >>> state = PartialState()
        >>> if state.is_main_process:
        ...     time.sleep(2)
        ... else:
        ...     print("I'm waiting for the main process to finish its sleep...")
        >>> state.wait_for_everyone()
        >>> # Should print on every process at the same time
        >>> print("Everyone is here")
        ```
        """
        # Only true multi-process environments have a barrier to hit.
        if self.distributed_type in (
            DistributedType.MULTI_GPU,
            DistributedType.MULTI_MLU,
            DistributedType.MULTI_SDAA,
            DistributedType.MULTI_MUSA,
            DistributedType.MULTI_NPU,
            DistributedType.MULTI_XPU,
            DistributedType.MULTI_CPU,
            DistributedType.MULTI_HPU,
            DistributedType.DEEPSPEED,
            DistributedType.FSDP,
        ):
            torch.distributed.barrier()
        elif self.distributed_type == DistributedType.XLA:
            xm.rendezvous("accelerate.utils.wait_for_everyone")

    def _goes_first(self, is_main: bool):
        # Generator backing `main_process_first`/`local_main_process_first`:
        # non-main processes wait *before* the block, the main process waits *after* it.
        if not is_main:
            self.wait_for_everyone()

        yield

        if is_main:
            self.wait_for_everyone()

    @contextmanager
    def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
        """
        Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
        distributed inference, such as with different prompts.

        Note that when using a `dict`, all keys need to have the same number of elements.

        Args:
            inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`):
                The input to split between processes.
            apply_padding (`bool`, `optional`, defaults to `False`):
                Whether to apply padding by repeating the last element of the input so that all processes have the same
                number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
                in less inputs than there are processes. If so, just remember to drop the padded elements afterwards.


        Example:

        ```python
        # Assume there are two processes
        from accelerate import PartialState

        state = PartialState()
        with state.split_between_processes(["A", "B", "C"]) as inputs:
            print(inputs)
        # Process 0
        ["A", "B"]
        # Process 1
        ["C"]

        with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
            print(inputs)
        # Process 0
        ["A", "B"]
        # Process 1
        ["C", "C"]
        ```
        """
        if self.num_processes == 1:
            yield inputs
            return
        length = len(inputs)
        # Nested dictionary of any types
        if isinstance(inputs, dict):
            length = len(inputs[list(inputs.keys())[0]])
            if not all(len(v) == length for v in inputs.values()):
                raise ValueError("All values in the dictionary must have the same length")
        # Distribute the remainder: the first `num_extras` processes each take one extra element.
        num_samples_per_process, num_extras = divmod(length, self.num_processes)
        start_index = self.process_index * num_samples_per_process + min(self.process_index, num_extras)
        end_index = start_index + num_samples_per_process + (1 if self.process_index < num_extras else 0)

        def _split_values(inputs, start_index, end_index):
            if isinstance(inputs, (list, tuple, torch.Tensor)):
                # More processes than elements: fall back to repeating the last element.
                if start_index >= len(inputs):
                    result = inputs[-1:]
                else:
                    result = inputs[start_index:end_index]
                if apply_padding:
                    if isinstance(result, torch.Tensor):
                        from accelerate.utils import pad_across_processes, send_to_device

                        # The tensor needs to be on the device before we can pad it
                        tensorized_result = send_to_device(result, self.device)
                        result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
                    else:
                        result += [result[-1]] * (num_samples_per_process + (1 if num_extras > 0 else 0) - len(result))
                return result
            elif isinstance(inputs, dict):
                # NOTE: dict inputs are split in place — the caller's dict is mutated.
                for key in inputs.keys():
                    inputs[key] = _split_values(inputs[key], start_index, end_index)
                return inputs
            else:
                if is_datasets_available():
                    from datasets import Dataset

                    if isinstance(inputs, Dataset):
                        if start_index >= len(inputs):
                            start_index = len(inputs) - 1
                        if end_index > len(inputs):
                            end_index = len(inputs)
                        result_idcs = list(range(start_index, end_index))
                        if apply_padding:
                            result_idcs += [end_index - 1] * (
                                num_samples_per_process + (1 if num_extras > 0 else 0) - len(result_idcs)
                            )
                        return inputs.select(result_idcs)
                # Unknown input type: hand it back unsplit.
                return inputs

        yield _split_values(inputs, start_index, end_index)

    @contextmanager
    def main_process_first(self):
        """
        Lets the main process go first inside a with block.

        The other processes will enter the with block after the main process exits.

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator()
        >>> with accelerator.main_process_first():
        ...     # This will be printed first by process 0 then in a seemingly
        ...     # random order by the other processes.
        ...     print(f"This will be printed by process {accelerator.process_index}")
        ```
        """
        yield from self._goes_first(self.is_main_process)

    @contextmanager
    def local_main_process_first(self):
        """
        Lets the local main process go first inside a with block.

        The other processes will enter the with block after the main process exits.

        Example:

        ```python
        >>> from accelerate.state import PartialState

        >>> state = PartialState()
        >>> with state.local_main_process_first():
        ...     # This will be printed first by local process 0 then in a seemingly
        ...     # random order by the other processes.
        ...     print(f"This will be printed by process {state.local_process_index}")
        ```
        """
        yield from self._goes_first(self.is_local_main_process)

    def on_main_process(self, function: Callable[..., Any] = None):
        """
        Decorator that only runs the decorated function on the main process.

        Args:
            function (`Callable`): The function to decorate.
+ + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + + + >>> @state.on_main_process + ... def print_something(): + ... print("This will be printed by process 0 only.") + + + >>> print_something() + "This will be printed by process 0 only" + ``` + """ + if not self.initialized: + raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") + if self.is_main_process or not self.use_distributed: + return function + return do_nothing + + def on_local_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the local main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_local_main_process + def print_something(): + print("This will be printed by process 0 only on each server.") + + + print_something() + # On server 1: + "This will be printed by process 0 only" + # On server 2: + "This will be printed by process 0 only" + ``` + """ + if self.is_local_main_process or not self.use_distributed: + return function + return do_nothing + + def on_last_process(self, function: Callable[..., Any]): + """ + Decorator that only runs the decorated function on the last process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 4 processes. 
+ from accelerate.state import PartialState + + state = PartialState() + + + @state.on_last_process + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 3" + ``` + """ + if self.is_last_process or not self.use_distributed: + return function + return do_nothing + + def on_process(self, function: Callable[..., Any] = None, process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index. + + Args: + function (`Callable`, `optional`): + The function to decorate. + process_index (`int`, `optional`): + The index of the process on which to run the function. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_process(process_index=2) + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_process, process_index=process_index) + if (self.process_index == process_index) or (not self.use_distributed): + return function + return do_nothing + + def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index on the current node. + + Args: + function (`Callable`, *optional*): + The function to decorate. + local_process_index (`int`, *optional*): + The index of the local process on which to run the function. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. 
+ from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_process(local_process_index=2) + def print_something(): + print(f"Printed on process {accelerator.local_process_index}") + + + print_something() + # On server 1: + "Printed on process 2" + # On server 2: + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_local_process, local_process_index=local_process_index) + if (self.local_process_index == local_process_index) or (not self.use_distributed): + return function + return do_nothing + + def print(self, *args, **kwargs): + if self.is_local_main_process: + print(*args, **kwargs) + + @property + def default_device(self) -> torch.device: + """ + Returns the default device which is: + - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. + - CUDA if `torch.cuda.is_available()` + - MLU if `is_mlu_available()` + - SDAA if `is_sdaa_available()` + - MUSA if `is_musa_available()` + - NPU if `is_npu_available()` + - HPU if `is_hpu_available()` + - CPU otherwise + """ + if is_mps_available(): + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + return torch.device("mps") + elif is_mlu_available(): + return torch.device("mlu") + elif is_sdaa_available(): + return torch.device("sdaa") + elif is_musa_available(): + return torch.device("musa") + # NPU should be checked before CUDA when using `transfer_to_npu` + # See issue #3020: https://github.com/huggingface/accelerate/issues/3020 + elif is_npu_available(): + return torch.device("npu") + elif is_hpu_available(): + return torch.device("hpu") + elif torch.cuda.is_available(): + return torch.device("cuda") + elif is_xpu_available(): + return torch.device("xpu") + else: + return torch.device("cpu") + + def _prepare_backend( + self, cpu: bool = False, sagemaker_dp=False, backend: str = None + ) -> tuple[str, DistributedType]: + "Prepares any imports needed before initializing the distributed backend and sets 
`self.backend` properly"
        distributed_type = None
        if sagemaker_dp:
            import smdistributed.dataparallel.torch.torch_smddp  # noqa

            backend = "smddp"
            distributed_type = DistributedType.MULTI_GPU
        elif is_torch_xla_available():
            backend = "xla"
            distributed_type = DistributedType.XLA

        elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu:
            if is_mlu_available():
                backend = "cncl"
                distributed_type = DistributedType.MULTI_MLU
            # NOTE(review): plain `if` (not `elif`) below — if both MLU and SDAA were ever
            # available the SDAA branch would win; presumably the accelerators are mutually
            # exclusive in practice — confirm.
            if is_sdaa_available():
                backend = "tccl"
                distributed_type = DistributedType.MULTI_SDAA
            elif is_musa_available():
                backend = "mccl"
                distributed_type = DistributedType.MULTI_MUSA
            # NPU should be checked before CUDA when using `transfer_to_npu`
            # See issue #3020: https://github.com/huggingface/accelerate/issues/3020
            elif is_npu_available():
                backend = "hccl"
                distributed_type = DistributedType.MULTI_NPU
            elif is_hpu_available(init_hccl=True):
                if backend is None:
                    backend = "hccl"
                distributed_type = DistributedType.MULTI_HPU
            elif torch.cuda.is_available():
                if backend is None:
                    backend = "nccl"
                distributed_type = DistributedType.MULTI_GPU
            elif is_xpu_available() and is_xccl_available():
                if backend is None:
                    backend = "xccl"
                distributed_type = DistributedType.MULTI_XPU

        # A launcher set LOCAL_RANK or an MPI-style world size > 1, but no accelerator
        # backend matched above: fall back to CPU/XPU distributed.
        if distributed_type is None and (
            int(os.environ.get("LOCAL_RANK", -1)) != -1
            or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1
        ):
            if not cpu and is_xpu_available():
                distributed_type = DistributedType.MULTI_XPU
            else:
                distributed_type = DistributedType.MULTI_CPU

            if (
                backend in (None, "ccl")
                and is_ccl_available()
                and (get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU)
            ):
                import oneccl_bindings_for_pytorch  # noqa: F401

                backend = "ccl"
            elif backend in (None, "mpi") and torch.distributed.is_mpi_available():
                backend = "mpi"
            else:
                backend = "gloo"
        if distributed_type is None:
            distributed_type = DistributedType.NO

        return backend, distributed_type

    def set_device(self):
        """
        Sets the device in `self.device` to the current distributed environment.
        """
        # Respect a device already set (e.g. from ACCELERATE_TORCH_DEVICE in __init__).
        if self.device is not None:
            return
        if self.distributed_type == DistributedType.NO:
            self.device = torch.device("cpu") if self._cpu else self.default_device
            return
        # Derive the device string from the DistributedType name, e.g. MULTI_GPU -> "gpu".
        device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower()
        if device not in ("cpu", "gpu", "mlu", "musa", "npu", "xpu", "xla", "hpu", "sdaa"):
            raise ValueError(
                f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `_set_device()` for it!"
            )
        if device == "xla":
            self.device = xm.xla_device()
        elif device == "hpu":
            self.device = torch.device("hpu", torch.hpu.current_device())
        else:
            if device == "gpu":
                device = "cuda"
            device_module = getattr(torch, device)
            # Round-robin local ranks across the devices visible on this node.
            device_index = self.local_process_index % device_module.device_count()
            self.device = torch.device(device, device_index)
            device_module.set_device(self.device)

    def destroy_process_group(self, group=None):
        """
        Destroys the process group. If one is not specified, the default process group is destroyed.
        """
        # NOTE(review): fork-launched processes skip destroying the default group —
        # presumably to avoid tearing down the parent's group; confirm.
        if self.fork_launched and group is None:
            return
        # needed when using torch.distributed.init_process_group
        if torch.distributed.is_initialized():
            torch.distributed.destroy_process_group(group)

    def __getattr__(self, name: str):
        # By this point we know that no attributes of `self` contain `name`,
        # so we just modify the error message
        if name in self._known_attrs:
            raise AttributeError(
                f"`PartialState` object has no attribute `{name}`. "
                "This happens if `PartialState._reset_state()` was called and "
                "an `Accelerator` or `PartialState` was not reinitialized."
+ ) + # Raise a typical AttributeError + raise AttributeError(f"'PartialState' object has no attribute '{name}'") + + +class AcceleratorState: + """ + Singleton class that has information about the current training environment. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently + in use. + - **parallelism_config** ([`~accelerate.utils.ParallelismConfig`]) -- The parallelism configuration for the + current training environment. This is used to configure the distributed training environment. + - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. + - **local_process_index** (`int`) -- The index of the current process on the current server. + - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type + of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). + - **num_processes** (`int`) -- The number of processes currently launched in parallel. + - **process_index** (`int`) -- The index of the current process. + - **is_last_process** (`bool`) -- Whether or not the current process is the last one. + - **is_main_process** (`bool`) -- Whether or not the current process is the main one. + - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. + - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
+ """ + + _shared_state = SharedDict() + _known_attrs = PartialState._known_attrs + [ + "deepspeed_plugin", + "use_ipex", + "fsdp_plugin", + "megatron_lm_plugin", + "dynamo_plugin", + ] + + def __init__( + self, + mixed_precision: str = None, + cpu: bool = False, + dynamo_plugin=None, + deepspeed_plugin=None, + fsdp_plugin=None, + torch_tp_plugin=None, + megatron_lm_plugin=None, + parallelism_config=None, + _from_accelerator: bool = False, + **kwargs, + ): + self.__dict__ = self._shared_state + if parse_flag_from_env("ACCELERATE_USE_CPU"): + cpu = True + if PartialState._shared_state == {}: + PartialState(cpu, **kwargs) + self.__dict__.update(PartialState._shared_state) + self._check_initialized(mixed_precision, cpu) + if not self.initialized: + self.deepspeed_plugins = None + self.use_ipex = None + self.torch_tp_plugin = torch_tp_plugin + self.parallelism_config = parallelism_config + self.device_mesh = None + mixed_precision = ( + parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") + if mixed_precision is None + else mixed_precision.lower() + ) + if mixed_precision == "fp8": + # this is confusing, why is is_fp8_available only checks for library availability ? + if not is_fp8_available(): + raise ValueError( + "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." + ) + elif torch.cuda.is_available() and not check_cuda_fp8_capability(): + logger.warning( + f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " + "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " + "or higher, compute capability of 8.9 or higher). Will use FP16 instead." + ) + mixed_precision = "fp16" + elif is_habana_gaudi1(): + logger.warning( + "The current HPU device is Gaudi1 which does not support FP8 mixed precision training (requires " + "Gaudi2 or higher). Will use BF16 instead." 
+ ) + mixed_precision = "bf16" + + self.dynamo_plugin = dynamo_plugin + if not _from_accelerator: + raise ValueError( + "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " + "before using any functionality from the `accelerate` library." + ) + # deepspeed handles mixed_precision using deepspeed_config + self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision + if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): + if mixed_precision == "bf16": + if os.environ.get("ACCELERATE_DOWNCAST_BF16"): + os.environ["XLA_USE_BF16"] = str(0) + os.environ["XLA_DOWNCAST_BF16"] = str(1) + self.downcast_bfloat = True + else: + os.environ["XLA_USE_BF16"] = str(1) + os.environ["XLA_DOWNCAST_BF16"] = str(0) + self.downcast_bfloat = False + elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false").lower() == "true" and not cpu: + self.distributed_type = DistributedType.DEEPSPEED + if not isinstance(deepspeed_plugin, dict): + deepspeed_plugin.set_mixed_precision(mixed_precision) + deepspeed_plugin.select(_from_accelerator_state=True) + else: + for plugin in deepspeed_plugin.values(): + plugin.set_mixed_precision(mixed_precision) + # The first plugin passed in is always the active one + first_plugin = next(iter(deepspeed_plugin.values())) + first_plugin.select(_from_accelerator_state=True) + self.deepspeed_plugins = deepspeed_plugin + elif self.distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + ]: + # TODO: Siro - remove when axolotl fixes their side + if not os.environ.get("ACCELERATE_ALLOW_CP_STANDALONE", "false").lower() == "true": + if self.parallelism_config and self.parallelism_config.cp_enabled and fsdp_plugin is None: + raise ValueError( + "`cp_size > 1` specified in the 
`parallelism_config`, but no `fsdp_plugin` was provided. We need a `fsdp_plugin` to use context parallelism, as we also shard the model across the device mesh to save more memory" + ) + if ( + self.parallelism_config is not None + and self.parallelism_config.cp_enabled + and fsdp_plugin.fsdp_version == 1 + ): + raise ValueError( + "Using `cp_size>1` requires FSDP2, but the provided `fsdp_plugin` is using FSDP1. " + ) + if (os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true" or fsdp_plugin is not None) or ( + self.parallelism_config is not None and self.parallelism_config.cp_enabled + ): + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no" and fsdp_plugin is not None: + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + if os.environ.get( + "ACCELERATE_USE_MEGATRON_LM", "false" + ).lower() == "true" and self.distributed_type not in [ + DistributedType.MULTI_XPU, + ]: + self.distributed_type = DistributedType.MEGATRON_LM + megatron_lm_plugin.set_mixed_precision(self._mixed_precision) + self.megatron_lm_plugin = megatron_lm_plugin + elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: + if is_ipex_available(): + # check if user disables it explicitly + self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) + else: + self.use_ipex = False + if ( + self.dynamo_plugin.backend != DynamoBackend.NO + and self._mixed_precision == "no" + and self.device.type == "cuda" + ): + torch.backends.cuda.matmul.allow_tf32 = True + if ( + self.dynamo_plugin.backend != DynamoBackend.NO + and self._mixed_precision == "no" + and self.device.type == "musa" + ): + torch.backends.musa.matmul.allow_tf32 = True + PartialState._shared_state["distributed_type"] = self.distributed_type + + @property + def initialized(self) -> bool: + return self._shared_state != PartialState._shared_state + + def __repr__(self): + repr = PartialState().__repr__() + 
f"\nMixed precision type: {self.mixed_precision}\n" + if self.distributed_type == DistributedType.DEEPSPEED: + repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" + return repr + + def _check_initialized(self, mixed_precision=None, cpu=None): + "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" + if self.initialized: + err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." + if cpu and self.device.type != "cpu": + raise ValueError(err.format(flag="cpu=True")) + if ( + mixed_precision is not None + and mixed_precision != self._mixed_precision + and self.distributed_type != DistributedType.DEEPSPEED + ): + raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) + + @property + def mixed_precision(self): + if self.distributed_type == DistributedType.DEEPSPEED: + config = self.deepspeed_plugin.deepspeed_config + if config.get("fp16", {}).get("enabled", False): + mixed_precision = "fp16" + elif config.get("bf16", {}).get("enabled", False): + mixed_precision = "bf16" + else: + mixed_precision = "no" + else: + mixed_precision = self._mixed_precision + return mixed_precision + + @staticmethod + def _reset_state(reset_partial_state: bool = False): + "Resets `_shared_state`, is used internally and should not be called" + AcceleratorState._shared_state.clear() + if reset_partial_state: + PartialState._reset_state() + + def destroy_process_group(self, group=None): + """ + Destroys the process group. If one is not specified, the default process group is destroyed. + + If `self.fork_lauched` is `True` and `group` is `None`, nothing happens. 
+ """ + PartialState().destroy_process_group(group) + + @property + def fork_launched(self): + return PartialState().fork_launched + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return PartialState().use_distributed + + @property + def is_fsdp2(self) -> bool: + return self.distributed_type == DistributedType.FSDP and self.fsdp_plugin.fsdp_version == 2 + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return PartialState().is_last_process + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return PartialState().is_main_process + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return PartialState().is_local_main_process + + def wait_for_everyone(self): + PartialState().wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing + in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. 
+ + + Example: + + ```python + # Assume there are two processes + from accelerate.state import AcceleratorState + + state = AcceleratorState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: + yield inputs + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().main_process_first(): + yield + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().local_main_process_first(): + yield + + @property + def deepspeed_plugin(self): + """ + Returns the currently active DeepSpeedPlugin. + + If not using deepspeed, returns `None`. + """ + # To maintain original behavior, return None if not using deepspeed. + if self.distributed_type != DistributedType.DEEPSPEED: + return None + from accelerate.utils.deepspeed import get_active_deepspeed_plugin + + return get_active_deepspeed_plugin(self) + + @deepspeed_required + def get_deepspeed_plugin(self, name: str): + """ + Returns the DeepSpeedPlugin with the given plugin_key. + """ + return self.deepspeed_plugins[name] + + @deepspeed_required + def select_deepspeed_plugin(self, name: str = None): + """ + Activates the DeepSpeedPlugin with the given `name`, and will disable all other plugins. 
+ """ + for key, plugin in self.deepspeed_plugins.items(): + if key != name: + plugin._unselect() + self.deepspeed_plugins[name].select(_from_accelerator_state=True) + + def print(self, *args, **kwargs): + PartialState().print(*args, **kwargs) + + def __getattr__(self, name: str): + # By this point we know that no attributes of `self` contain `name`, + # so we just modify the error message + if name in self._known_attrs: + raise AttributeError( + f"`AcceleratorState` object has no attribute `{name}`. " + "This happens if `AcceleratorState._reset_state()` was called and " + "an `Accelerator` or `PartialState` was not reinitialized." + ) + # Raise a typical AttributeError + raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'") + + +class GradientState: + """ + Singleton class that has information related to gradient synchronization for gradient accumulation + + **Available attributes:** + + - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader + - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader + - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices + - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over + - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are + being iterated over + - **num_steps** (`int`) -- The number of steps to accumulate over + - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient + accumulation + - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader + iteration and the number of total steps reset + - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized + as false. 
Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently, + after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence + is_xla_gradients_synced is always true. + """ + + _shared_state = SharedDict() + + def __init__(self, gradient_accumulation_plugin: GradientAccumulationPlugin | None = None): + self.__dict__ = self._shared_state + if not self.initialized: + self.sync_gradients = True + self._dataloader_references_ref = [None] + self.plugin_kwargs = ( + gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {} + ) + self._is_xla_gradients_synced = False + + # Plugin args are different and can be updated + if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs(): + self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs() + + @property + def num_steps(self) -> int: + "Returns the number of steps to accumulate over" + return self.plugin_kwargs.get("num_steps", 1) + + @property + def adjust_scheduler(self) -> bool: + "Returns whether the scheduler should be adjusted" + return self.plugin_kwargs.get("adjust_scheduler", False) + + @property + def sync_with_dataloader(self) -> bool: + "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset" + return self.plugin_kwargs.get("sync_with_dataloader", True) + + @property + def initialized(self) -> bool: + "Returns whether the `GradientState` has been initialized" + return GradientState._shared_state != {} + + @property + def end_of_dataloader(self) -> bool: + "Returns whether we have reached the end of the current dataloader" + if not self.in_dataloader: + return False + return self.active_dataloader.end_of_dataloader + + @property + def remainder(self) -> int: + "Returns the number of extra samples that were added from padding the dataloader" + if not self.in_dataloader: + return -1 + return 
self.active_dataloader.remainder + + def __repr__(self): + return ( + f"Sync Gradients: {self.sync_gradients}\n" + f"At end of current dataloader: {self.end_of_dataloader}\n" + f"Extra samples added: {self.remainder}\n" + f"Gradient accumulation plugin: {self.plugin_kwargs}\n" + ) + + @property + def is_xla_gradients_synced(self): + "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true." + if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False): + return True + return self._is_xla_gradients_synced + + @is_xla_gradients_synced.setter + def is_xla_gradients_synced(self, is_synced): + "Set the _is_xla_gradients_synced attribute." + self._is_xla_gradients_synced = is_synced + + def _set_sync_gradients(self, sync_gradients): + "Private function that sets whether gradients should be synchronized. Users should not have to call this." + self.sync_gradients = sync_gradients + # Allow grad-sync to automatically work on TPUs + if ( + self.sync_gradients + and is_torch_xla_available(check_is_tpu=True) + and PartialState().distributed_type == DistributedType.XLA + ): + xm.mark_step() + + def _add_dataloader(self, dataloader): + "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this." + # We explicitly use assignment to ensure that the property setter is triggered, which is required for garbage collection. + # Avoid using self.dataloader_references.append as it will not trigger the setter. + self.dataloader_references += [dataloader] + + def _remove_dataloader(self, dataloader): + "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this." + # We explicitly use assignment to ensure that the property setter is triggered. 
+ self.dataloader_references = [ + dataloader_ref for dataloader_ref in self.dataloader_references if dataloader_ref != dataloader + ] + + @property + def active_dataloader(self): + return self.dataloader_references[-1] + + @property + def dataloader_references(self): + # We use a property getter and setter with weakrefs to avoid circular references that prevent garbage collection + return [reference() if reference is not None else reference for reference in self._dataloader_references_ref] + + @dataloader_references.setter + def dataloader_references(self, references): + self._dataloader_references_ref = [ + weakref.ref(dataloader) if dataloader is not None else dataloader for dataloader in references + ] + + @property + def in_dataloader(self) -> bool: + "Returns whether the current process is in a dataloader" + return self.active_dataloader is not None + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + GradientState._shared_state.clear() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/__init__.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14f2f4f5a459fef9e9862ab32ce42c36e2ed573e --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from .testing import ( + DEFAULT_LAUNCH_COMMAND, + are_the_same_tensors, + assert_exception, + capture_call_output, + device_count, + execute_subprocess_async, + get_launch_command, + get_torch_dist_unique_port, + memory_allocated_func, + path_in_accelerate_package, + pytest_xdist_worker_id, + require_bnb, + require_cpu, + require_cuda, + require_cuda_or_hpu, + require_cuda_or_xpu, + require_fp8, + require_fp16, + require_huggingface_suite, + require_mlu, + require_mps, + require_multi_device, + require_multi_gpu, + require_multi_gpu_or_xpu, + require_multi_xpu, + require_musa, + require_non_cpu, + require_non_hpu, + require_non_torch_xla, + require_non_xpu, + require_npu, + require_pippy, + require_sdaa, + require_single_device, + require_single_gpu, + require_single_xpu, + require_torch_min_version, + require_torchao, + require_torchvision, + require_tpu, + require_transformer_engine, + require_xpu, + run_first, + skip, + slow, + torch_device, +) +from .training import RegressionDataset, RegressionModel, RegressionModel4XPU + + +from .scripts import test_script, test_sync, test_ops # isort: skip diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5964e0ce631ca51f899776aecdeede350c6ab936 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d6e3c7c5f238013b8d6a7315d0d0d851a790dfe Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc differ 
diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bdf629a9718f25f10b544499d7a2c125b59c735 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5221245f440ed0e4d166c890ceca0445ebb61869 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/examples.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/examples.py new file mode 100644 index 0000000000000000000000000000000000000000..79d09eedbc263dcab49519bd7e15d08c7cf15ed2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/examples.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each +`examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the +others are used to either get the code that matters, or to preprocess them (such as stripping comments) +""" + +import os + + +def get_function_contents_by_name(lines: list[str], name: str): + """ + Extracts a function from `lines` of segmented source code with the name `name`. + + Args: + lines (`List[str]`): + Source code of a script separated by line. + name (`str`): + The name of the function to extract. Should be either `training_function` or `main` + """ + if name != "training_function" and name != "main": + raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'") + good_lines, found_start = [], False + for line in lines: + if not found_start and f"def {name}" in line: + found_start = True + good_lines.append(line) + continue + if found_start: + if name == "training_function" and "def main" in line: + return good_lines + if name == "main" and "if __name__" in line: + return good_lines + good_lines.append(line) + + +def clean_lines(lines: list[str]): + """ + Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\n') + + Args: + lines (`List[str]`): + Source code of a script separated by line. + """ + return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"] + + +def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None): + """ + Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be + used when testing to see if `complete_*_.py` examples have all of the implementations from each of the + `examples/by_feature/*` scripts. 
+ + It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code + is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the + `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter. + + Args: + base_filename (`str` or `os.PathLike`): + The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py` + feature_filename (`str` or `os.PathLike`): + The filepath of a single feature example script. The contents of this script are checked to see if they + exist in `base_filename` + parser_only (`bool`): + Whether to compare only the `main()` sections in both files, or to compare the contents of + `training_loop()` + secondary_filename (`str`, *optional*): + A potential secondary filepath that should be included in the check. This function extracts the base + functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than + `complete_nlp_example.py`, the template script should be included here. 
Such as `examples/cv_example.py` + """ + with open(base_filename) as f: + base_file_contents = f.readlines() + with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f: + full_file_contents = f.readlines() + with open(feature_filename) as f: + feature_file_contents = f.readlines() + if secondary_filename is not None: + with open(secondary_filename) as f: + secondary_file_contents = f.readlines() + + # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content + if parser_only: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main")) + if secondary_filename is not None: + secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main")) + else: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function")) + if secondary_filename is not None: + secondary_file_func = clean_lines( + get_function_contents_by_name(secondary_file_contents, "training_function") + ) + + _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n" + + # Specific code in our script that differs from the full version, aka what is new + new_feature_code = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + it = iter(feature_file_func) + for i in range(len(feature_file_func) - 1): + if i not in passed_idxs: + line = next(it) + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + 
new_feature_code.append(line) + passed_idxs.append(i) + else: + # Skip over the `config['num_epochs'] = 2` statement + _ = next(it) + + # Extract out just the new parts from the full_file_training_func + new_full_example_parts = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + for i, line in enumerate(base_file_func): + if i not in passed_idxs: + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_full_example_parts.append(line) + passed_idxs.append(i) + + # Finally, get the overall diff + diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts] + if secondary_filename is not None: + diff_from_two = [line for line in full_file_contents if line not in secondary_file_func] + diff_from_example = [line for line in diff_from_example if line not in diff_from_two] + + return diff_from_example diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f16b2783ec99fae3e1e15085085b751a56be8766 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fddf691455a25056919a47bdeceb548cd1bbe2a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ddp_comm_hook.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ddp_comm_hook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f2db5c1b81833ac2976c7d8b591c7426a54391 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ddp_comm_hook.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3abb5d4c610cbe051cb18c0e8b752070212c5bfa Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_merge_weights.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_merge_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..674a7a48a3b3e6a28afb27bef6e4f1373a8e7f53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_merge_weights.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79b41423d2bc1a0c07f5aa807c9b20b3e4a0620a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..568aa21ddda32ac61710e9995efcecb68b35feab Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e666a8d035b7fd102006220497704c1531953c7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d90e59b6a3e75ad622a6246fefd0db300207733 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6efa794b7d7e84ad522aedc95912ac3b0d2490f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b42e1bbdbb4c7bb336721ed28f3b86208af344a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_ds_multiple_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_ds_multiple_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f5747201984c5ae71e7bfa6d580569a5501e34b Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_ds_multiple_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5c897a1d3cb2d9fc6b7316946c67c6e2584ae7a Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9de2a25e5ef4f017fb9ddb7bc2fe6784f1786664 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a8a7d1b8525f085728f7d470b18ab95d875eb5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e58a78c7ea2ed6401de915c0182e3dda78794ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..488f79a6689fad8575266e8734e8812fa136ab0f Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1553898ec3d55e64822c204ddf7e705069ce8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py @@ -0,0 +1,269 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. 
+ + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def evaluation_loop(accelerator, model, eval_dataloader, metric): + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + return eval_metric["accuracy"] + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // 
gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + metric = evaluate.load("glue", "mrpc") + ending_epoch = num_epochs + + if args.partial_train_epoch is not None: + ending_epoch = args.partial_train_epoch + + if args.resume_from_checkpoint: + accelerator.load_state(args.resume_from_checkpoint) + epoch_string = args.resume_from_checkpoint.split("epoch_")[1] + state_epoch_num = "" + for char in epoch_string: + if char.isdigit(): + state_epoch_num += char + else: + break + starting_epoch = int(state_epoch_num) + 1 + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + accelerator.print("resumed checkpoint performance:", accuracy) + accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0]) + accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"]) + with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f: + resumed_state = json.load(f) + assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" + assert resumed_state["lr"] == 
lr_scheduler.get_lr()[0], ( + "Scheduler learning rate mismatch, loading from checkpoint failed" + ) + assert resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"], ( + "Optimizer learning rate mismatch, loading from checkpoint failed" + ) + assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" + return + + # Now we train the model + state = {} + for epoch in range(starting_epoch, ending_epoch): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + output_dir = f"epoch_{epoch}" + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + state["accuracy"] = accuracy + state["lr"] = lr_scheduler.get_lr()[0] + state["optimizer_lr"] = optimizer.param_groups[0]["lr"] + state["epoch"] = epoch + state["step"] = overall_step + accelerator.print(f"epoch {epoch}:", state) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f: + json.dump(state, f) + accelerator.end_training() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--partial_train_epoch", + type=int, + default=None, + help="If passed, the training will stop after this number of epochs.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=2, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3729ecf4c72190bf865d620b6941206ab904818c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py @@ -0,0 +1,332 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Test script for verifying multiple models can be utilized with Accelerate + DeepSpeed: + +Scenario 1: One model is training, another model is being used for inference/logits to impact training in some form. 
+Scenario 2: Two models are training simultaneously, which means two optimizers, etc. +""" + +import argparse +from pathlib import Path + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup + +from accelerate import Accelerator, DeepSpeedPlugin, DistributedType +from accelerate.state import AcceleratorState +from accelerate.utils.deepspeed import get_active_deepspeed_plugin + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +class NoiseModel(torch.nn.Module): + def __init__(self, noise_factor=0.1): + super().__init__() + self.noise_factor = torch.nn.Parameter(torch.tensor(noise_factor, dtype=torch.float32)) + + def forward(self, loss): + return loss * self.noise_factor + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. 
+ model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +test_file_path = __file__ +path = Path(test_file_path).resolve() +test_file_dir_str = str(path.parent.parent.parent.parent.parent.parent) + +# Create our DS plugins +# We use custom schedulers and optimizers, hence `model_only` +ds_config_file = dict( + zero2=f"{test_file_dir_str}/tests/deepspeed/ds_config_zero2_model_only.json", + zero3=f"{test_file_dir_str}/tests/deepspeed/ds_config_zero3_model_only.json", +) + + +def single_model_training(config, args): + # Training a single model, we have a `noise` model that is untrainable used to inject some noise into the training process + num_epochs = config["num_epochs"] + zero2_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero2"]) + zero3_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero3"]) + + deepspeed_plugins = {"training": zero2_plugin, "inference": zero3_plugin} + + # Initialize accelerator + accelerator = Accelerator( + deepspeed_plugins=deepspeed_plugins, + mixed_precision="bf16", + ) + + # Initialize model under zero2 plugin + assert get_active_deepspeed_plugin(accelerator.state) is zero2_plugin + train_model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) + train_dataloader, eval_dataloader = get_dataloaders( + accelerator, batch_size=config["batch_size"], model_name=args.model_name_or_path + ) + max_training_steps = len(train_dataloader) * config["num_epochs"] + optimizer = AdamW(train_model.parameters(), lr=config["lr"]) + lr_scheduler = get_linear_schedule_with_warmup( + optimizer, num_warmup_steps=0, num_training_steps=max_training_steps + ) + + train_dataloader, eval_dataloader, train_model, optimizer, lr_scheduler = accelerator.prepare( + 
train_dataloader, eval_dataloader, train_model, optimizer, lr_scheduler + ) + + # Now prepare the model under zero3 plugin + accelerator.state.select_deepspeed_plugin("inference") + assert get_active_deepspeed_plugin(accelerator.state) is zero3_plugin + inference_model = NoiseModel() + inference_model = accelerator.prepare(inference_model) + inference_model.eval() + + # Run training loop + accelerator.state.select_deepspeed_plugin("training") + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + best_performance = 0 + metric = evaluate.load("glue", "mrpc") + performance_metric = {} + for epoch in range(starting_epoch, num_epochs): + train_model.train() + inference_model.train() + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(train_model): + outputs_1 = train_model(**batch) + with torch.no_grad(): + outputs_2 = inference_model(outputs_1.loss) + # Combine the losses + loss = outputs_1.loss + outputs_2 + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + train_model.eval() + for step, batch in enumerate(eval_dataloader): + with torch.no_grad(): + outputs = train_model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. 
+ accelerator.print(f"epoch {epoch}:", eval_metric) + performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] + + if best_performance < eval_metric["accuracy"]: + best_performance = eval_metric["accuracy"] + assert best_performance > performance_metric["epoch-0"] + + +def multiple_model_training(config, args): + # This will essentially be like a k-fold model, but one model is Zero-2 and another model is Zero-3 + num_epochs = config["num_epochs"] + zero2_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero2"]) + zero3_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero3"]) + + deepspeed_plugins = {"zero2": zero2_plugin, "zero3": zero3_plugin} + + # Initialize accelerator + zero2_accelerator = Accelerator( + deepspeed_plugins=deepspeed_plugins, + mixed_precision="bf16", + ) + + # Since an `AcceleratorState` has already been made, we can just reuse it here + zero3_accelerator = Accelerator() + + # Initialize model under zero2 plugin + assert get_active_deepspeed_plugin(zero2_accelerator.state) is zero2_plugin + zero2_model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) + train_dataloader, eval_dataloader = get_dataloaders( + zero2_accelerator, batch_size=config["batch_size"], model_name=args.model_name_or_path + ) + max_training_steps = len(train_dataloader) * config["num_epochs"] + zero2_optimizer = AdamW(zero2_model.parameters(), lr=config["lr"]) + zero2_lr_scheduler = get_linear_schedule_with_warmup( + zero2_optimizer, num_warmup_steps=0, num_training_steps=max_training_steps + ) + + train_dataloader, eval_dataloader, zero2_model, zero2_optimizer, zero2_lr_scheduler = zero2_accelerator.prepare( + train_dataloader, eval_dataloader, zero2_model, zero2_optimizer, zero2_lr_scheduler + ) + assert zero2_accelerator.deepspeed_engine_wrapped.engine is zero2_model + + # now do Zero3 + zero3_accelerator.state.select_deepspeed_plugin("zero3") + zero3_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 
zero2_plugin.deepspeed_config[ + "train_micro_batch_size_per_gpu" + ] + assert get_active_deepspeed_plugin(zero3_accelerator.state) is zero3_plugin + zero3_model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) + zero3_optimizer = AdamW(zero3_model.parameters(), lr=config["lr"]) + zero3_lr_scheduler = get_linear_schedule_with_warmup( + zero3_optimizer, num_warmup_steps=0, num_training_steps=max_training_steps + ) + zero3_model, zero3_optimizer, zero3_lr_scheduler = zero3_accelerator.prepare( + zero3_model, zero3_optimizer, zero3_lr_scheduler + ) + assert zero3_accelerator.deepspeed_engine_wrapped.engine is zero3_model + + # Run training loop + starting_epoch = 0 + + # Now we train the model + best_performance_a = 0 + best_performance_b = 0 + metric_a = evaluate.load("glue", "mrpc") + metric_b = evaluate.load("glue", "mrpc") + performance_metric_a = {} + performance_metric_b = {} + for epoch in range(starting_epoch, num_epochs): + zero2_model.train() + zero3_model.train() + for step, batch in enumerate(train_dataloader): + with zero2_accelerator.accumulate(zero2_model, zero3_model): + outputs_1 = zero2_model(**batch) + zero2_accelerator.backward(outputs_1.loss) + zero2_optimizer.step() + zero2_lr_scheduler.step() + zero2_optimizer.zero_grad() + outputs_2 = zero3_model(**batch) + zero3_accelerator.backward(outputs_2.loss) + zero3_optimizer.step() + zero3_lr_scheduler.step() + zero3_optimizer.zero_grad() + + zero2_model.eval() + zero3_model.eval() + for step, batch in enumerate(eval_dataloader): + with torch.no_grad(): + logits_a = zero2_model(**batch).logits + logits_b = zero3_model(**batch).logits + # Combine the logits from both models + predictions_a = logits_a.argmax(dim=-1) + predictions_b = logits_b.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions_a, predictions_b, references = zero2_accelerator.gather_for_metrics( + (predictions_a, predictions_b, batch["labels"]) + ) + 
metric_a.add_batch( + predictions=predictions_a, + references=references, + ) + metric_b.add_batch( + predictions=predictions_b, + references=references, + ) + + eval_metric_a = metric_a.compute() + eval_metric_b = metric_b.compute() + # Use accelerator.print to print only on the main process. + zero2_accelerator.print(f"epoch {epoch}:", eval_metric_a, eval_metric_b) + performance_metric_a[f"epoch-{epoch}"] = eval_metric_a["accuracy"] + performance_metric_b[f"epoch-{epoch}"] = eval_metric_b["accuracy"] + + if best_performance_a < eval_metric_a["accuracy"]: + best_performance_a = eval_metric_a["accuracy"] + if best_performance_b < eval_metric_b["accuracy"]: + best_performance_b = eval_metric_b["accuracy"] + assert best_performance_a > performance_metric_a["epoch-0"] + assert best_performance_b > performance_metric_b["epoch-0"] + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--performance_lower_bound", + type=float, + default=None, + help="Optional lower bound for the performance metric. 
If set, the training will throw error when the performance metric drops below this value.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=2, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + single_model_training(config, args) + AcceleratorState._reset_state(True) + multiple_model_training(config, args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..d1bfe351509148ebc48067584e9d61b93e7210a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py @@ -0,0 +1,307 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import math +import os +from copy import deepcopy + +import datasets +import evaluate +import torch +import transformers +from datasets import load_dataset +from torch.utils.data import DataLoader, IterableDataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer + +from accelerate import Accelerator, DataLoaderConfiguration, DistributedType +from accelerate.data_loader import DataLoaderDispatcher +from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device +from accelerate.utils import is_torch_xla_available, set_seed + + +os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true" + + +class ListHandler(logging.Handler): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.logs = [] + + def emit(self, record): + self.logs.append(record) + + +def get_basic_setup(accelerator, num_samples=82, batch_size=16): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=num_samples) + dataloader = DataLoader(dset, batch_size=batch_size) + model.to(accelerator.device) + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + return model, ddp_model, dataloader + + +def get_dataloader(accelerator: Accelerator, use_longest=False): + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased") + dataset = load_dataset("glue", "mrpc", split="validation") + + def tokenize_function(examples): + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + with accelerator.main_process_first(): + tokenized_datasets = dataset.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + if use_longest: + return tokenizer.pad(examples, 
padding="longest", return_tensors="pt") + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + + return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16) + + +def get_mrpc_setup(dispatch_batches, split_batches): + dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches) + accelerator = Accelerator(dataloader_config=dataloader_config) + dataloader = get_dataloader(accelerator, not dispatch_batches) + model = AutoModelForSequenceClassification.from_pretrained( + "hf-internal-testing/mrpc-bert-base-cased", return_dict=True + ) + ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader) + return { + "ddp": [ddp_model, ddp_dataloader, torch_device], + "no": [model, dataloader, accelerator.device], + }, accelerator + + +def generate_predictions(model, dataloader, accelerator): + logits_and_targets = [] + for batch in dataloader: + input, target = batch.values() + with torch.no_grad(): + logit = model(input) + logit, target = accelerator.gather_for_metrics((logit, target)) + logits_and_targets.append((logit, target)) + logits, targs = [], [] + for logit, targ in logits_and_targets: + logits.append(logit) + targs.append(targ) + logits, targs = torch.cat(logits), torch.cat(targs) + return logits, targs + + +def test_torch_metrics( + accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16 +): + _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size) + logits, _ = generate_predictions(ddp_model, dataloader, accelerator) + assert len(logits) == num_samples, ( + f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}" + ) + + +def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False): + metric = evaluate.load("glue", "mrpc") + setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches) + # First do baseline + model, dataloader, 
device = setup["no"] + model.to(device) + model.eval() + for batch in dataloader: + batch.to(device) + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + metric.add_batch(predictions=preds, references=batch["labels"]) + baseline = metric.compute() + + # Then do distributed + model, dataloader, device = setup["ddp"] + model.eval() + for batch in dataloader: + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + references = batch["labels"] + preds, references = accelerator.gather_for_metrics((preds, references)) + metric.add_batch(predictions=preds, references=references) + distributed = metric.compute() + + for key in "accuracy f1".split(): + assert math.isclose(baseline[key], distributed[key]), ( + f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" + ) + + +def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset(): + class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __iter__(self): + yield from self.data + + iterable_dataset = DummyIterableDataset([n for n in range(30)]) + dataloader = DataLoader(iterable_dataset, batch_size=4) + accelerator = Accelerator() + prepared_dataloader = accelerator.prepare(dataloader) + + if accelerator.is_main_process: + logger = logging.root.manager.loggerDict["accelerate.accelerator"] + list_handler = ListHandler() + logger.addHandler(list_handler) + + batches_for_metrics = [] + for batch in prepared_dataloader: + batches_for_metrics.append(accelerator.gather_for_metrics(batch)) + + assert torch.cat(batches_for_metrics).size(0) == 30 + + if accelerator.is_main_process: + assert len(list_handler.logs) == 0 + logger.removeHandler(list_handler) + + +def test_gather_for_metrics_with_iterable_dataset(): + class DummyIterableDataset(IterableDataset): + def 
__init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __iter__(self): + yield from self.data + + iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30))) + dataloader = DataLoader(iterable_dataset, batch_size=4) + + accelerator = Accelerator() + prepared_dataloader = accelerator.prepare(dataloader) + + assert isinstance(prepared_dataloader, DataLoaderDispatcher) + + if accelerator.is_main_process: + logger = logging.root.manager.loggerDict["accelerate.accelerator"] + list_handler = ListHandler() + logger.addHandler(list_handler) + + batches_for_metrics = [] + for batch in prepared_dataloader: + batches_for_metrics.append(accelerator.gather_for_metrics(batch)) + + assert torch.cat(batches_for_metrics).size(0) == 30 + + if accelerator.is_main_process: + assert len(list_handler.logs) == 0 + + logger.removeHandler(list_handler) + + +def test_gather_for_metrics_drop_last(): + accelerator = Accelerator() + per_device_batch_size = 5 + num_items = (10 * accelerator.num_processes) + 1 + dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True) + dataloader = accelerator.prepare(dataloader) + + iterator = iter(dataloader) + next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0') + batch = next(iterator) + gathered_items = accelerator.gather_for_metrics(batch) + + # Should return a full set of complete batches from each GPU + num_expected_items = per_device_batch_size * accelerator.num_processes + assert gathered_items.size(0) == (num_expected_items), ( + f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}" + ) + + +def main(): + dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False) + accelerator = Accelerator(dataloader_config=dataloader_config) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + else: + 
datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + # TorchXLA does not support batch dispatching. 'put_on_device' is always False for + # TorchXLA, which can cause a value error in 'prepare_data_loader' function. + dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False] + + # Temporarily close this test for TorchXLA due to the 'Cannot set version_counter for + # inference tensor' error in inference mode. Reopen it after TorchXLA fixes this bug. + # These are a bit slower so they should only be ran on the GPU or TPU + if accelerator.device.type != "cpu" and not is_torch_xla_available(): + if accelerator.is_local_main_process: + print("**Testing gather_for_metrics**") + for split_batches in [True, False]: + for dispatch_batches in dispatch_batches_options: + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`") + test_mrpc(dispatch_batches, split_batches) + accelerator.state._reset_state() + print("test_gather_for_metrics_with_iterable_dataset") + test_gather_for_metrics_with_iterable_dataset() + print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset") + test_gather_for_metrics_with_non_tensor_objects_iterable_dataset() + + # MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache. + # This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended. + # Skip this test when TorchXLA is enabled. 
+ if accelerator.state.distributed_type != DistributedType.XLA: + if accelerator.is_local_main_process: + print("**Test torch metrics**") + for split_batches in [True, False]: + for dispatch_batches in dispatch_batches_options: + dataloader_config = DataLoaderConfiguration( + split_batches=split_batches, dispatch_batches=dispatch_batches + ) + accelerator = Accelerator(dataloader_config=dataloader_config) + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99") + test_torch_metrics(accelerator, 99) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test last batch is not dropped when perfectly divisible**") + accelerator = Accelerator() + test_torch_metrics(accelerator, 512) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test that `drop_last` is taken into account**") + test_gather_for_metrics_drop_last() + accelerator.end_training() + accelerator.state._reset_state() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..723e5497656020516bf072cf4112f61f59c2e5cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py @@ -0,0 +1,314 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import gc +import json +import os + +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils import ( + is_hpu_available, + is_mlu_available, + is_musa_available, + is_npu_available, + is_sdaa_available, + is_xpu_available, +) +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +# Converting Bytes to Megabytes +def b2mb(x): + return int(x / 2**20) + + +# This context manager is used to track the peak memory usage of the process +class TorchTracemalloc: + def __enter__(self): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.cuda.memory_allocated() + elif is_mlu_available(): + torch.mlu.empty_cache() + torch.mlu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.mlu.memory_allocated() + elif is_sdaa_available(): + torch.sdaa.empty_cache() + torch.sdaa.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.sdaa.memory_allocated() + elif is_musa_available(): + torch.musa.empty_cache() + torch.musa.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.musa.memory_allocated() + elif is_npu_available(): + 
torch.npu.empty_cache() + torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.npu.memory_allocated() + elif is_xpu_available(): + torch.xpu.empty_cache() + torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.xpu.memory_allocated() + elif is_hpu_available(): + # torch.hpu.empty_cache() # not available on hpu as it reserves all device memory for the current process + torch.hpu.reset_peak_memory_stats() # reset the peak gauge to zero + self.begin = torch.hpu.memory_allocated() + return self + + def __exit__(self, *exc): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + self.end = torch.cuda.memory_allocated() + self.peak = torch.cuda.max_memory_allocated() + elif is_mlu_available(): + torch.mlu.empty_cache() + self.end = torch.mlu.memory_allocated() + self.begin = torch.mlu.max_memory_allocated() + elif is_sdaa_available(): + torch.sdaa.empty_cache() + self.end = torch.sdaa.memory_allocated() + self.begin = torch.sdaa.max_memory_allocated() + elif is_musa_available(): + torch.musa.empty_cache() + self.end = torch.musa.memory_allocated() + self.begin = torch.musa.max_memory_allocated() + elif is_npu_available(): + torch.npu.empty_cache() + self.end = torch.npu.memory_allocated() + self.peak = torch.npu.max_memory_allocated() + elif is_xpu_available(): + torch.xpu.empty_cache() + self.end = torch.xpu.memory_allocated() + self.peak = torch.xpu.max_memory_allocated() + elif is_hpu_available(): + # torch.hpu.empty_cache() # not available on hpu as it reserves all device memory for the current process + self.end = torch.hpu.memory_allocated() + self.peak = torch.hpu.max_memory_allocated() + self.used = b2mb(self.end - self.begin) + self.peaked = b2mb(self.peak - self.begin) + # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") + + +def get_dataloaders( + accelerator: Accelerator, + batch_size: int = 16, + model_name: str = "bert-base-cased", + n_train: int = 320, 
+ n_val: int = 160, +): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + The name of the model to use. + n_train (`int`, *optional*): + The number of training examples to use. + n_val (`int`, *optional*): + The number of validation examples to use. + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset( + "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} + ) + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare 
everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + train_total_peak_memory = {} + for epoch in range(starting_epoch, num_epochs): + with TorchTracemalloc() as tracemalloc: + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + + # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage + accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}") + accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}") + accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") + accelerator.print( + f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" + ) + train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin) + if args.peak_memory_upper_bound is not None: + assert train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound, ( + "Peak memory usage exceeded the upper bound" + ) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f: + json.dump(train_total_peak_memory, f) + accelerator.end_training() + + +def 
main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--peak_memory_upper_bound", + type=float, + default=None, + help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", + ) + parser.add_argument( + "--n_train", + type=int, + default=320, + help="Number of training examples to use.", + ) + parser.add_argument( + "--n_val", + type=int, + default=160, + help="Number of validation examples to use.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=1, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..8e500bdd4c01013904375f010e99d22aea4e4ff9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py @@ -0,0 +1,299 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os +from contextlib import nullcontext +from pathlib import Path + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup + +from accelerate import Accelerator, DistributedType +from accelerate.parallelism_config import ParallelismConfig +from accelerate.utils import SAFE_WEIGHTS_NAME, set_seed +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. 
+ model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + accelerator_kwargs = {} + # need this for DeepSpeed tests as `args.tp_size` would be None and `torch.distributed.init_device_mesh` would fail + if args.tp_size is not None: + accelerator_kwargs["parallelism_config"] = ParallelismConfig(tp_size=args.tp_size) + + # Initialize accelerator + accelerator = Accelerator(**accelerator_kwargs) + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Add TP related kwargs if provided + model_kwargs = {} + if args.tp_plan is not None: + model_kwargs["tp_plan"] = args.tp_plan + if args.tp_size is not None: + model_kwargs["tp_size"] = args.tp_size + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True, **model_kwargs) + + if args.add_pad_token: + if model.config.pad_token_id is None: + model.config.pad_token_id = 0 + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + max_training_steps = len(train_dataloader) * num_epochs + + # Instantiate scheduler + linear_decay_scheduler = False + if ( + 
accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + linear_decay_scheduler = True + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + metric = evaluate.load("glue", "mrpc") + best_performance = 0 + performance_metric = {} + expected_lr_after_first_optim_step = lr * ( + 1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps) + ) + lr_scheduler_check_completed = False + for epoch in range(starting_epoch, num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + accelerator.backward(loss) + context = nullcontext + if args.tp_plan is not None: + from torch.distributed._tensor.experimental import implicit_replication + + context = implicit_replication + with context(): + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # assert the learning rate after first optimizer step + if ( + accelerator.sync_gradients + and not lr_scheduler_check_completed + and linear_decay_scheduler + and accelerator.state.mixed_precision == "no" + ): + assert lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step, ( + f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got 
{lr_scheduler.get_last_lr()[0]}" + ) + lr_scheduler_check_completed = True + + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. 
+ accelerator.print(f"epoch {epoch}:", eval_metric) + performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] + + if best_performance < eval_metric["accuracy"]: + best_performance = eval_metric["accuracy"] + + # check that the LR is 0 + if linear_decay_scheduler and accelerator.state.mixed_precision == "no": + assert lr_scheduler.get_last_lr()[0] == 0, ( + f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}" + ) + + if args.performance_lower_bound is not None: + assert args.performance_lower_bound <= best_performance, ( + f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" + ) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: + json.dump(performance_metric, f) + + # TODO: skip saving of the model test for TP until the feature lands + if args.tp_plan is None: + # Finally try saving the model + accelerator.save_model(model, args.output_dir) + accelerator.wait_for_everyone() + if args.tp_plan is None: + assert Path(args.output_dir, SAFE_WEIGHTS_NAME).exists(), ( + "Model was not saved when calling `Accelerator.save_model`" + ) + accelerator.end_training() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--performance_lower_bound", + type=float, + default=None, + help="Optional lower bound for the performance metric. 
If set, the training will throw error when the performance metric drops below this value.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=3, + help="Number of train epochs.", + ) + parser.add_argument( + "--add_pad_token", + type=bool, + default=False, + help="To add pad token if not exists.", + ) + parser.add_argument( + "--tp_plan", + type=str, + default=None, + help="pass 'auto' to use TP", + ) + parser.add_argument( + "--tp_size", + type=int, + default=None, + help="TP size to be used to shard the model", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py new file mode 100644 index 0000000000000000000000000000000000000000..1dbd86c46b4a0c12df8ea4d736c7cd1e03f81813 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py @@ -0,0 +1,117 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import torch +from transformers import ( + BertConfig, + BertForMaskedLM, + GPT2Config, + GPT2ForSequenceClassification, +) + +from accelerate import PartialState +from accelerate.inference import prepare_pippy +from accelerate.test_utils import torch_device +from accelerate.utils import DistributedType, set_seed + + +model_to_config = { + "bert": (BertForMaskedLM, BertConfig, 512), + "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024), +} + + +def get_model_and_data_for_text(model_name, device, num_processes: int = 2): + initializer, config, seq_len = model_to_config[model_name] + config_args = {} + # Eventually needed for batch inference tests on gpt-2 when bs != 1 + # if model_name == "gpt2": + # config_args["pad_token_id"] = 0 + model_config = config(**config_args) + model = initializer(model_config) + kwargs = dict(low=0, high=model_config.vocab_size, device=device, dtype=torch.int64, requires_grad=False) + trace_input = torch.randint(size=(1, seq_len), **kwargs) + inference_inputs = torch.randint(size=(num_processes, seq_len), **kwargs) + return model, trace_input, inference_inputs + + +def test_bert(batch_size: int = 2): + set_seed(42) + state = PartialState() + model, trace_input, inference_inputs = get_model_and_data_for_text("bert", "cpu", batch_size) + model = prepare_pippy(model, example_args=(trace_input,), no_split_module_classes=model._no_split_modules) + # For inference args need to be a tuple + inputs = inference_inputs.to(torch_device) + with torch.no_grad(): + output = model(inputs) + # Zach: Check that we just grab the real outputs we need at the end + if not state.is_last_process: + assert output is None, "Output was not generated on just the last process!" + else: + assert output is not None, "Output was not generated in the last process!" 
+ + +def test_gpt2(batch_size: int = 2): + set_seed(42) + state = PartialState() + model, trace_input, inference_inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size) + model = prepare_pippy(model, example_args=(trace_input,), no_split_module_classes=model._no_split_modules) + # For inference args need to be a tuple + inputs = inference_inputs.to(torch_device) + with torch.no_grad(): + output = model(inputs) + # Zach: Check that we just grab the real outputs we need at the end + if not state.is_last_process: + assert output is None, "Output was not generated on just the last process!" + else: + assert output is not None, "Output was not generated in the last process!" + + +# Currently disabled, enable again once PyTorch pippy interface can trace a resnet34 +# def test_resnet(batch_size: int = 2): +# set_seed(42) +# state = PartialState() +# model = resnet34() +# input_tensor = torch.rand(1, 3, 224, 224) +# model = prepare_pippy( +# model, +# example_args=(input_tensor,), +# ) +# inference_inputs = torch.rand(batch_size, 3, 224, 224) +# inputs = send_to_device(inference_inputs, torch_device) +# with torch.no_grad(): +# output = model(inputs) +# # Zach: Check that we just grab the real outputs we need at the end +# if not state.is_last_process: +# assert output is None, "Output was not generated on just the last process!" +# else: +# assert output is not None, "Output was not generated in the last process!" 
+ + +if __name__ == "__main__": + state = PartialState() + state.print("Testing pippy integration...") + try: + if state.distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_HPU]: + state.print("Testing GPT2...") + test_gpt2() + # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue + # due to references + # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope + # test_gpt2(3) + state.print("Testing BERT...") + test_bert() + else: + print("Less than two GPUs found, not running tests!") + finally: + state.destroy_process_group() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py new file mode 100644 index 0000000000000000000000000000000000000000..f5352b19980288115d2442229620105b8440d03d --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py @@ -0,0 +1,59 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch.distributed + +from accelerate.test_utils import require_huggingface_suite, torch_device +from accelerate.utils import is_transformers_available + + +if is_transformers_available(): + from transformers import AutoModel, TrainingArguments + + +GPT2_TINY = "sshleifer/tiny-gpt2" + + +@require_huggingface_suite +def init_torch_dist_then_launch_deepspeed(): + if torch_device == "xpu": + backend = "ccl" + elif torch_device == "hpu": + backend = "hccl" + else: + backend = "nccl" + + torch.distributed.init_process_group(backend=backend) + deepspeed_config = { + "zero_optimization": { + "stage": 3, + }, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + } + train_args = TrainingArguments( + output_dir="./", + deepspeed=deepspeed_config, + ) + model = AutoModel.from_pretrained(GPT2_TINY) + assert train_args is not None + assert model is not None + + +def main(): + init_torch_dist_then_launch_deepspeed() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..fc9dd1d36e8f2949d6e3cebb8ce65efbf0b9e5e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py @@ -0,0 +1,32 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import torch + +from accelerate.utils import is_xpu_available + + +def main(): + accelerator_type = "GPU" + num_accelerators = 0 + if torch.cuda.is_available(): + num_accelerators = torch.cuda.device_count() + accelerator_type = "GPU" + elif is_xpu_available(): + num_accelerators = torch.xpu.device_count() + accelerator_type = "XPU" + print(f"Successfully ran on {num_accelerators} {accelerator_type}s") + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ddp_comm_hook.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ddp_comm_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..0db5844e026d1c035670e518a8f81d33136ea665 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ddp_comm_hook.py @@ -0,0 +1,85 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import torch + +from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs, PartialState +from accelerate.utils import is_hpu_available + + +class MockModel(torch.nn.Module): + def __init__(self): + super().__init__() + torch.manual_seed(0) + self.p = torch.nn.Parameter(torch.randn(40, 20)) + + def forward(self, x, rank): + return self.p * (x ** (1 + rank)) + + +def _run_and_get_grads(model, rank): + torch.manual_seed(2024) + input = torch.randn(40, 20) + output = model(input, rank) + output.mean().backward() + param = next(model.parameters()) + return param.grad + + +def test_ddp_comm_hook(comm_hook, comm_wrapper, comm_state_option): + ddp_kwargs = DistributedDataParallelKwargs( + comm_hook=comm_hook, + comm_wrapper=comm_wrapper, + comm_state_option=comm_state_option, + ) + accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) + + model = accelerator.prepare(MockModel()) + hook_grads = _run_and_get_grads(model, accelerator.local_process_index) + + reference_model = torch.nn.parallel.DistributedDataParallel( + MockModel().to(accelerator.device), + device_ids=[accelerator.local_process_index], + output_device=accelerator.local_process_index, + ) + reference_grads = _run_and_get_grads(reference_model, accelerator.local_process_index) + + torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-2, atol=1e-2) + + +def main(): + for comm_hook, comm_wrapper, comm_state_option in [ + (DDPCommunicationHookType.NO, DDPCommunicationHookType.NO, {}), + (DDPCommunicationHookType.FP16, DDPCommunicationHookType.NO, {}), + (DDPCommunicationHookType.BF16, DDPCommunicationHookType.NO, {}), + (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.NO, {}), + (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.FP16, {}), + (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.BF16, {}), + (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.NO, {"matrix_approximation_rank": 2}), + 
(DDPCommunicationHookType.BATCHED_POWER_SGD, DDPCommunicationHookType.NO, {}), + (DDPCommunicationHookType.BATCHED_POWER_SGD, DDPCommunicationHookType.FP16, {}), + (DDPCommunicationHookType.BATCHED_POWER_SGD, DDPCommunicationHookType.BF16, {}), + ]: + if is_hpu_available(): + HPU_UNSUPPORTED_COMM_HOOKS = {DDPCommunicationHookType.FP16, DDPCommunicationHookType.BF16} + if comm_hook in HPU_UNSUPPORTED_COMM_HOOKS or comm_wrapper in HPU_UNSUPPORTED_COMM_HOOKS: + print(f"Skipping test DDP comm hook: {comm_hook}, comm wrapper: {comm_wrapper} on HPU") + continue + + print(f"Test DDP comm hook: {comm_hook}, comm wrapper: {comm_wrapper}") + test_ddp_comm_hook(comm_hook, comm_wrapper, comm_state_option) + PartialState().destroy_process_group() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..08cbbeb844bcc3189c99d328580c251aeafc2052 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py @@ -0,0 +1,410 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pickle +import tempfile +import warnings +from unittest.mock import Mock + +import torch +from torch.utils.data import ( + BatchSampler, + DataLoader, + Dataset, + IterableDataset, + RandomSampler, + TensorDataset, + default_collate, +) + +from accelerate.accelerator import Accelerator, DataLoaderConfiguration +from accelerate.utils.dataclasses import DistributedType + + +NUM_ELEMENTS = 22 +NUM_WORKERS = 4 +BATCH_SIZE = 4 + + +class DummyDataset(Dataset): + def __len__(self): + return NUM_ELEMENTS + + def __getitem__(self, index): + squeeze = False + + if isinstance(index, int): + index = [index] + squeeze = True + elif isinstance(index, slice): + index = list(range(*index.indices(self.size))) + else: + index = list(index) + + batch = [{"index": i, "label": i % 2, "random_augmentation": torch.rand(1).item()} for i in index] + + if squeeze: + batch = batch[0] + + return batch + + +class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __iter__(self): + yield from self.data + + +def create_accelerator(even_batches=True): + dataloader_config = DataLoaderConfiguration(even_batches=even_batches) + accelerator = Accelerator(dataloader_config=dataloader_config) + assert accelerator.num_processes == 2, "this script expects that two GPUs are available" + return accelerator + + +def create_dataloader( + accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False, shuffle: bool = False +): + """ + Create a simple DataLoader to use during the test cases + """ + values = torch.as_tensor(range(dataset_size)) + if shuffle: + values = values[torch.randperm(values.size(0))] + if iterable: + dataset = DummyIterableDataset(values) + else: + dataset = TensorDataset(torch.as_tensor(range(dataset_size))) + + dl = DataLoader(dataset, batch_size=batch_size) + dl = accelerator.prepare(dl) + + return dl + + +def verify_dataloader_batch_sizes( + accelerator: Accelerator, + dataset_size: int, + batch_size: int, + 
process_0_expected_batch_sizes: list[int], + process_1_expected_batch_sizes: list[int], +): + """ + A helper function for verifying the batch sizes coming from a prepared dataloader in each process + """ + dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size) + + batch_sizes = [len(batch[0]) for batch in dl] + + if accelerator.process_index == 0: + assert batch_sizes == process_0_expected_batch_sizes + elif accelerator.process_index == 1: + assert batch_sizes == process_1_expected_batch_sizes + + +def test_default_ensures_even_batch_sizes(): + accelerator = create_accelerator() + + # without padding, we would expect a different number of batches + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1, 1], + ) + + # without padding, we would expect the same number of batches, but different sizes + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 2], + ) + + +def test_can_disable_even_batches(): + accelerator = create_accelerator(even_batches=False) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1], + ) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 1], + ) + + +def test_can_join_uneven_inputs(): + accelerator = create_accelerator(even_batches=False) + + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + + dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + batch_idxs = [] + with accelerator.join_uneven_inputs([ddp_model]): + for batch_idx, batch in enumerate(dl): + output = ddp_model(batch[0].float()) + loss = output.sum() + loss.backward() + 
batch_idxs.append(batch_idx) + + accelerator.wait_for_everyone() + + if accelerator.process_index == 0: + assert batch_idxs == [0, 1] + elif accelerator.process_index == 1: + assert batch_idxs == [0] + + +def test_join_raises_warning_for_non_ddp_distributed(accelerator): + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([Mock()]): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for multi-GPU" in str(w[-1].message) + + +def test_join_can_override_even_batches(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + train_dl_overridden_value = train_dl.batch_sampler.even_batches + valid_dl_overridden_value = valid_dl.batch_sampler.even_batches + + assert train_dl_overridden_value == overridden_even_batches + assert valid_dl_overridden_value == overridden_even_batches + assert train_dl.batch_sampler.even_batches == default_even_batches + assert valid_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_can_override_for_mixed_type_dataloaders(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + batch_dl_overridden_value 
= batch_dl.batch_sampler.even_batches + except AttributeError: + # ensure attribute error is not raised when processing iterable dl + raise AssertionError + + assert batch_dl_overridden_value == overridden_even_batches + assert batch_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_raises_warning_for_iterable_when_overriding_even_batches(): + accelerator = create_accelerator() + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for map-style datasets" in str(w[-1].message) + + +def test_pickle_accelerator(): + accelerator = create_accelerator() + data_loader = create_dataloader(accelerator, dataset_size=32, batch_size=4) + _ = accelerator.prepare(data_loader) + pickled_accelerator = pickle.dumps(accelerator) + unpickled_accelerator = pickle.loads(pickled_accelerator) + # TODO: Maybe this should be implemented as __eq__ for AcceleratorState? + assert accelerator.state.__dict__ == unpickled_accelerator.state.__dict__ + + +def test_data_loader(data_loader, accelerator): + # Prepare the DataLoader + data_loader = accelerator.prepare(data_loader) + + all_examples = [] + for i, batch in enumerate(data_loader): + index, _ = accelerator.gather_for_metrics((batch["index"], batch["label"])) + all_examples.extend(index.detach().cpu().numpy().tolist()) + + # Sort the examples + sorted_all_examples = sorted(all_examples) + + # Check if all elements are present in the sorted list of iterated samples + assert len(set(sorted_all_examples)) == NUM_ELEMENTS, ( + "Not all the dataset elements have been iterated in an epoch due to duplication of samples across processes." 
+ ) + + +def test_stateful_dataloader(accelerator): + """ + Tests that a stateful dataloader can be iterated over, saved after a few batches using `load_state_dict`, and then + resumed from the saved state. + + The result should be the same as the rest of the data that iterated over after saving. + """ + old_dataloader_config = accelerator.dataloader_config + try: + accelerator.dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True) + prepared_dl = create_dataloader( + accelerator, dataset_size=32 * accelerator.num_processes, batch_size=4, iterable=True, shuffle=True + ) + untrained_batches = [] + # Calculate what step that will be + total_batches = 32 * accelerator.num_processes // (4 * accelerator.num_processes) + last_batch_num = total_batches - 1 + for step, batch in enumerate(prepared_dl): + # Step just before + if step == last_batch_num - 1: + state_dict = prepared_dl.state_dict() + if step >= last_batch_num: + # Otherwise grab the "unseen" batches + untrained_batches.append(batch) + not_skipped_batches = accelerator.gather(untrained_batches) + prepared_dl.load_state_dict(state_dict) + resumed_batches = [] + for batch in prepared_dl: + resumed_batches.append(batch) + resumed_batches = accelerator.gather(resumed_batches) + for b1, b2 in zip(not_skipped_batches, resumed_batches): + for v1, v2 in zip(b1, b2): + assert torch.equal(v1, v2), f"Batch {b1} and {b2} are not equal" + finally: + accelerator.dataloader_config = old_dataloader_config + + +def test_stateful_dataloader_save_state(accelerator): + """ + Tests that a stateful dataloader can be iterated over, saved after a few batches using `Accelerator.save_state`, + and then resumed from the saved state. + + The result should be the same as the rest of the data that iterated over after saving. 
+ """ + old_dataloader_config = accelerator.dataloader_config + try: + with tempfile.TemporaryDirectory() as tmpdir: + accelerator.dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True) + prepared_dl = create_dataloader( + accelerator, dataset_size=32 * accelerator.num_processes, batch_size=4, iterable=True, shuffle=True + ) + untrained_batches = [] + # Calculate what step that will be + total_batches = 32 * accelerator.num_processes // (4 * accelerator.num_processes) + last_batch_num = total_batches - 1 + for step, batch in enumerate(prepared_dl): + # Step just before + if step == last_batch_num - 1: + accelerator.save_state(tmpdir) + if step >= last_batch_num: + # Otherwise grab the "unseen" batches + untrained_batches.append(batch) + not_skipped_batches = accelerator.gather(untrained_batches) + accelerator.load_state(tmpdir) + resumed_batches = [] + for batch in prepared_dl: + resumed_batches.append(batch) + resumed_batches = accelerator.gather(resumed_batches) + for b1, b2 in zip(not_skipped_batches, resumed_batches): + for v1, v2 in zip(b1, b2): + assert torch.equal(v1, v2), f"Batch {b1} and {b2} are not equal" + finally: + accelerator.dataloader_config = old_dataloader_config + + +def main(): + accelerator = create_accelerator() + torch.manual_seed(accelerator.process_index) + + accelerator.print("Test that even_batches variable ensures uniform batches across processes") + test_default_ensures_even_batch_sizes() + + accelerator.print("Run tests with even_batches disabled") + test_can_disable_even_batches() + + accelerator.print("Test joining uneven inputs") + test_can_join_uneven_inputs() + + accelerator.print("Test overriding even_batches when joining uneven inputs") + test_join_can_override_even_batches() + + accelerator.print("Test overriding even_batches for mixed dataloader types") + test_join_can_override_for_mixed_type_dataloaders() + + accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") + 
test_join_raises_warning_for_iterable_when_overriding_even_batches() + + accelerator.print("Test join with non DDP distributed raises warning") + original_state = accelerator.state.distributed_type + accelerator.state.distributed_type = DistributedType.FSDP + test_join_raises_warning_for_non_ddp_distributed(accelerator) + accelerator.state.distributed_type = original_state + + accelerator.print("Test pickling an accelerator") + test_pickle_accelerator() + + dataset = DummyDataset() + + accelerator.print("Test DataLoader with shuffle=False") + loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + accelerator.print("Test DataLoader with shuffle=True") + loader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + accelerator.print("Test DataLoader with batch_sampler") + sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) + loader = DataLoader(dataset, batch_sampler=sampler, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + accelerator.print("Test DataLoader with sampler as an instance of `BatchSampler`") + sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) + loader = DataLoader(dataset, sampler=sampler, batch_size=None, collate_fn=default_collate, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + test_stateful_dataloader(accelerator) + test_stateful_dataloader_save_state(accelerator) + + accelerator.end_training() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_merge_weights.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_merge_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..f280c8fa17919367001389a8efcd78faaa041f0c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_merge_weights.py @@ -0,0 +1,158 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import logging +import shutil +from pathlib import Path + +import torch +from safetensors.torch import load_file +from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy, StateDictType +from torch.utils.data import DataLoader + +from accelerate import Accelerator, FullyShardedDataParallelPlugin +from accelerate.commands.merge import merge_command, merge_command_parser +from accelerate.state import AcceleratorState +from accelerate.test_utils import torch_device +from accelerate.test_utils.training import RegressionDataset +from accelerate.utils import merge_fsdp_weights, patch_environment, save_fsdp_model + + +logging.basicConfig(level=logging.INFO) + +parser = merge_command_parser() + + +class TinyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = torch.nn.Linear(16, 16) + self.activation = torch.nn.ReLU() + self.linear2 = torch.nn.Linear(16, 16) + self.softmax = torch.nn.Softmax() + + def forward(self, x): + return self.linear2(self.activation(self.linear1(x))) + + +def setup(): + if AcceleratorState._shared_state != {}: + AcceleratorState()._reset_state() + plugin = FullyShardedDataParallelPlugin( + sharding_strategy=ShardingStrategy.FULL_SHARD, 
state_dict_type=StateDictType.SHARDED_STATE_DICT + ) + model = TinyModel() + with patch_environment(fsdp_auto_wrap_policy="SIZE_BASED_WRAP"): + plugin.set_auto_wrap_policy(model) + accelerator = Accelerator(fsdp_plugin=plugin) + model = accelerator.prepare(model) + return model, plugin, accelerator + + +def mock_training(accelerator, model): + train_set = RegressionDataset(length=128, seed=42) + train_dl = DataLoader(train_set, batch_size=16, shuffle=False) + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + return model + + +def check_weights(operation, state_1, state_2): + for weight_1, weight_2 in zip(state_1.values(), state_2.values()): + if operation == "same": + assert torch.allclose(weight_1, weight_2) + else: + assert not torch.allclose(weight_1, weight_2) + + +def check_safetensors_weights(path, model): + safe_state_dict = load_file(path / "model.safetensors") + safe_loaded_model = TinyModel().to(torch_device) + check_weights("diff", model.state_dict(), safe_loaded_model.state_dict()) + safe_loaded_model.load_state_dict(safe_state_dict) + check_weights("same", model.state_dict(), safe_loaded_model.state_dict()) + + +def check_pytorch_weights(path, model): + nonsafe_state_dict = torch.load(path / "pytorch_model.bin", weights_only=True) + nonsafe_loaded_model = TinyModel().to(torch_device) + check_weights("diff", model.state_dict(), nonsafe_loaded_model.state_dict()) + nonsafe_loaded_model.load_state_dict(nonsafe_state_dict) + check_weights("same", model.state_dict(), nonsafe_loaded_model.state_dict()) + + +def test_merge_weights_safetensors(model, path): + # Should now be saved at `path/merged.safetensors` + merge_fsdp_weights(path / "pytorch_model_fsdp_0", path, 
safe_serialization=True) + check_safetensors_weights(path, model) + + +def test_merge_weights_command_safetensors(model, path): + args = parser.parse_args([str(path / "pytorch_model_fsdp_0"), str(path)]) + merge_command(args) + check_safetensors_weights(path, model) + + +def test_merge_weights_pytorch(model, path): + # Should now be saved at `path/merged.bin` + merge_fsdp_weights(path / "pytorch_model_fsdp_0", path, safe_serialization=False) + check_pytorch_weights(path, model) + + +def test_merge_weights_command_pytorch(model, path): + args = parser.parse_args([str(path / "pytorch_model_fsdp_0"), str(path), "--unsafe_serialization"]) + merge_command(args) + check_pytorch_weights(path, model) + + +if __name__ == "__main__": + # Note this test requires at least two accelerators! + model, plugin, accelerator = setup() + if accelerator.num_processes > 1: + try: + # Initial setup for things + out_path = Path("test_merge_weights_fsdp_weights") + if not out_path.exists(): + out_path.mkdir(parents=True, exist_ok=True) + + # Train briefly once weights aren't the baseline + model = mock_training(accelerator, model) + accelerator.wait_for_everyone() + + gc.collect() # Needed for some lingering refs after training + save_fsdp_model(plugin, accelerator, model, out_path) + accelerator.wait_for_everyone() + + # Finally we can test + test_merge_weights_safetensors(model, out_path) + test_merge_weights_command_safetensors(model, out_path) + test_merge_weights_pytorch(model, out_path) + test_merge_weights_command_pytorch(model, out_path) + except Exception: + raise + finally: + # Cleanup in case of any failures + if accelerator.is_main_process: + shutil.rmtree(out_path) + accelerator.wait_for_everyone() + accelerator.end_training() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py new file mode 100644 index 
0000000000000000000000000000000000000000..267c11b50b22250e781f94e3643b8895cc6aeb02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py @@ -0,0 +1,118 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Test file to ensure that in general certain situational setups for notebooks work. +""" + +import os +import time +from multiprocessing import Queue + +from pytest import mark, raises +from torch.distributed.elastic.multiprocessing.errors import ChildFailedError + +from accelerate import PartialState, notebook_launcher +from accelerate.test_utils import require_bnb +from accelerate.utils import is_bnb_available + + +def basic_function(): + # Just prints the PartialState + print(f"PartialState:\n{PartialState()}") + + +def tough_nut_function(queue: Queue): + if queue.empty(): + return + trial = queue.get() + if trial > 0: + queue.put(trial - 1) + raise RuntimeError("The nut hasn't cracked yet! Try again.") + + print(f"PartialState:\n{PartialState()}") + + +def bipolar_sleep_function(sleep_sec: int): + state = PartialState() + if state.process_index % 2 == 0: + raise RuntimeError("I'm an even process. 
I don't like to sleep.") + else: + time.sleep(sleep_sec) + + +NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1)) + + +def test_can_initialize(): + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) + + +@mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test static rendezvous backends") +def test_static_rdzv_backend(): + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES, rdzv_backend="static") + + +@mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test c10d rendezvous backends") +def test_c10d_rdzv_backend(): + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES, rdzv_backend="c10d") + + +@mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test fault tolerance") +def test_fault_tolerant(max_restarts: int = 3): + queue = Queue() + queue.put(max_restarts) + notebook_launcher(tough_nut_function, (queue,), num_processes=NUM_PROCESSES, max_restarts=max_restarts) + + +@mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test monitoring") +def test_monitoring(monitor_interval: float = 0.01, sleep_sec: int = 100): + start_time = time.time() + with raises(ChildFailedError, match="I'm an even process. I don't like to sleep."): + notebook_launcher( + bipolar_sleep_function, + (sleep_sec,), + num_processes=NUM_PROCESSES, + monitor_interval=monitor_interval, + ) + assert time.time() - start_time < sleep_sec, "Monitoring did not stop the process in time." 
+ + +@require_bnb +def test_problematic_imports(): + with raises(RuntimeError, match="Please keep these imports"): + import bitsandbytes as bnb # noqa: F401 + + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) + + +def main(): + print("Test basic notebook can be ran") + test_can_initialize() + print("Test static rendezvous backend") + test_static_rdzv_backend() + print("Test c10d rendezvous backend") + test_c10d_rdzv_backend() + print("Test fault tolerant") + test_fault_tolerant() + print("Test monitoring") + test_monitoring() + if is_bnb_available(): + print("Test problematic imports (bnb)") + test_problematic_imports() + if NUM_PROCESSES > 1: + PartialState().destroy_process_group() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f535d7b25a7bda527901787261591364545c09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch + +from accelerate import PartialState +from accelerate.test_utils.testing import assert_exception +from accelerate.utils.dataclasses import DistributedType +from accelerate.utils.operations import ( + DistributedOperationException, + broadcast, + copy_tensor_to_devices, + gather, + gather_object, + pad_across_processes, + reduce, +) + + +def create_tensor(state): + return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device) + + +def test_gather(state): + tensor = create_tensor(state) + gathered_tensor = gather(tensor) + assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1)) + + +def test_gather_object(state): + # Gather objects in TorchXLA is not supported. + if state.distributed_type == DistributedType.XLA: + return + obj = [state.process_index] + gathered_obj = gather_object(obj) + assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}" + assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}" + + +def test_gather_non_contigous(state): + # Skip this test because the 'is_contiguous' function of XLA tensor always returns True. 
+ if state.distributed_type == DistributedType.XLA: + return + + # Create a non-contiguous tensor (enforce non-contiguity after device memory allocation) + tensor = torch.arange(12, device=state.device).view(4, 3).t() + assert not tensor.is_contiguous() + # Shouldn't error out + _ = gather(tensor) + + +def test_broadcast(state): + tensor = create_tensor(state) + broadcasted_tensor = broadcast(tensor) + assert broadcasted_tensor.shape == torch.Size([state.num_processes]) + assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1)) + + +def test_pad_across_processes(state): + # We need to pad the tensor with one more element if we are the main process + # to ensure that we can pad + if state.is_main_process: + tensor = torch.arange(state.num_processes + 1).to(state.device) + else: + tensor = torch.arange(state.num_processes).to(state.device) + padded_tensor = pad_across_processes(tensor) + assert padded_tensor.shape == torch.Size([state.num_processes + 1]) + if not state.is_main_process: + assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0] + + +def test_reduce_sum(state): + # For now runs on only two processes + if state.num_processes != 2: + return + tensor = create_tensor(state) + reduced_tensor = reduce(tensor, "sum") + truth_tensor = torch.tensor([4.0, 6]).to(state.device) + assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" + + +def test_reduce_mean(state): + # For now runs on only two processes + if state.num_processes != 2: + return + tensor = create_tensor(state) + reduced_tensor = reduce(tensor, "mean") + truth_tensor = torch.tensor([2.0, 3]).to(state.device) + assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" + + +def test_op_checker(state): + # Must be in a distributed state, and gathering is currently not supported in TorchXLA. 
+ if state.distributed_type in [DistributedType.NO, DistributedType.XLA]: + return + state.debug = True + # `pad_across_processes` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + pad_across_processes(data, dim=0) + + # `reduce` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + reduce(data) + + # `broadcast` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + broadcast(data) + + state.debug = False + + +def test_copy_tensor_to_devices(state): + if state.distributed_type not in [DistributedType.MULTI_GPU, DistributedType.XLA]: + return + if state.is_main_process: + tensor = torch.tensor([1, 2, 3], dtype=torch.int).to(state.device) + else: + tensor = None + tensor = copy_tensor_to_devices(tensor) + assert torch.allclose(tensor, torch.tensor([1, 2, 3], dtype=torch.int, device=state.device)) + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +def main(): + state = PartialState() + state.print(f"State: {state}") + state.print("testing gather") + test_gather(state) + state.print("testing gather_object") + test_gather_object(state) + state.print("testing gather non-contigous") + test_gather_non_contigous(state) + state.print("testing broadcast") + test_broadcast(state) + state.print("testing pad_across_processes") + test_pad_across_processes(state) + state.print("testing reduce_sum") + test_reduce_sum(state) + state.print("testing reduce_mean") + 
test_reduce_mean(state) + state.print("testing op_checker") + test_op_checker(state) + state.print("testing sending tensors across devices") + test_copy_tensor_to_devices(state) + state.destroy_process_group() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py new file mode 100644 index 0000000000000000000000000000000000000000..fa0e3b601e38057e254067984042197098efdfd6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py @@ -0,0 +1,952 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import io +import math +import time +from copy import deepcopy +from pathlib import Path + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset + +from accelerate import Accelerator +from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader +from accelerate.state import AcceleratorState +from accelerate.test_utils import RegressionDataset, are_the_same_tensors +from accelerate.utils import ( + DataLoaderConfiguration, + DistributedType, + gather, + gather_object, + is_bf16_available, + is_datasets_available, + is_fp16_available, + is_hpu_available, + is_ipex_available, + is_pytest_available, + is_xpu_available, + set_seed, + synchronize_rng_states, +) + + +# TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting. +if is_xpu_available(): + from accelerate.test_utils import RegressionModel4XPU as RegressionModel +else: + from accelerate.test_utils import RegressionModel + +if is_hpu_available(): + ATOL = 1e-3 + RTOL = 1e-3 +else: + ATOL = 1e-6 + RTOL = 1e-6 + + +def generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler=False): + "Creates a dataloader that can also use the `SeedableRandomSampler`" + if use_seedable_sampler: + # The SeedableRandomSampler is needed during distributed setups + # for full reproducibility across processes with the `DataLoader` + sampler = SeedableRandomSampler( + generator=generator, + data_source=train_set, + num_samples=len(train_set), + ) + return DataLoader(train_set, batch_size=batch_size, sampler=sampler) + else: + return DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + + +def print_main(state): + print(f"Printing from the main process {state.process_index}") + + +def print_local_main(state): + print(f"Printing from the local main process {state.local_process_index}") + + +def print_last(state): + print(f"Printing from the last process {state.process_index}") + + +def 
print_on(state, process_idx): + print(f"Printing from process {process_idx}: {state.process_index}") + + +def process_execution_check(): + accelerator = Accelerator() + num_processes = accelerator.num_processes + # Test main_process_first context manager + path = Path("check_main_process_first.txt") + with accelerator.main_process_first(): + if accelerator.is_main_process: + time.sleep(0.1) # ensure main process takes longest + with open(path, "a+") as f: + f.write("Currently in the main process\n") + else: + with open(path, "a+") as f: + f.write("Now on another process\n") + accelerator.wait_for_everyone() + + if accelerator.is_main_process: + with open(path) as f: + text = "".join(f.readlines()) + try: + assert text.startswith("Currently in the main process\n"), "Main process was not first" + if num_processes > 1: + assert text.endswith("Now on another process\n"), "Main process was not first" + assert text.count("Now on another process\n") == accelerator.num_processes - 1, ( + f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}" + ) + except AssertionError: + path.unlink() + raise + + if accelerator.is_main_process and path.exists(): + path.unlink() + accelerator.wait_for_everyone() + # Test the decorators + f = io.StringIO() + with contextlib.redirect_stdout(f): + accelerator.on_main_process(print_main)(accelerator.state) + result = f.getvalue().rstrip() + if accelerator.is_main_process: + assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0" + else: + assert f.getvalue().rstrip() == "", f'{result} != ""' + f.truncate(0) + f.seek(0) + + with contextlib.redirect_stdout(f): + accelerator.on_local_main_process(print_local_main)(accelerator.state) + if accelerator.is_local_main_process: + assert f.getvalue().rstrip() == "Printing from the local main process 0" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + with 
contextlib.redirect_stdout(f): + accelerator.on_last_process(print_last)(accelerator.state) + if accelerator.is_last_process: + assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + for process_idx in range(num_processes): + with contextlib.redirect_stdout(f): + accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx) + if accelerator.process_index == process_idx: + assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + +def init_state_check(): + # Test we can instantiate this twice in a row. + state = AcceleratorState() + if state.local_process_index == 0: + print("Testing, testing. 1, 2, 3.") + print(state) + + +def rng_sync_check(): + state = AcceleratorState() + synchronize_rng_states(["torch"]) + assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU." + if state.distributed_type == DistributedType.MULTI_GPU: + synchronize_rng_states(["cuda"]) + assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU." + elif state.distributed_type == DistributedType.MULTI_XPU: + synchronize_rng_states(["xpu"]) + assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU." + generator = torch.Generator() + synchronize_rng_states(["generator"], generator=generator) + assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator." 
+ + if state.local_process_index == 0: + print("All rng are properly synched.") + + +def dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + if state.process_index == 0: + print("Non-shuffled dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." 
+ + if state.local_process_index == 0: + print("Shuffled dataloader passing.") + + +def central_dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + if state.process_index == 0: + print("Non-shuffled central dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." 
+ + if state.local_process_index == 0: + print("Shuffled central dataloader passing.") + + +def custom_sampler_check(): + state = AcceleratorState() + + class CustomDataset(Dataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + return self.data[index] + + class CustomBatchSampler: + def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True): + self.batch_size = batch_size + self.data_index = np.arange(dataset_length) + self.shuffle = shuffle + + def __iter__(self): + num_batches = len(self) + if self.shuffle: + index = np.random.permutation(self.data_index) + else: + index = self.data_index + output = np.array_split(index, num_batches) + yield from output + + def __len__(self): + return math.ceil(len(self.data_index) / self.batch_size) + + dataset = CustomDataset(range(32 * state.num_processes)) + sampler = CustomBatchSampler(len(dataset), batch_size=8) + dl = DataLoader(dataset, batch_sampler=sampler) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index) + # We need just ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler` is indeed the old batch sampler + if hasattr(dl.batch_sampler, "batch_sampler"): + assert isinstance(dl.batch_sampler.batch_sampler, CustomBatchSampler), ( + "Custom sampler was changed after calling `prepare_data_loader`" + ) + else: + assert isinstance(dl.batch_sampler, CustomBatchSampler), ( + "Custom sampler was changed after calling `prepare_data_loader`" + ) + + +def check_seedable_sampler(): + # Set seed + set_seed(42) + train_set = RegressionDataset(length=10, seed=42) + train_dl = DataLoader(train_set, batch_size=2, shuffle=True) + + config = DataLoaderConfiguration(use_seedable_sampler=True) + accelerator = Accelerator(dataloader_config=config) + train_dl = accelerator.prepare(train_dl) + original_items = [] + for _ in range(3): + for batch in train_dl: + 
original_items.append(batch["x"]) + original_items = torch.cat(original_items) + + # Set seed again and the epoch + set_seed(42) + train_dl.set_epoch(0) + new_items = [] + for _ in range(3): + for batch in train_dl: + new_items.append(batch["x"]) + new_items = torch.cat(new_items) + assert torch.allclose(original_items, new_items), "Did not obtain the same items with the same seed and epoch." + + +def check_seedable_sampler_in_batch_sampler_shard(): + set_seed(42) + + config = DataLoaderConfiguration(use_seedable_sampler=True) + accelerator = Accelerator(dataloader_config=config) + assert accelerator.num_processes > 1, "This test requires more than one process." + + dataloader = DataLoader(list(range(10)), batch_size=1, shuffle=True) + prepared_data_loader = prepare_data_loader( + dataloader=dataloader, + use_seedable_sampler=True, + ) + + target_sampler = prepared_data_loader.batch_sampler.batch_sampler.sampler + assert isinstance(target_sampler, SeedableRandomSampler), ( + "Sampler in BatchSamplerShard is not SeedableRandomSampler." + ) + + +def check_seedable_sampler_with_data_seed(): + # Set seed + set_seed(42) + data_seed = 42 + train_set = RegressionDataset(length=10, seed=42) + train_dl = DataLoader(train_set, batch_size=2, shuffle=True) + + config = DataLoaderConfiguration(use_seedable_sampler=True, data_seed=data_seed) + accelerator = Accelerator(dataloader_config=config) + prepared_dl = accelerator.prepare(train_dl) + original_items = [] + for _ in range(3): + for batch in prepared_dl: + original_items.append(batch["x"]) + original_items = torch.cat(original_items) + + # Set new data seed + config.data_seed = 43 + accelerator = Accelerator(dataloader_config=config) + prepared_dl = accelerator.prepare(train_dl) + new_items = [] + for _ in range(3): + for batch in prepared_dl: + new_items.append(batch["x"]) + new_items = torch.cat(new_items) + assert not torch.allclose(original_items, new_items), "Obtained the same items with different data seed." 
+ + +def mock_training(length, batch_size, generator, use_seedable_sampler=False): + set_seed(42) + generator.manual_seed(42) + train_set = RegressionDataset(length=length, seed=42) + + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + for epoch in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + loss.backward() + optimizer.step() + return train_set, model + + +def training_check(use_seedable_sampler=False): + state = AcceleratorState() + generator = torch.Generator() + batch_size = 8 + length = batch_size * 4 * state.num_processes + + train_set, old_model = mock_training(length, batch_size * state.num_processes, generator, use_seedable_sampler) + assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes." + assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes." 
+ + accelerator = Accelerator() + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + torch.testing.assert_close( + old_model.a, + model.a, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + torch.testing.assert_close( + old_model.b, + model.b, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + + accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.") + + dataloader_config = DataLoaderConfiguration(split_batches=True, use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader( + train_set, generator, batch_size * state.num_processes, use_seedable_sampler + ) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + torch.testing.assert_close( + old_model.a, + model.a, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + 
torch.testing.assert_close( + old_model.b, + model.b, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + + accelerator.print("Training yielded the same results on one CPU or distributed setup with batch split.") + + # FP32 wrapper check + if torch.cuda.is_available(): + # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True) + print("Keep fp32 wrapper check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="fp16") + + model = torch.nn.Linear(2, 4) + model = accelerator.prepare(model) + model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True) + + # Run forward with fp16 as input. + # When the model is with mixed precision wrapper, no error will be raised. + input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device) + output = model_with_fp32_wrapper(input_tensor) + + # BF16 support + if is_bf16_available(): + # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16 + print("BF16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="bf16", dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + torch.testing.assert_close( + old_model.a, + model.a, + 
atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + torch.testing.assert_close( + old_model.b, + model.b, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + + # FP16 support (HPU fp16 model seems to be off by 10% from the CPU, which is a lot of numerical error) + if is_fp16_available() and not is_hpu_available(): + # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16 + print("FP16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="fp16", dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + torch.testing.assert_close( + old_model.a, + model.a, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + torch.testing.assert_close( + old_model.b, + model.b, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + + # IPEX CPU tests + if is_ipex_available(): + print("ipex BF16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="bf16", cpu=True, 
dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + torch.testing.assert_close( + old_model.a, + model.a, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + torch.testing.assert_close( + old_model.b, + model.b, + atol=ATOL, + rtol=RTOL, + msg=lambda msg: f"Did not obtain the same model on CPU or distributed training.\n{msg}", + ) + + +def test_split_between_processes_dataset(datasets_Dataset): + state = AcceleratorState() + data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes)]) + with state.split_between_processes(data, apply_padding=False) as results: + assert len(results) == 2, ( + f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" + ) + + data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) + with state.split_between_processes(data, apply_padding=False) as results: + if state.is_last_process: + assert len(results) == 1, ( + f"Last process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" + ) + else: + assert len(results) == 2, ( + f"One of the intermediate processes did not receive two items. 
Process index: {state.process_index}; Length: {len(results)}" + ) + state.wait_for_everyone() + + odd_data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) + even_data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes)]) + + for data in [odd_data, even_data]: + expected_output = data["k"] + + with state.split_between_processes(data, apply_padding=True) as results: + if state.num_processes == 1: + assert len(results) == len(data), ( + f"Single process did not receive all items. Process index: {state.process_index}; Length: {len(results)}" + ) + else: + assert len(results) == 2, ( + f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" + ) + + results_per_process = [] + for result in results: + results_per_process.append(result) + + state.wait_for_everyone() + + gathered_results = gather_object(results_per_process) + output = [r["k"] for r in gathered_results[: len(data)]] + + assert expected_output == output, f"Gathered results is incorrect. Expected: {expected_output}; Got: {output}" + + +def test_split_between_processes_list(): + state = AcceleratorState() + data = list(range(0, 2 * state.num_processes)) + with state.split_between_processes(data) as results: + assert len(results) == 2, ( + f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" + ) + state.wait_for_everyone() + + even_data = list(range(0, (2 * state.num_processes))) + odd_data = list(range(0, (2 * state.num_processes) - 1)) + for data in [odd_data, even_data]: + expected_output = data + + with state.split_between_processes(data, apply_padding=True) as results: + num_samples_per_device = math.ceil(len(data) / state.num_processes) + # Test all processes gets the correct number of item(s) + assert len(results) == num_samples_per_device, ( + f"Process {state.device} did not get the correct number of item(s). 
Process index: {state.process_index}; Length: {len(results)}" + ) + + results_per_process = [] + for result in results: + results_per_process.append(result) + + state.wait_for_everyone() + + gathered_results = gather_object(results_per_process) + output = gathered_results[: len(data)] + + assert expected_output == output, f"Gathered results is incorrect. Expected: {expected_output}; Got: {output}" + + +def test_split_between_processes_nested_dict(): + state = AcceleratorState() + a = [1, 2, 3, 4, 5, 6, 7, 8] + b = ["a", "b", "c", "d", "e", "f", "g", "h"] + c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]) + if state.num_processes in (1, 2, 4): + data = {"a": a, "b": b, "c": c} + data_copy = deepcopy(data) + with state.split_between_processes(data) as results: + if state.process_index == 0: + assert results["a"] == data_copy["a"][: 8 // state.num_processes] + elif state.num_processes == 2: + assert results["a"] == data_copy["a"][4:] + elif state.process_index == 3: + # We return a list each time + assert results["a"] == data_copy["a"][-2:], f"Expected: {data_copy['a'][-2]}, Actual: {results['a']}" + if state.process_index == 0: + assert results["b"] == data_copy["b"][: 8 // state.num_processes] + elif state.num_processes == 2: + assert results["b"] == data_copy["b"][4:] + elif state.process_index == 3: + assert results["b"] == data_copy["b"][-2:] + if state.process_index == 0: + assert torch.allclose(results["c"], data_copy["c"][: 8 // state.num_processes]), ( + f"Did not obtain expected values on process 0, expected `{data['c'][: 8 // state.num_processes]}`, received: {results['c']}" + ) + elif state.num_processes == 2: + assert torch.allclose(results["c"], data_copy["c"][4:]), ( + f"Did not obtain expected values on process 2, expected `{data['c'][4:]}`, received: {results['c']}" + ) + elif state.process_index == 3: + assert torch.allclose(results["c"], data_copy["c"][-2:]), ( + f"Did not obtain expected values on process 4, expected `{data['c'][-2:]}`, received: 
{results['c']}" + ) + + state.wait_for_everyone() + + +def test_split_between_processes_tensor(): + state = AcceleratorState() + if state.num_processes > 1: + data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device) + with state.split_between_processes(data) as results: + if state.process_index == 0: + expected = torch.tensor([[0, 1, 2, 3]]).to(state.device) + else: + expected = torch.tensor([[4, 5, 6, 7]]).to(state.device) + torch.testing.assert_close(results, expected) + state.wait_for_everyone() + + even_data = torch.tensor([[i] for i in range(2 * state.num_processes)]).to(state.device) + odd_data = torch.tensor([[i] for i in range(2 * state.num_processes - 1)]).to(state.device) + for data in [even_data, odd_data]: + expected_output = [torch.tensor(i) for i in data.tolist()] + + with state.split_between_processes(data, apply_padding=True) as results: + num_samples_per_device = math.ceil(len(data) / state.num_processes) + assert len(results) == num_samples_per_device, ( + f"Process {state.device} did not get the correct number of item(s). Process index: {state.process_index}; Length: {len(results)}" + ) + results_per_process = [] + for result in results: + results_per_process.append(result.to("cpu")) + + state.wait_for_everyone() + + gathered_results = gather_object(results_per_process) + output = gathered_results[: len(data)] + + assert expected_output == output, f"Gathered results is incorrect. Expected: {expected_output}; Got: {output}" + + +def test_split_between_processes_evenly(): + state = AcceleratorState() + if state.num_processes in (1, 2, 4, 8): + data = list(range(17)) + num_samples_per_process = len(data) // state.num_processes + num_extras = len(data) % state.num_processes + with state.split_between_processes(data) as results: + if state.process_index < num_extras: + assert len(results) == num_samples_per_process + 1, ( + f"Each Process should have even elements. 
Expected: {num_samples_per_process + 1}, Actual: {len(results)}" + ) + else: + assert len(results) == num_samples_per_process, ( + f"Each Process should have even elements. Expected: {num_samples_per_process}, Actual: {len(results)}" + ) + state.wait_for_everyone() + + +def test_trigger(): + accelerator = Accelerator() + # should start with being false + assert accelerator.check_trigger() is False + + # set a breakpoint on the main process + if accelerator.is_main_process: + accelerator.set_trigger() + + # check it's been activated across all processes + # calls `all_reduce` and triggers a sync + assert accelerator.check_trigger() is True + + # check it's been reset after the sync + assert accelerator.check_trigger() is False + + +def test_reinstantiated_state(): + import pytest + + AcceleratorState._reset_state() + simple_model = torch.nn.Linear(1, 1) + # First define an accelerator + accelerator = Accelerator() + # Then call `reset_state`, breaking the state existing in the accelerator + AcceleratorState._reset_state() + # Now try and prepare a simple model, should raise the custom error early + with pytest.raises(AttributeError) as cm: + accelerator.prepare(simple_model) + assert "`AcceleratorState` object has no attribute" in str(cm.value.args[0]) + assert "This happens if `AcceleratorState._reset_state()`" in str(cm.value.args[0]) + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Initialization**") + init_state_check() + state.wait_for_everyone() + + if state.distributed_type == DistributedType.MULTI_GPU: + num_processes_per_node = torch.cuda.device_count() + else: + num_processes_per_node = state.num_processes + + # We only run this test on non-multinode + if num_processes_per_node == state.num_processes: + if state.process_index == 0: + print("\n**Test process execution**") + process_execution_check() + + if state.process_index == 0: + print("\n**Test split between processes as a 
list**") + test_split_between_processes_list() + + if state.process_index == 0: + print("\n**Test split between processes as a dict**") + test_split_between_processes_nested_dict() + + if state.process_index == 0: + print("\n**Test split between processes as a tensor**") + test_split_between_processes_tensor() + + if state.process_index == 0: + print("\n**Test split between processes evenly**") + test_split_between_processes_evenly() + + if state.process_index == 0: + print("\n**Test split between processes as a datasets.Dataset**") + if is_datasets_available(): + from datasets import Dataset as datasets_Dataset + + test_split_between_processes_dataset(datasets_Dataset) + else: + print("Skipped because Hugging Face datasets is not available") + + if state.local_process_index == 0: + print("\n**Test random number generator synchronization**") + rng_sync_check() + + if state.local_process_index == 0: + print("\n**DataLoader integration test**") + dl_preparation_check() + if state.distributed_type != DistributedType.XLA: + central_dl_preparation_check() + custom_sampler_check() + check_seedable_sampler() + check_seedable_sampler_with_data_seed() + + if state.num_processes > 1: + check_seedable_sampler_in_batch_sampler_shard() + + # Trainings are not exactly the same in DeepSpeed and CPU mode + if state.distributed_type == DistributedType.DEEPSPEED: + return + + if state.local_process_index == 0: + print("\n**Training integration test**") + training_check(use_seedable_sampler=False) + training_check(use_seedable_sampler=True) + + if state.local_process_index == 0: + print("\n**Breakpoint trigger test**") + test_trigger() + + if is_pytest_available(): + if state.local_process_index == 0: + print("\n**Test reinstantiated state**") + test_reinstantiated_state() + + state.destroy_process_group() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py 
b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..44e1ecc1d59c5691f284282fb8cd2259c8a70658 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py @@ -0,0 +1,410 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy + +import torch +import torch.nn.functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from torch.utils.data import DataLoader + +from accelerate.accelerator import Accelerator, DataLoaderConfiguration, GradientAccumulationPlugin +from accelerate.state import GradientState +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import DistributedType, set_seed + + +def check_model_parameters(model_a, model_b, did_step, iteration, **kwargs): + for param, grad_param in zip(model_a.parameters(), model_b.parameters()): + if not param.requires_grad: + continue + if not did_step: + # Grads should not be in sync + assert torch.allclose(param.grad, grad_param.grad, **kwargs) is False, ( + f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" + ) + else: + # Grads should be in sync + assert torch.allclose(param.grad, grad_param.grad, **kwargs) is True, ( + f"Gradients not in sync when 
they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" + ) + + +def step_model(model, input, target, accelerator, do_backward=True): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + if not do_backward: + loss /= accelerator.gradient_accumulation_steps + loss.backward() + else: + accelerator.backward(loss) + + +def get_training_setup(accelerator, sched=False): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=80) + dataloader = DataLoader(dset, batch_size=16) + model.to(accelerator.device) + if sched: + opt = AdamW(params=model.parameters(), lr=1e-3) + ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) + sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) + ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) + # Make a copy of `model` + if sched: + ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) + else: + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + if sched: + return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) + return model, ddp_model, dataloader + + +def test_noop_sync(accelerator): + # Test when on a single CPU or GPU that the context manager does nothing + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with 
accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync + check_model_parameters(model, ddp_model, True, iteration) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + assert torch.allclose(param.grad, ddp_param.grad), ( + f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + ) + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync(accelerator): + # Test on distributed setup that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if iteration % 2 == 0: + # Grads should not be in sync + assert torch.allclose(param.grad, ddp_param.grad) is False, ( + f"Gradients in sync when they should not be:\nModel grad ({param.grad}) 
== DDP grad ({ddp_param.grad})" + ) + else: + # Grads should be in sync + assert torch.allclose(param.grad, ddp_param.grad) is True, ( + f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + ) + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync_multiple_fwd(accelerator): + # Test on distributed setup that context manager behaves properly when used with multiple forwards followed by multiple backwards + model, ddp_model, dataloader = get_training_setup(accelerator) + # Do multiple forwards + losses = [] + num_iterations = 3 + for iteration in range(num_iterations): + ddp_input, ddp_target = next(iter(dataloader)).values() + + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + ddp_output = ddp_model(ddp_input) + loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device)) + losses.append(loss) + + # Do multiple backwards and sync only at the last backward + for iteration in range(num_iterations): + loss = losses[iteration] + + if iteration < num_iterations - 1: + # Accumulate grads locally + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should not be in sync + assert torch.allclose(param.grad, ddp_param.grad) is False, ( + f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + ) + + else: + # Sync grads if last backward + with 
accelerator.trigger_sync_in_backward(ddp_model): + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should be in sync + assert torch.allclose(param.grad, ddp_param.grad) is True, ( + f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + ) + + +def test_gradient_accumulation(split_batches=False, dispatch_batches=False, sync_each_batch=False): + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) + dataloader_config = DataLoaderConfiguration(split_batches=split_batches, dispatch_batches=dispatch_batches) + accelerator = Accelerator( + dataloader_config=dataloader_config, + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + # Test that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator, False) + # Do "gradient accumulation" (noop) + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1) or sync_each_batch: + # Grads should be in sync + assert torch.allclose(param.grad, ddp_param.grad) is True, ( + f"Gradients not in sync when they should be at 
iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + ) + else: + # Grads should not be in sync + assert torch.allclose(param.grad, ddp_param.grad) is False, ( + f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + ) + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + GradientState._reset_state() + + +def test_gradient_accumulation_with_opt_and_scheduler( + split_batches=False, dispatch_batches=False, sync_each_batch=False +): + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) + dataloader_config = DataLoaderConfiguration(split_batches=split_batches, dispatch_batches=dispatch_batches) + accelerator = Accelerator( + dataloader_config=dataloader_config, + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + # Test that context manager behaves properly + model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + model.train() + ddp_model.train() + step_model(model, input, target, accelerator, False) + opt.step() + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)): + if split_batches: + sched.step() + else: + for _ in range(accelerator.num_processes): + sched.step() + + # Perform gradient accumulation under wrapper + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + ddp_opt.step() + ddp_sched.step() + + # Learning rates should be the same + assert 
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"], ( + f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n" + ) + did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) + if accelerator.num_processes > 1: + check_model_parameters( + model, + ddp_model, + did_step or sync_each_batch, # syncs at each grad_accum interval of if sync_each_batch==True + iteration, + rtol=1e-3, # needs a relative tolerance due to roundoff errors + ) + + if did_step: + opt.zero_grad() # flush gradients every accum step + ddp_opt.zero_grad() + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + GradientState._reset_state() + + +def test_dataloader_break(): + accelerator = Accelerator() + first_dset = RegressionDataset(length=80) + first_dataloader = DataLoader(first_dset, batch_size=16) + second_dset = RegressionDataset(length=96) + second_dataloader = DataLoader(second_dset, batch_size=16) + first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader) + + assert accelerator.gradient_state.active_dataloader is None + for iteration, _ in enumerate(first_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader) + if iteration < len(first_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + if iteration == 1: + for batch_num, _ in enumerate(second_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader) + if batch_num < len(second_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + assert accelerator.gradient_state.active_dataloader is None + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Test 
`accumulate` gradient accumulation with dataloader break**") + if state.distributed_type != DistributedType.XLA: + test_dataloader_break() + if state.distributed_type == DistributedType.NO: + if state.local_process_index == 0: + print("**Test NOOP `no_sync` context manager**") + test_noop_sync(accelerator) + if state.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_CPU, + DistributedType.MULTI_HPU, + ): + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager**") + test_distributed_sync(accelerator) + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager with multiple forwards**") + test_distributed_sync_multiple_fwd(accelerator) + if state.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_HPU, + ): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + for sync_each_batch in [True, False]: + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", + ) + test_gradient_accumulation(split_batch, dispatch_batches, sync_each_batch) + + # Currently will break on torch 2.0 +, need to investigate why + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + "`split_batches=False`, `dispatch_batches=False`, `sync_each_batch=False`**", + ) + test_gradient_accumulation_with_opt_and_scheduler() + if state.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + 
DistributedType.MULTI_HPU, + ): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + for sync_each_batch in [True, False]: + if not split_batch and not dispatch_batches and not sync_each_batch: + continue + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", + ) + test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches, sync_each_batch) + state.destroy_process_group() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/testing.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..e964399784779fb9bf3d34269d08fd76d4e96319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/testing.py @@ -0,0 +1,870 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import inspect +import io +import os +import re +import shutil +import subprocess +import sys +import tempfile +import unittest +from contextlib import contextmanager +from functools import partial +from pathlib import Path +from typing import Union +from unittest import mock + +import torch + +import accelerate + +from ..state import AcceleratorState +from ..utils import ( + check_cuda_fp8_capability, + compare_versions, + gather, + is_aim_available, + is_bnb_available, + is_clearml_available, + is_comet_ml_available, + is_cuda_available, + is_datasets_available, + is_deepspeed_available, + is_dvclive_available, + is_fp8_available, + is_fp16_available, + is_habana_gaudi1, + is_hpu_available, + is_import_timer_available, + is_matplotlib_available, + is_mlflow_available, + is_mlu_available, + is_mps_available, + is_musa_available, + is_npu_available, + is_pandas_available, + is_pippy_available, + is_pytest_available, + is_schedulefree_available, + is_sdaa_available, + is_swanlab_available, + is_tensorboard_available, + is_timm_available, + is_torch_version, + is_torch_xla_available, + is_torchao_available, + is_torchdata_stateful_dataloader_available, + is_torchvision_available, + is_trackio_available, + is_transformer_engine_available, + is_transformers_available, + is_triton_available, + is_wandb_available, + is_xpu_available, + str_to_bool, +) + + +def get_backend(): + if is_torch_xla_available(): + return "xla", torch.cuda.device_count(), torch.cuda.memory_allocated + elif is_cuda_available(): + return "cuda", torch.cuda.device_count(), torch.cuda.memory_allocated + elif is_mps_available(min_version="2.0"): + return "mps", 1, torch.mps.current_allocated_memory + elif is_mps_available(): + return "mps", 1, lambda: 0 + elif is_mlu_available(): + return "mlu", torch.mlu.device_count(), torch.mlu.memory_allocated + elif is_sdaa_available(): + return "sdaa", torch.sdaa.device_count(), torch.sdaa.memory_allocated + elif is_musa_available(): + return 
"musa", torch.musa.device_count(), torch.musa.memory_allocated + elif is_npu_available(): + return "npu", torch.npu.device_count(), torch.npu.memory_allocated + elif is_xpu_available(): + return "xpu", torch.xpu.device_count(), torch.xpu.memory_allocated + elif is_hpu_available(): + return "hpu", torch.hpu.device_count(), torch.hpu.memory_allocated + else: + return "cpu", 1, lambda: 0 + + +torch_device, device_count, memory_allocated_func = get_backend() + + +def get_launch_command(**kwargs) -> list: + """ + Wraps around `kwargs` to help simplify launching from `subprocess`. + + Example: + ```python + # returns ['accelerate', 'launch', '--num_processes=2', '--device_count=2'] + get_launch_command(num_processes=2, device_count=2) + ``` + """ + command = ["accelerate", "launch"] + for k, v in kwargs.items(): + if isinstance(v, bool) and v: + command.append(f"--{k}") + elif v is not None: + command.append(f"--{k}={v}") + return command + + +DEFAULT_LAUNCH_COMMAND = get_launch_command(num_processes=device_count, monitor_interval=0.1) + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) + + +def skip(test_case): + "Decorator that skips a test unconditionally" + return unittest.skip("Test was skipped")(test_case) + + +def slow(test_case): + """ + Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a + truthy value to run them. 
+ """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def require_cpu(test_case): + """ + Decorator marking a test that must be only ran on the CPU. These tests are skipped when a GPU is available. + """ + return unittest.skipUnless(torch_device == "cpu", "test requires only a CPU")(test_case) + + +def require_non_cpu(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case) + + +def require_cuda(test_case): + """ + Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available or when + TorchXLA is available. + """ + return unittest.skipUnless(is_cuda_available() and not is_torch_xla_available(), "test requires a GPU")(test_case) + + +def require_cuda_or_hpu(test_case): + """ + Decorator marking a test that requires CUDA or HPU. These tests are skipped when there are no GPU available or when + TorchXLA is available. + """ + return unittest.skipUnless( + (is_cuda_available() and not is_torch_xla_available()) or is_hpu_available(), "test requires a GPU or HPU" + )(test_case) + + +def require_xpu(test_case): + """ + Decorator marking a test that requires XPU. These tests are skipped when there are no XPU available. + """ + return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case) + + +def require_cuda_or_xpu(test_case): + """ + Decorator marking a test that requires CUDA or XPU. These tests are skipped when there are no GPU available or when + TorchXLA is available. 
+ """ + cuda_condition = is_cuda_available() and not is_torch_xla_available() + xpu_condition = is_xpu_available() + return unittest.skipUnless(cuda_condition or xpu_condition, "test requires a CUDA GPU or XPU")(test_case) + + +def require_non_xpu(test_case): + """ + Decorator marking a test that should be skipped for XPU. + """ + return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case) + + +def require_non_hpu(test_case): + """ + Decorator marking a test that should be skipped for HPU. + """ + return unittest.skipUnless(torch_device != "hpu", "test requires a non-HPU")(test_case) + + +def require_fp16(test_case): + """ + Decorator marking a test that requires FP16. These tests are skipped when FP16 is not supported. + """ + + return unittest.skipUnless(is_fp16_available(), "test requires FP16 support")(test_case) + + +def require_fp8(test_case): + """ + Decorator marking a test that requires FP8. These tests are skipped when FP8 is not supported. + """ + + # is_fp8_available only checks for libraries + # ideally it should check for device capability as well + fp8_is_available = is_fp8_available() + + if torch.cuda.is_available() and not check_cuda_fp8_capability(): + fp8_is_available = False + + if is_hpu_available() and is_habana_gaudi1(): + fp8_is_available = False + + return unittest.skipUnless(fp8_is_available, "test requires FP8 support")(test_case) + + +def require_fsdp2(test_case): + return unittest.skipUnless(is_torch_version(">=", "2.5.0"), "test requires FSDP2 (torch >= 2.5.0)")(test_case) + + +def require_mlu(test_case): + """ + Decorator marking a test that requires MLU. These tests are skipped when there are no MLU available. + """ + return unittest.skipUnless(is_mlu_available(), "test require a MLU")(test_case) + + +def require_sdaa(test_case): + """ + Decorator marking a test that requires SDAA. These tests are skipped when there are no SDAA available. 
+ """ + return unittest.skipUnless(is_sdaa_available(), "test require a SDAA")(test_case) + + +def require_musa(test_case): + """ + Decorator marking a test that requires MUSA. These tests are skipped when there are no MUSA available. + """ + return unittest.skipUnless(is_musa_available(), "test require a MUSA")(test_case) + + +def require_npu(test_case): + """ + Decorator marking a test that requires NPU. These tests are skipped when there are no NPU available. + """ + return unittest.skipUnless(is_npu_available(), "test require a NPU")(test_case) + + +def require_mps(test_case): + """ + Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps` + backend. + """ + return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case) + + +def require_huggingface_suite(test_case): + """ + Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not. + """ + return unittest.skipUnless( + is_transformers_available() and is_datasets_available(), + "test requires the Hugging Face suite", + )(test_case) + + +def require_transformers(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires timm. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case) + + +def require_torchvision(test_case): + """ + Decorator marking a test that requires torchvision. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_torchvision_available(), "test requires the torchvision library")(test_case) + + +def require_triton(test_case): + """ + Decorator marking a test that requires triton. 
These tests are skipped when they are not. + """ + return unittest.skipUnless(is_triton_available(), "test requires the triton library")(test_case) + + +def require_schedulefree(test_case): + """ + Decorator marking a test that requires schedulefree. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_schedulefree_available(), "test requires the schedulefree library")(test_case) + + +def require_bnb(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case) + + +def require_tpu(test_case): + """ + Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available. + """ + return unittest.skipUnless(is_torch_xla_available(check_is_tpu=True), "test requires TPU")(test_case) + + +def require_non_torch_xla(test_case): + """ + Decorator marking a test as requiring an environment without TorchXLA. These tests are skipped when TorchXLA is + available. + """ + return unittest.skipUnless(not is_torch_xla_available(), "test requires an env without TorchXLA")(test_case) + + +def require_single_device(test_case): + """ + Decorator marking a test that requires a single device. These tests are skipped when there is no hardware + accelerator available or number of devices is more than one. + """ + return unittest.skipUnless( + torch_device != "cpu" and device_count == 1, "test requires a single device accelerator" + )(test_case) + + +def require_single_gpu(test_case): + """ + Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU + available or number of GPUs is more than one. + """ + return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case) + + +def require_single_xpu(test_case): + """ + Decorator marking a test that requires CUDA on a single XPU. 
These tests are skipped when there are no XPU + available or number of xPUs is more than one. + """ + return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case) + + +def require_multi_device(test_case): + """ + Decorator marking a test that requires a multi-device setup. These tests are skipped on a machine without multiple + devices. + """ + return unittest.skipUnless(device_count > 1, "test requires multiple hardware accelerators")(test_case) + + +def require_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple + GPUs. + """ + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_multi_xpu(test_case): + """ + Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple + XPUs. + """ + return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case) + + +def require_multi_gpu_or_xpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple + GPUs or XPUs. + """ + return unittest.skipUnless( + (is_cuda_available() or is_xpu_available()) and device_count > 1, "test requires multiple GPUs or XPUs" + )(test_case) + + +def require_deepspeed(test_case): + """ + Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed + """ + return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case) + + +def require_tp(test_case): + """ + Decorator marking a test that requires TP installed. 
These tests are skipped when TP isn't installed + """ + return unittest.skipUnless( + is_torch_version(">=", "2.3.0") and compare_versions("transformers", ">=", "4.52.0"), + "test requires torch version >= 2.3.0 and transformers version >= 4.52.0", + )(test_case) + + +def require_torch_min_version(test_case=None, version=None): + """ + Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an + installed torch version is less than the required one. + """ + if test_case is None: + return partial(require_torch_min_version, version=version) + return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case) + + +def require_tensorboard(test_case): + """ + Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't + installed + """ + return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case) + + +def require_wandb(test_case): + """ + Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed + """ + return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case) + + +def require_trackio(test_case): + """ + Decorator marking a test that requires trackio installed. These tests are skipped when trackio isn't installed + """ + return unittest.skipUnless(is_trackio_available(), "test requires trackio")(test_case) + + +def require_comet_ml(test_case): + """ + Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed + """ + return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case) + + +def require_aim(test_case): + """ + Decorator marking a test that requires aim installed. 
These tests are skipped when aim isn't installed + """ + return unittest.skipUnless(is_aim_available(), "test requires aim")(test_case) + + +def require_clearml(test_case): + """ + Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed + """ + return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case) + + +def require_dvclive(test_case): + """ + Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed + """ + return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case) + + +def require_swanlab(test_case): + """ + Decorator marking a test that requires swanlab installed. These tests are skipped when swanlab isn't installed + """ + return unittest.skipUnless(is_swanlab_available(), "test requires swanlab")(test_case) + + +def require_pandas(test_case): + """ + Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed + """ + return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case) + + +def require_mlflow(test_case): + """ + Decorator marking a test that requires mlflow installed. These tests are skipped when mlflow isn't installed + """ + return unittest.skipUnless(is_mlflow_available(), "test requires mlflow")(test_case) + + +def require_pippy(test_case): + """ + Decorator marking a test that requires pippy installed. These tests are skipped when pippy isn't installed It is + also checked if the test is running on a Gaudi1 device which doesn't support pippy. + """ + return unittest.skipUnless(is_pippy_available() and not is_habana_gaudi1(), "test requires pippy")(test_case) + + +def require_import_timer(test_case): + """ + Decorator marking a test that requires tuna interpreter installed. 
These tests are skipped when tuna isn't + installed + """ + return unittest.skipUnless(is_import_timer_available(), "test requires tuna interpreter")(test_case) + + +def require_transformer_engine(test_case): + """ + Decorator marking a test that requires transformers engine installed. These tests are skipped when transformers + engine isn't installed + """ + return unittest.skipUnless(is_transformer_engine_available(), "test requires transformers engine")(test_case) + + +def require_torchao(test_case): + """ + Decorator marking a test that requires torchao installed. These tests are skipped when torchao isn't installed + """ + return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case) + + +def require_matplotlib(test_case): + """ + Decorator marking a test that requires matplotlib installed. These tests are skipped when matplotlib isn't + installed + """ + return unittest.skipUnless(is_matplotlib_available(), "test requires matplotlib")(test_case) + + +_atleast_one_tracker_available = ( + any([is_wandb_available(), is_tensorboard_available(), is_trackio_available(), is_swanlab_available()]) + and not is_comet_ml_available() +) + + +def require_trackers(test_case): + """ + Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none + are installed + """ + return unittest.skipUnless( + _atleast_one_tracker_available, + "test requires at least one tracker to be available and for `comet_ml` to not be installed", + )(test_case) + + +def require_torchdata_stateful_dataloader(test_case): + """ + Decorator marking a test that requires torchdata.stateful_dataloader. + + These tests are skipped when torchdata with stateful_dataloader module isn't installed. + + """ + return unittest.skipUnless( + is_torchdata_stateful_dataloader_available(), "test requires torchdata.stateful_dataloader" + )(test_case) + + +def run_first(test_case): + """ + Decorator marking a test with order(1). 
When pytest-order plugin is installed, tests marked with this decorator are + garanteed to run first. + + This is especially useful in some test settings like on a Gaudi instance where a Gaudi device can only be used by a + single process at a time. So we make sure all tests that run in a subprocess are launched first, to avoid device + allocation conflicts. + + If pytest is not installed, test will be returned as is. + """ + + if is_pytest_available(): + import pytest + + return pytest.mark.order(1)(test_case) + return test_case + + +class TempDirTestCase(unittest.TestCase): + """ + A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its + data at the start of a test, and then destroyes it at the end of the TestCase. + + Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases + + The temporary directory location will be stored in `self.tmpdir` + """ + + clear_on_setup = True + + @classmethod + def setUpClass(cls): + "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`" + cls.tmpdir = Path(tempfile.mkdtemp()) + + @classmethod + def tearDownClass(cls): + "Remove `cls.tmpdir` after test suite has finished" + if os.path.exists(cls.tmpdir): + shutil.rmtree(cls.tmpdir) + + def setUp(self): + "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`" + if self.clear_on_setup: + for path in self.tmpdir.glob("**/*"): + if path.is_file(): + path.unlink() + elif path.is_dir(): + shutil.rmtree(path) + + +class AccelerateTestCase(unittest.TestCase): + """ + A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or utilizes + the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared between + tests. + """ + + def tearDown(self): + super().tearDown() + # Reset the state of the AcceleratorState singleton. 
+ AcceleratorState._reset_state(True) + + +class MockingTestCase(unittest.TestCase): + """ + A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the + behavior of a class-wide mock when defining one normally will not do. + + Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`, such as + setting an environment variable with that information. + + The `add_mocks` function should be ran at the end of a `TestCase`'s `setUp` function, after a call to + `super().setUp()` such as: + ```python + def setUp(self): + super().setUp() + mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR", "SOME_VALUE"}) + self.add_mocks(mocks) + ``` + """ + + def add_mocks(self, mocks: Union[mock.Mock, list[mock.Mock]]): + """ + Add custom mocks for tests that should be repeated on each test. Should be called during + `MockingTestCase.setUp`, after `super().setUp()`. + + Args: + mocks (`mock.Mock` or list of `mock.Mock`): + Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run + """ + self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks] + for m in self.mocks: + m.start() + self.addCleanup(m.stop) + + +def are_the_same_tensors(tensor): + state = AcceleratorState() + tensor = tensor[None].clone().to(state.device) + tensors = gather(tensor).cpu() + tensor = tensor[0].cpu() + for i in range(tensors.shape[0]): + if not torch.equal(tensors[i], tensor): + return False + return True + + +class _RunOutput: + def __init__(self, returncode, stdout, stderr): + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +async def _read_stream(stream, callback): + while True: + line = await stream.readline() + if line: + callback(line) + else: + break + + +async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: + if echo: + print("\nRunning: ", " ".join(cmd)) + + p = await 
asyncio.create_subprocess_exec( + cmd[0], + *cmd[1:], + stdin=stdin, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe + # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait + # + # If it starts hanging, will need to switch to the following code. The problem is that no data + # will be seen until it's done and if it hangs for example there will be no debug info. + # out, err = await p.communicate() + # return _RunOutput(p.returncode, out, err) + + out = [] + err = [] + + def tee(line, sink, pipe, label=""): + line = line.decode("utf-8").rstrip() + sink.append(line) + if not quiet: + print(label, line, file=pipe) + + # XXX: the timeout doesn't seem to make any difference here + await asyncio.wait( + [ + asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))), + asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))), + ], + timeout=timeout, + ) + return _RunOutput(await p.wait(), out, err) + + +def execute_subprocess_async(cmd: list, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: + # Cast every path in `cmd` to a string + for i, c in enumerate(cmd): + if isinstance(c, Path): + cmd[i] = str(c) + loop = asyncio.get_event_loop() + result = loop.run_until_complete( + _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) + ) + + cmd_str = " ".join(cmd) + if result.returncode > 0: + stderr = "\n".join(result.stderr) + raise RuntimeError( + f"'{cmd_str}' failed with returncode {result.returncode}\n\n" + f"The combined stderr from workers follows:\n{stderr}" + ) + + return result + + +def pytest_xdist_worker_id(): + """ + Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0 + if `-n 1` or 
`pytest-xdist` isn't being used. + """ + worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") + worker = re.sub(r"^gw", "", worker, 0, re.M) + return int(worker) + + +def get_torch_dist_unique_port(): + """ + Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument. + + Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same + port at once. + """ + port = 29500 + uniq_delta = pytest_xdist_worker_id() + return port + uniq_delta + + +class SubprocessCallException(Exception): + pass + + +def run_command(command: list[str], return_stdout=False, env=None): + """ + Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture + if an error occurred while running `command` + """ + # Cast every path in `command` to a string + for i, c in enumerate(command): + if isinstance(c, Path): + command[i] = str(c) + if env is None: + env = os.environ.copy() + try: + output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env) + if return_stdout: + if hasattr(output, "decode"): + output = output.decode("utf-8") + return output + except subprocess.CalledProcessError as e: + raise SubprocessCallException( + f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" + ) from e + + +def path_in_accelerate_package(*components: str) -> Path: + """ + Get a path within the `accelerate` package's directory. + + Args: + *components: Components of the path to join after the package directory. + + Returns: + `Path`: The path to the requested file or directory. + """ + + accelerate_package_dir = Path(inspect.getfile(accelerate)).parent + return accelerate_package_dir.joinpath(*components) + + +@contextmanager +def assert_exception(exception_class: Exception, msg: str = None) -> bool: + """ + Context manager to assert that the right `Exception` class was raised. 
+ + If `msg` is provided, will check that the message is contained in the raised exception. + """ + was_ran = False + try: + yield + was_ran = True + except Exception as e: + assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}" + if msg is not None: + assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'" + if was_ran: + raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.") + + +def capture_call_output(func, *args, **kwargs): + """ + Takes in a `func` with `args` and `kwargs` and returns the captured stdout as a string + """ + captured_output = io.StringIO() + original_stdout = sys.stdout + try: + sys.stdout = captured_output + func(*args, **kwargs) + except Exception as e: + raise e + finally: + sys.stdout = original_stdout + return captured_output.getvalue() diff --git a/venv/lib/python3.10/site-packages/accelerate/test_utils/training.py b/venv/lib/python3.10/site-packages/accelerate/test_utils/training.py new file mode 100644 index 0000000000000000000000000000000000000000..e71896c1f98bf47093d772cc77d7f27ba30c7ee8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/test_utils/training.py @@ -0,0 +1,162 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import torch +from torch.utils.data import DataLoader + +from accelerate.utils.dataclasses import DistributedType + + +class RegressionDataset: + def __init__(self, a=2, b=3, length=64, seed=None): + rng = np.random.default_rng(seed) + self.length = length + self.x = rng.normal(size=(length,)).astype(np.float32) + self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32) + + def __len__(self): + return self.length + + def __getitem__(self, i): + return {"x": self.x[i], "y": self.y[i]} + + +class RegressionModel4XPU(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.b = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a[0] + self.b[0] + + +class RegressionModel(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor(a).float()) + self.b = torch.nn.Parameter(torch.tensor(b).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a + self.b + + +def mocked_dataloaders(accelerator, batch_size: int = 16): + from datasets import load_dataset + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} + datasets = load_dataset("csv", data_files=data_files) + label_list = datasets["train"].unique("label") + + label_to_id = {v: i for i, v in enumerate(label_list)} + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer( + examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" + ) + if "label" in examples: + outputs["labels"] = [label_to_id[l] for l in examples["label"]] + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["sentence1", "sentence2", "label"], + ) + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2) + eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) + + return train_dataloader, eval_dataloader + + +def mocked_dataloaders_for_autoregressive_models(accelerator, batch_size: int = 16): + from datasets import load_dataset + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM-360M") + tokenizer.pad_token = tokenizer.eos_token + + data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} + datasets = load_dataset("csv", data_files=data_files) + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], truncation=True, max_length=None, return_attention_mask=False) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["sentence1", "sentence2", "label"], + ) + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. 
+ max_length = ( + 128 + if accelerator.distributed_type == DistributedType.XLA + else max([len(e["input_ids"]) for e in examples]) + ) + # When using mixed precision we want round multiples of 8/16 + if accelerator.mixed_precision == "fp8": + pad_to_multiple_of = 16 + elif accelerator.mixed_precision != "no": + pad_to_multiple_of = 8 + else: + pad_to_multiple_of = None + + batch = tokenizer.pad( + examples, + padding="max_length", + max_length=max_length + 1, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors="pt", + ) + + batch["labels"] = batch["input_ids"][:, 1:] + batch["input_ids"] = batch["input_ids"][:, :-1] + + batch["labels"] = torch.where(batch["labels"] == tokenizer.pad_token_id, -100, batch["labels"]) + + return batch + + # Instantiate dataloaders. + train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=False, collate_fn=collate_fn, batch_size=2) + eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) + + return train_dataloader, eval_dataloader diff --git a/venv/lib/python3.10/site-packages/accelerate/tracking.py b/venv/lib/python3.10/site-packages/accelerate/tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..29e0aece74eedabd6c7417c11609eb24aad8df50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/tracking.py @@ -0,0 +1,1326 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Expectation: +# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`} + +import json +import os +import time +from functools import wraps +from typing import Any, Optional, Union + +import yaml +from packaging import version + +from .logging import get_logger +from .state import PartialState +from .utils import ( + LoggerType, + compare_versions, + is_aim_available, + is_clearml_available, + is_comet_ml_available, + is_dvclive_available, + is_mlflow_available, + is_swanlab_available, + is_tensorboard_available, + is_trackio_available, + is_wandb_available, + listify, +) + + +_available_trackers = [] + +if is_tensorboard_available(): + _available_trackers.append(LoggerType.TENSORBOARD) + +if is_wandb_available(): + _available_trackers.append(LoggerType.WANDB) + +if is_comet_ml_available(): + _available_trackers.append(LoggerType.COMETML) + +if is_aim_available(): + _available_trackers.append(LoggerType.AIM) + +if is_mlflow_available(): + _available_trackers.append(LoggerType.MLFLOW) + +if is_clearml_available(): + _available_trackers.append(LoggerType.CLEARML) + +if is_dvclive_available(): + _available_trackers.append(LoggerType.DVCLIVE) + +if is_swanlab_available(): + _available_trackers.append(LoggerType.SWANLAB) + +if is_trackio_available(): + _available_trackers.append(LoggerType.TRACKIO) + +logger = get_logger(__name__) + + +def on_main_process(function): + """ + Decorator to selectively run the decorated function on the main process only based on the `main_process_only` + attribute in a class. + + Checks at function execution rather than initialization time, not triggering the initialization of the + `PartialState`. 
+ """ + + @wraps(function) + def execute_on_main_process(self, *args, **kwargs): + if getattr(self, "main_process_only", False): + return PartialState().on_main_process(function)(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + + return execute_on_main_process + + +def get_available_trackers(): + "Returns a list of all supported available trackers in the system" + return _available_trackers + + +class GeneralTracker: + """ + A base Tracker class to be used for all logging integration implementations. + + Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to + [`Accelerator`]. + + Should implement `name`, `requires_logging_directory`, and `tracker` properties such that: + + `name` (`str`): String representation of the tracker class name, such as "TensorBoard" `requires_logging_directory` + (`bool`): Whether the logger requires a directory to store their logs. `tracker` (`object`): Should return internal + tracking mechanism used by a tracker class (such as the `run` for wandb) + + Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevent logging, init, and + other functions should occur on the main process or across all processes (by default will use `True`) + """ + + main_process_only = True + + def __init__(self, _blank=False): + if not _blank: + err = "" + if not hasattr(self, "name"): + err += "`name`" + if not hasattr(self, "requires_logging_directory"): + if len(err) > 0: + err += ", " + err += "`requires_logging_directory`" + + # as tracker is a @property that relies on post-init + if "tracker" not in dir(self): + if len(err) > 0: + err += ", " + err += "`tracker`" + if len(err) > 0: + raise NotImplementedError( + f"The implementation for this tracker class is missing the following " + f"required attributes. 
Please define them in the class definition: " + f"{err}" + ) + + def start(self): + """ + Lazy initialization of the tracker inside Accelerator to avoid initializing PartialState before + InitProcessGroupKwargs. + """ + pass + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration + functionality of a tracking API. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + pass + + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with + special behavior for the `step parameter. + + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + pass + + def finish(self): + """ + Should run any finalizing functions within the tracking API. If the API should not have one, just don't + overwrite that method. + """ + pass + + +class TensorBoardTracker(GeneralTracker): + """ + A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run + logging_dir (`str`, `os.PathLike`): + Location for TensorBoard logs to be stored. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method. 
+ """ + + name = "tensorboard" + requires_logging_directory = True + + def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs): + super().__init__() + self.run_name = run_name + self.logging_dir_param = logging_dir + self.init_kwargs = kwargs + + @on_main_process + def start(self): + try: + from torch.utils import tensorboard + except ModuleNotFoundError: + import tensorboardX as tensorboard + self.logging_dir = os.path.join(self.logging_dir_param, self.run_name) + self.writer = tensorboard.SummaryWriter(self.logging_dir, **self.init_kwargs) + logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.add_hparams(values, metric_dict={}) + self.writer.flush() + project_run_name = time.time() + dir_name = os.path.join(self.logging_dir, str(project_run_name)) + os.makedirs(dir_name, exist_ok=True) + with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile: + try: + yaml.dump(values, outfile) + except yaml.representer.RepresenterError: + logger.error("Serialization to store hyperparameters failed") + raise + logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. 
+ + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `SummaryWriter.add_scaler`, + `SummaryWriter.add_text`, or `SummaryWriter.add_scalers` method based on the contents of `values`. + """ + values = listify(values) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.add_scalar(k, v, global_step=step, **kwargs) + elif isinstance(v, str): + self.writer.add_text(k, v, global_step=step, **kwargs) + elif isinstance(v, dict): + self.writer.add_scalars(k, v, global_step=step, **kwargs) + self.writer.flush() + logger.debug("Successfully logged to TensorBoard") + + @on_main_process + def log_images(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `images` to the current run. + + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `SummaryWriter.add_image` method. + """ + for k, v in values.items(): + self.writer.add_images(k, v, global_step=step, **kwargs) + logger.debug("Successfully logged images to TensorBoard") + + @on_main_process + def finish(self): + """ + Closes `TensorBoard` writer + """ + self.writer.close() + logger.debug("TensorBoard writer closed") + + +class WandBTracker(GeneralTracker): + """ + A `Tracker` class that supports `wandb`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. 
+ **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `wandb.init` method. + """ + + name = "wandb" + requires_logging_directory = False + main_process_only = False + + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + self.init_kwargs = kwargs + + @on_main_process + def start(self): + import wandb + + self.run = wandb.init(project=self.run_name, **self.init_kwargs) + logger.debug(f"Initialized WandB project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + import wandb + + if os.environ.get("WANDB_MODE") == "offline": + # In offline mode, restart wandb with config included + if hasattr(self, "run") and self.run: + self.run.finish() + + init_kwargs = self.init_kwargs.copy() + init_kwargs["config"] = values + self.run = wandb.init(project=self.run_name, **init_kwargs) + else: + wandb.config.update(values, allow_val_change=True) + logger.debug("Stored initial configuration hyperparameters to WandB") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. 
If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + self.run.log(values, step=step, **kwargs) + logger.debug("Successfully logged to WandB") + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + import wandb + + for k, v in values.items(): + self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs) + logger.debug("Successfully logged images to WandB") + + @on_main_process + def log_table( + self, + table_name: str, + columns: list[str] = None, + data: list[list[Any]] = None, + dataframe: Any = None, + step: Optional[int] = None, + **kwargs, + ): + """ + Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either + with `columns` and `data` or with `dataframe`. + + Args: + table_name (`str`): + The name to give to the logged table on the wandb workspace + columns (list of `str`, *optional*): + The name of the columns on the table + data (List of List of Any data type, *optional*): + The data to be logged in the table + dataframe (Any data type, *optional*): + The data to be logged in the table + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. 
+ """ + import wandb + + values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)} + self.log(values, step=step, **kwargs) + + @on_main_process + def finish(self): + """ + Closes `wandb` writer + """ + self.run.finish() + logger.debug("WandB run closed") + + +class TrackioTracker(GeneralTracker): + """ + A `Tracker` class that supports `trackio`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. Will be used as the `project` name when instantiating trackio. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `trackio.init` method. Refer to this + [init](https://github.com/gradio-app/trackio/blob/814809552310468b13f84f33764f1369b4e5136c/trackio/__init__.py#L22) + to see all supported key word arguments. + """ + + name = "trackio" + requires_logging_directory = False + main_process_only = False + + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + self.init_kwargs = kwargs + + @on_main_process + def start(self): + import trackio + + self.run = trackio.init(project=self.run_name, **self.init_kwargs) + logger.debug(f"Initialized trackio project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. 
+ """ + import trackio + + trackio.config.update(values, allow_val_change=True) + logger.debug("Stored initial configuration hyperparameters to trackio") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `trackio.log` method. + """ + self.run.log(values, **kwargs) + logger.debug("Successfully logged to trackio") + + @on_main_process + def finish(self): + """ + Closes `trackio` run + """ + self.run.finish() + logger.debug("trackio run closed") + + +class CometMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script. + + API keys must be stored in a Comet config file. + + Note: + For `comet_ml` versions < 3.41.0, additional keyword arguments are passed to `comet_ml.Experiment` instead: + https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/#comet_ml.Experiment.__init__ + + Args: + run_name (`str`): + The name of the experiment run. 
+ **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `comet_ml.start` method: + https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/start/ + """ + + name = "comet_ml" + requires_logging_directory = False + + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + self.init_kwargs = kwargs + + @on_main_process + def start(self): + import comet_ml + + comet_version = version.parse(comet_ml.__version__) + if compare_versions(comet_version, ">=", "3.41.0"): + self.writer = comet_ml.start(project_name=self.run_name, **self.init_kwargs) + else: + logger.info("Update `comet_ml` (>=3.41.0) for experiment reuse and offline support.") + self.writer = comet_ml.Experiment(project_name=self.run_name, **self.init_kwargs) + + logger.debug(f"Initialized CometML project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.log_parameters(values) + logger.debug("Stored initial configuration hyperparameters to Comet") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. 
If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`, + or `Experiment.log_metrics` method based on the contents of `values`. + """ + if step is not None: + self.writer.set_step(step) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.log_metric(k, v, step=step, **kwargs) + elif isinstance(v, str): + self.writer.log_other(k, v, **kwargs) + elif isinstance(v, dict): + self.writer.log_metrics(v, step=step, **kwargs) + logger.debug("Successfully logged to Comet") + + @on_main_process + def finish(self): + """ + Flush `comet-ml` writer + """ + self.writer.end() + logger.debug("Comet run flushed") + + +class AimTracker(GeneralTracker): + """ + A `Tracker` class that supports `aim`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `Run.__init__` method. + """ + + name = "aim" + requires_logging_directory = True + + def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs): + super().__init__() + self.run_name = run_name + self.aim_repo_path = logging_dir + self.init_kwargs = kwargs + + @on_main_process + def start(self): + from aim import Run + + self.writer = Run(repo=self.aim_repo_path, **self.init_kwargs) + self.writer.name = self.run_name + logger.debug(f"Initialized Aim project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. 
+ + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + self.writer["hparams"] = values + + @on_main_process + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `Run.track` method. + """ + # Note: replace this with the dictionary support when merged + for key, value in values.items(): + self.writer.track(value, name=key, step=step, **kwargs) + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[dict[str, dict]] = None): + """ + Logs `images` to the current run. + + Args: + values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`): + Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a + tuple is provided, the first element should be the image and the second element should be the caption. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs (`Dict[str, dict]`): + Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the + keys `aim_image` and `track`, respectively. 
+ """ + import aim + + aim_image_kw = {} + track_kw = {} + + if kwargs is not None: + aim_image_kw = kwargs.get("aim_image", {}) + track_kw = kwargs.get("track", {}) + + for key, value in values.items(): + if isinstance(value, tuple): + img, caption = value + else: + img, caption = value, "" + aim_image = aim.Image(img, caption=caption, **aim_image_kw) + self.writer.track(aim_image, name=key, step=step, **track_kw) + + @on_main_process + def finish(self): + """ + Closes `aim` writer + """ + self.writer.close() + + +class MLflowTracker(GeneralTracker): + """ + A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script. + + Args: + experiment_name (`str`, *optional*): + Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument. + logging_dir (`str` or `os.PathLike`, defaults to `"."`): + Location for mlflow logs to be stored. + run_id (`str`, *optional*): + If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s + end time is unset and its status is set to running, but the run’s other attributes (source_version, + source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument. + tags (`Dict[str, str]`, *optional*): + An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a + run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are + set on the new run. Environment variable MLFLOW_TAGS has priority over this argument. + nested_run (`bool`, *optional*, defaults to `False`): + Controls whether run is nested in parent run. True creates a nested run. Environment variable + MLFLOW_NESTED_RUN has priority over this argument. + run_name (`str`, *optional*): + Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified. 
+ description (`str`, *optional*): + An optional string that populates the description box of the run. If a run is being resumed, the + description is set on the resumed run. If a new run is being created, the description is set on the new + run. + """ + + name = "mlflow" + requires_logging_directory = False + + def __init__( + self, + experiment_name: str = None, + logging_dir: Optional[Union[str, os.PathLike]] = None, + run_id: Optional[str] = None, + tags: Optional[Union[dict[str, Any], str]] = None, + nested_run: Optional[bool] = False, + run_name: Optional[str] = None, + description: Optional[str] = None, + ): + experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME", experiment_name) + run_id = os.environ.get("MLFLOW_RUN_ID", run_id) + tags = os.environ.get("MLFLOW_TAGS", tags) + if isinstance(tags, str): + tags = json.loads(tags) + + nested_run = os.environ.get("MLFLOW_NESTED_RUN", nested_run) + + self.experiment_name = experiment_name + self.logging_dir = logging_dir + self.run_id = run_id + self.tags = tags + self.nested_run = nested_run + self.run_name = run_name + self.description = description + + @on_main_process + def start(self): + import mlflow + + exps = mlflow.search_experiments(filter_string=f"name = '{self.experiment_name}'") + if len(exps) > 0: + if len(exps) > 1: + logger.warning("Multiple experiments with the same name found. Using first one.") + experiment_id = exps[0].experiment_id + else: + experiment_id = mlflow.create_experiment( + name=self.experiment_name, + artifact_location=self.logging_dir, + tags=self.tags, + ) + + self.active_run = mlflow.start_run( + run_id=self.run_id, + experiment_id=experiment_id, + run_name=self.run_name, + nested=self.nested_run, + tags=self.tags, + description=self.description, + ) + + logger.debug(f"Initialized mlflow experiment {self.experiment_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" 
+ ) + + @property + def tracker(self): + return self.active_run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + import mlflow + + for name, value in list(values.items()): + # internally, all values are converted to str in MLflow + if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH: + logger.warning_once( + f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s' + f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute." + ) + del values[name] + + values_list = list(values.items()) + + # MLflow cannot log more than 100 values in one go, so we have to split it + for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH): + mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH])) + + logger.debug("Stored initial configuration hyperparameters to MLflow") + + @on_main_process + def log(self, values: dict, step: Optional[int]): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + metrics = {} + for k, v in values.items(): + if isinstance(v, (int, float)): + metrics[k] = v + else: + logger.warning_once( + f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. ' + "MLflow's log_metric() only accepts float and int types so we dropped this attribute." 
+ ) + import mlflow + + mlflow.log_metrics(metrics, step=step) + logger.debug("Successfully logged to mlflow") + + @on_main_process + def log_figure(self, figure: Any, artifact_file: str, **save_kwargs): + """ + Logs an figure to the current run. + + Args: + figure (Any): + The figure to be logged. + artifact_file (`str`, *optional*): + The run-relative artifact file path in posixpath format to which the image is saved. + If not provided, the image is saved to a default location. + **kwargs: + Additional keyword arguments passed to the underlying mlflow.log_image function. + """ + import mlflow + + mlflow.log_figure(figure=figure, artifact_file=artifact_file, **save_kwargs) + logger.debug("Successfully logged image to mlflow") + + @on_main_process + def log_artifacts(self, local_dir: str, artifact_path: Optional[str] = None): + """ + Logs an artifacts (all content of a dir) to the current run. + + local_dir (`str`): + Path to the directory to be logged as an artifact. + artifact_path (`str`, *optional*): + Directory within the run's artifact directory where the artifact will be logged. If omitted, the + artifact will be logged to the root of the run's artifact directory. The run step. If included, the + artifact will be affiliated with this step. + """ + import mlflow + + mlflow.log_artifacts(local_dir=local_dir, artifact_path=artifact_path) + logger.debug("Successfully logged artofact to mlflow") + + @on_main_process + def log_artifact(self, local_path: str, artifact_path: Optional[str] = None): + """ + Logs an artifact (file) to the current run. + + local_path (`str`): + Path to the file to be logged as an artifact. + artifact_path (`str`, *optional*): + Directory within the run's artifact directory where the artifact will be logged. If omitted, the + artifact will be logged to the root of the run's artifact directory. The run step. If included, the + artifact will be affiliated with this step. 
+ """ + import mlflow + + mlflow.log_artifact(local_path=local_path, artifact_path=artifact_path) + logger.debug("Successfully logged artofact to mlflow") + + @on_main_process + def finish(self): + """ + End the active MLflow run. + """ + import mlflow + + mlflow.end_run() + + +class ClearMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `clearml`. Should be initialized at the start of your script. + + Args: + run_name (`str`, *optional*): + Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this + argument. + **kwargs (additional keyword arguments, *optional*): + Kwargs passed along to the `Task.__init__` method. + """ + + name = "clearml" + requires_logging_directory = False + + def __init__(self, run_name: str = None, **kwargs): + super().__init__() + self.user_provided_run_name = run_name + self._initialized_externally = False + self.init_kwargs = kwargs + + @on_main_process + def start(self): + from clearml import Task + + current_task = Task.current_task() + if current_task: + self._initialized_externally = True + self.task = current_task + return + + task_init_args = {**self.init_kwargs} + task_init_args.setdefault("project_name", os.environ.get("CLEARML_PROJECT", self.user_provided_run_name)) + task_init_args.setdefault("task_name", os.environ.get("CLEARML_TASK", self.user_provided_run_name)) + self.task = Task.init(**task_init_args) + + @property + def tracker(self): + return self.task + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + return self.task.connect_configuration(values) + + @on_main_process + def log(self, values: dict[str, Union[int, float]], step: Optional[int] = None, **kwargs): + """ + Logs `values` dictionary to the current run. 
The dictionary keys must be strings. The dictionary values must be + ints or floats + + Args: + values (`Dict[str, Union[int, float]]`): + Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will + be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed. + Otherwise, the value will be reported under the 'train' series, and no prefix will be removed. + step (`int`, *optional*): + If specified, the values will be reported as scalars, with the iteration number equal to `step`. + Otherwise they will be reported as single values. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_single_value` or + `clearml.Logger.report_scalar` methods. + """ + clearml_logger = self.task.get_logger() + for k, v in values.items(): + if not isinstance(v, (int, float)): + logger.warning_once( + "Accelerator is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of ClearML logger's report_scalar() " + "is incorrect so we dropped this attribute." + ) + continue + if step is None: + clearml_logger.report_single_value(name=k, value=v, **kwargs) + continue + title, series = ClearMLTracker._get_title_series(k) + clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs) + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (`Dict[str, List[Union[np.ndarray, PIL.Image]]`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_image` method. 
+ """ + clearml_logger = self.task.get_logger() + for k, v in values.items(): + title, series = ClearMLTracker._get_title_series(k) + clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs) + + @on_main_process + def log_table( + self, + table_name: str, + columns: list[str] = None, + data: list[list[Any]] = None, + dataframe: Any = None, + step: Optional[int] = None, + **kwargs, + ): + """ + Log a Table to the task. Can be defined eitherwith `columns` and `data` or with `dataframe`. + + Args: + table_name (`str`): + The name of the table + columns (list of `str`, *optional*): + The name of the columns on the table + data (List of List of Any data type, *optional*): + The data to be logged in the table. If `columns` is not specified, then the first entry in data will be + the name of the columns of the table + dataframe (Any data type, *optional*): + The data to be logged in the table + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_table` method. + """ + to_report = dataframe + if dataframe is None: + if data is None: + raise ValueError( + "`ClearMLTracker.log_table` requires that `data` to be supplied if `dataframe` is `None`" + ) + to_report = [columns] + data if columns else data + title, series = ClearMLTracker._get_title_series(table_name) + self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs) + + @on_main_process + def finish(self): + """ + Close the ClearML task. If the task was initialized externally (e.g. 
by manually calling `Task.init`), this + function is a noop + """ + if self.task and not self._initialized_externally: + self.task.close() + + @staticmethod + def _get_title_series(name): + for prefix in ["eval", "test", "train"]: + if name.startswith(prefix + "_"): + return name[len(prefix) + 1 :], prefix + return name, "train" + + +class DVCLiveTracker(GeneralTracker): + """ + A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script. + + Args: + run_name (`str`, *optional*): + Ignored for dvclive. See `kwargs` instead. + kwargs: + Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live). + + Example: + + ```py + from accelerate import Accelerator + + accelerator = Accelerator(log_with="dvclive") + accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}}) + ``` + """ + + name = "dvclive" + requires_logging_directory = False + + def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs): + super().__init__() + self.live = live + self.init_kwargs = kwargs + + @on_main_process + def start(self): + from dvclive import Live + + self.live = self.live if self.live is not None else Live(**self.init_kwargs) + + @property + def tracker(self): + return self.live + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. + + Args: + values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, or `int`. + """ + self.live.log_params(values) + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. 
+ + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to `dvclive.Live.log_metric()`. + """ + from dvclive.plots import Metric + + if step is not None: + self.live.step = step + for k, v in values.items(): + if Metric.could_log(v): + self.live.log_metric(k, v, **kwargs) + else: + logger.warning_once( + "Accelerator attempted to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of DVCLive's Live.log_metric() " + "is incorrect so we dropped this attribute." + ) + self.live.next_step() + + @on_main_process + def finish(self): + """ + Closes `dvclive.Live()`. + """ + self.live.end() + + +class SwanLabTracker(GeneralTracker): + """ + A `Tracker` class that supports `swanlab`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `swanlab.init` method. + """ + + name = "swanlab" + requires_logging_directory = False + main_process_only = False + + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + self.init_kwargs = kwargs + + @on_main_process + def start(self): + import swanlab + + self.run = swanlab.init(project=self.run_name, **self.init_kwargs) + swanlab.config["FRAMEWORK"] = "🤗Accelerate" # add accelerate logo in config + logger.debug(f"Initialized SwanLab project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" 
+ ) + + @property + def tracker(self): + return self.run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + import swanlab + + swanlab.config.update(values, allow_val_change=True) + logger.debug("Stored initial configuration hyperparameters to SwanLab") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + data : Dict[str, DataType] + Data must be a dict. The key must be a string with 0-9, a-z, A-Z, " ", "_", "-", "/". The value must be a + `float`, `float convertible object`, `int` or `swanlab.data.BaseType`. + step : int, optional + The step number of the current data, if not provided, it will be automatically incremented. + If step is duplicated, the data will be ignored. + kwargs: + Additional key word arguments passed along to the `swanlab.log` method. Likes: + print_to_console : bool, optional + Whether to print the data to the console, the default is False. + """ + self.run.log(values, step=step, **kwargs) + logger.debug("Successfully logged to SwanLab") + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `swanlab.log` method. 
Likes: + print_to_console : bool, optional + Whether to print the data to the console, the default is False. + """ + import swanlab + + for k, v in values.items(): + self.log({k: [swanlab.Image(image) for image in v]}, step=step, **kwargs) + logger.debug("Successfully logged images to SwanLab") + + @on_main_process + def finish(self): + """ + Closes `swanlab` writer + """ + self.run.finish() + logger.debug("SwanLab run closed") + + +LOGGER_TYPE_TO_CLASS = { + "aim": AimTracker, + "comet_ml": CometMLTracker, + "mlflow": MLflowTracker, + "tensorboard": TensorBoardTracker, + "wandb": WandBTracker, + "clearml": ClearMLTracker, + "dvclive": DVCLiveTracker, + "swanlab": SwanLabTracker, + "trackio": TrackioTracker, +} + + +def filter_trackers( + log_with: list[Union[str, LoggerType, GeneralTracker]], + logging_dir: Union[str, os.PathLike] = None, +): + """ + Takes in a list of potential tracker types and checks that: + - The tracker wanted is available in that environment + - Filters out repeats of tracker types + - If `all` is in `log_with`, will return all trackers in the environment + - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` + + Args: + log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): + A list of loggers to be setup for experiment tracking. Should be one or several of: + + - `"all"` + - `"tensorboard"` + - `"wandb"` + - `"trackio"` + - `"aim"` + - `"comet_ml"` + - `"mlflow"` + - `"dvclive"` + - `"swanlab"` + If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can + also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. + logging_dir (`str`, `os.PathLike`, *optional*): + A path to a directory for storing logs of locally-compatible loggers. 
+ """ + loggers = [] + if log_with is not None: + if not isinstance(log_with, (list, tuple)): + log_with = [log_with] + if "all" in log_with or LoggerType.ALL in log_with: + loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers() + else: + for log_type in log_with: + if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker): + raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}") + if issubclass(type(log_type), GeneralTracker): + loggers.append(log_type) + else: + log_type = LoggerType(log_type) + if log_type not in loggers: + if log_type in get_available_trackers(): + tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)] + if tracker_init.requires_logging_directory: + if logging_dir is None: + raise ValueError( + f"Logging with `{log_type}` requires a `logging_dir` to be passed in." + ) + loggers.append(log_type) + else: + logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.") + + return loggers diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__init__.py b/venv/lib/python3.10/site-packages/accelerate/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d051a8fb1a1ef96dd734c39b0bdd4740e657632f --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/__init__.py @@ -0,0 +1,303 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from ..parallelism_config import ParallelismConfig +from .ao import convert_model_to_fp8_ao, filter_first_and_last_linear_layers, has_ao_layers +from .constants import ( + MITA_PROFILING_AVAILABLE_PYTORCH_VERSION, + MODEL_NAME, + OPTIMIZER_NAME, + PROFILE_PATTERN_NAME, + RNG_STATE_NAME, + SAFE_MODEL_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + SAFE_WEIGHTS_PATTERN_NAME, + SAMPLER_NAME, + SCALER_NAME, + SCHEDULER_NAME, + TORCH_DISTRIBUTED_OPERATION_TYPES, + TORCH_LAUNCH_PARAMS, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + WEIGHTS_PATTERN_NAME, + XPU_PROFILING_AVAILABLE_PYTORCH_VERSION, +) +from .dataclasses import ( + AORecipeKwargs, + AutocastKwargs, + BnbQuantizationConfig, + ComputeEnvironment, + CustomDtype, + DataLoaderConfiguration, + DDPCommunicationHookType, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FP8RecipeKwargs, + FullyShardedDataParallelPlugin, + GradientAccumulationPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + MSAMPRecipeKwargs, + PrecisionType, + ProfileKwargs, + ProjectConfiguration, + RNGType, + SageMakerDistributedType, + TensorInformation, + TERecipeKwargs, + TorchContextParallelConfig, + TorchDynamoPlugin, + TorchTensorParallelConfig, + TorchTensorParallelPlugin, + add_model_config_to_megatron_parser, +) +from .environment import ( + are_libraries_initialized, + check_cuda_fp8_capability, + check_cuda_p2p_ib_support, + clear_environment, + convert_dict_to_env_variables, + get_cpu_distributed_information, + get_gpu_info, + get_int_from_env, + parse_choice_from_env, + parse_flag_from_env, + patch_environment, + purge_accelerate_environment, + set_numa_affinity, + str_to_bool, +) +from .imports import ( + deepspeed_required, + get_ccl_version, + is_4bit_bnb_available, + is_8bit_bnb_available, + is_aim_available, + is_bf16_available, 
+ is_bitsandbytes_multi_backend_available, + is_bnb_available, + is_boto3_available, + is_ccl_available, + is_clearml_available, + is_comet_ml_available, + is_cuda_available, + is_datasets_available, + is_deepspeed_available, + is_dvclive_available, + is_fp8_available, + is_fp16_available, + is_habana_gaudi1, + is_hpu_available, + is_import_timer_available, + is_ipex_available, + is_lomo_available, + is_matplotlib_available, + is_megatron_lm_available, + is_mlflow_available, + is_mlu_available, + is_mps_available, + is_msamp_available, + is_musa_available, + is_npu_available, + is_pandas_available, + is_peft_available, + is_pippy_available, + is_pynvml_available, + is_pytest_available, + is_rich_available, + is_sagemaker_available, + is_schedulefree_available, + is_sdaa_available, + is_swanlab_available, + is_tensorboard_available, + is_timm_available, + is_torch_xla_available, + is_torchao_available, + is_torchdata_available, + is_torchdata_stateful_dataloader_available, + is_torchvision_available, + is_trackio_available, + is_transformer_engine_available, + is_transformers_available, + is_triton_available, + is_wandb_available, + is_weights_only_available, + is_xccl_available, + is_xpu_available, + torchao_required, +) +from .modeling import ( + align_module_device, + calculate_maximum_sizes, + check_device_map, + check_tied_parameters_in_config, + check_tied_parameters_on_same_device, + compute_module_sizes, + convert_file_size_to_int, + dtype_byte_size, + find_tied_parameters, + get_balanced_memory, + get_grad_scaler, + get_max_layer_size, + get_max_memory, + get_mixed_precision_context_manager, + has_offloaded_params, + id_tensor_storage, + infer_auto_device_map, + is_peft_model, + load_checkpoint_in_model, + load_offloaded_weights, + load_state_dict, + named_module_tensors, + retie_parameters, + set_module_tensor_to_device, +) +from .offload import ( + OffloadedWeightsLoader, + PrefixedDataset, + extract_submodules_state_dict, + load_offloaded_weight, + 
offload_state_dict, + offload_weight, + save_offload_index, +) +from .operations import ( + CannotPadNestedTensorWarning, + GatheredParameters, + broadcast, + broadcast_object_list, + concatenate, + convert_outputs_to_fp32, + convert_to_fp32, + copy_tensor_to_devices, + find_batch_size, + find_device, + gather, + gather_object, + get_data_structure, + honor_type, + ignorant_find_batch_size, + initialize_tensors, + is_namedtuple, + is_tensor_information, + is_torch_tensor, + listify, + pad_across_processes, + pad_input_tensors, + recursively_apply, + reduce, + send_to_device, + slice_tensors, +) +from .versions import compare_versions, is_torch_version + + +if is_deepspeed_available(): + from .deepspeed import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, + HfDeepSpeedConfig, + get_active_deepspeed_plugin, + map_pytorch_optim_to_deepspeed, + ) + +from .bnb import has_4bit_bnb_layers, load_and_quantize_model +from .fsdp_utils import ( + disable_fsdp_ram_efficient_loading, + enable_fsdp_ram_efficient_loading, + ensure_weights_retied, + fsdp2_apply_ac, + fsdp2_canonicalize_names, + fsdp2_load_full_state_dict, + fsdp2_prepare_model, + fsdp2_switch_optimizer_parameters, + get_fsdp2_grad_scaler, + load_fsdp_model, + load_fsdp_optimizer, + merge_fsdp_weights, + save_fsdp_model, + save_fsdp_optimizer, +) +from .launch import ( + PrepareForLaunch, + _filter_args, + prepare_deepspeed_cmd_env, + prepare_multi_gpu_env, + prepare_sagemager_args_inputs, + prepare_simple_launcher_cmd_env, + prepare_tpu, +) + +# For docs +from .megatron_lm import ( + AbstractTrainStep, + BertTrainStep, + GPTTrainStep, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + T5TrainStep, + avg_losses_across_data_parallel_group, +) + + +if is_megatron_lm_available(): + from .megatron_lm import ( + MegatronEngine, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + gather_across_data_parallel_groups, + ) + from 
.megatron_lm import initialize as megatron_lm_initialize + from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader + from .megatron_lm import prepare_model_optimizer_scheduler as megatron_lm_prepare_model_optimizer_scheduler + from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer + from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler +from .memory import find_executable_batch_size, release_memory +from .other import ( + check_os_kernel, + clean_state_dict_for_safetensors, + compile_regions, + compile_regions_deepspeed, + convert_bytes, + extract_model_from_parallel, + get_module_children_bottom_up, + get_pretty_name, + has_compiled_regions, + is_compiled_module, + is_port_in_use, + load, + merge_dicts, + model_has_dtensor, + recursive_getattr, + save, + wait_for_everyone, + write_basic_config, +) +from .random import set_seed, synchronize_rng_state, synchronize_rng_states +from .torch_xla import install_xla +from .tqdm import tqdm +from .transformer_engine import ( + apply_fp8_autowrap, + contextual_fp8_autocast, + convert_model, + has_transformer_engine_layers, +) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05216c87768410f022eb9400e699afdca430f0c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/ao.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/ao.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27c5b08339581a45527beac2eb429af76b80a5fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/ao.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f740a5f98cd2ed4576c1a6569bbfe2747a6963cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1335dd59f397a498035dd52de07a0734ab30d70a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2c75c9a5761c0bb436375b3973909464a3707de Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb66f0659bd59446434f7c34c20ea5339ee7817c Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6cf912f80955e265667fdb59c6de5e606d9727 
Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8740df81ad590170458beaf138b3011725a48f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..283fd4062662a5369085fa95e1faa402f6a00d9b Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..407f7c4e971d23253292211be80cc41d80183bc9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6eedd21f41c564844689a83d18ce82bb7255f081 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78ee03533c00597a370072d7f971a07749117b37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82ae9b4821765b2b877c0d32d7f659360ac918a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd9f94fbff296c146b70bd33252e9577e57ba617 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03d6b8ff3b118671c6edd56148d3229beb2dc156 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15e836752053e4a68d5b58039388c9951f978d58 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..510bb1f5297a7cc26da4adb37aff32beb109ea3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dd6844c218175f3ab867733b3da411a6532a9d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..297c8a8f3631326de2f528864be1d5d8fa5cf69a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..378550dd4a166a07c45fd91850218108f7e0664d Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..290ad6a17a9c0b12aee8ded53b4c15e1c9f02471 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/ao.py b/venv/lib/python3.10/site-packages/accelerate/utils/ao.py new file mode 100644 index 0000000000000000000000000000000000000000..73155615b768a01f709bb8e0857617ba58a6ec83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/ao.py @@ -0,0 +1,140 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Needed utilities for torchao FP8 training. +""" + +from functools import partial +from typing import TYPE_CHECKING, Callable, Optional + +import torch + +from .imports import is_torchao_available, torchao_required + + +if TYPE_CHECKING: + if is_torchao_available(): + from torchao.float8.float8_linear import Float8LinearConfig + + +def find_first_last_linear_layers(model: torch.nn.Module): + """ + Finds the first and last linear layer names in a model. + + This is needed during FP8 to avoid issues with instability by keeping the first and last layers unquantized. 
+ + Ref: https://x.com/xariusrke/status/1826669142604141052 + """ + first_linear, last_linear = None, None + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if first_linear is None: + first_linear = name + last_linear = name + return first_linear, last_linear + + +def filter_linear_layers(module, fqn: str, layers_to_filter: list[str]) -> bool: + """ + A function which will check if `module` is: + - a `torch.nn.Linear` layer + - has in_features and out_features divisible by 16 + - is not part of `layers_to_filter` + + Args: + module (`torch.nn.Module`): + The module to check. + fqn (`str`): + The fully qualified name of the layer. + layers_to_filter (`List[str]`): + The list of layers to filter. + """ + if isinstance(module, torch.nn.Linear): + if module.in_features % 16 != 0 or module.out_features % 16 != 0: + return False + if fqn in layers_to_filter: + return False + return True + + +def filter_first_and_last_linear_layers(module, fqn: str) -> bool: + """ + A filter function which will filter out all linear layers except the first and last. + + + + For stability reasons, we skip the first and last linear layers Otherwise can lead to the model not training or + converging properly + + + + Args: + module (`torch.nn.Module`): + The module to check. + fqn (`str`): + The fully qualified name of the layer. 
+ """ + first_linear, last_linear = find_first_last_linear_layers(module) + return filter_linear_layers(module, fqn, layers_to_filter=[first_linear, last_linear]) + + +@torchao_required +def has_ao_layers(model: torch.nn.Module): + from torchao.float8.float8_linear import Float8Linear + + for name, module in model.named_modules(): + if isinstance(module, Float8Linear): + return True + return False + + +@torchao_required +def convert_model_to_fp8_ao( + model: torch.nn.Module, + config: Optional["Float8LinearConfig"] = None, + module_filter_func: Optional[Callable] = filter_first_and_last_linear_layers, +): + """ + Converts all `nn.Linear` layers in the model (except the first and last) to torchao's `Float8Linear` layer inplace. + + Args: + model (`torch.nn.Module`): + The model to convert. + config (`torchao.float8.Float8LinearConfig`, *optional*): + The configuration for the FP8 training. Recommended to utilize + `torchao.float8.recipe_name_to_linear_config` to generate this. In general, the default config should be + sufficient (what is passed when set to `None`). + module_filter_func (`Callable`, *optional*, defaults to `filter_linear_layers`): + Optional function that must take in a module and layer name, and returns a boolean indicating whether the + module should be converted to FP8. Defaults to `filter_linear_layers`. See it for an example. 
+ + Example: + + ```python + from accelerate.utils.ao import convert_model_to_fp8_ao + + model = MyModel() + model.to("cuda") + convert_to_float8_training(model) + + model.train() + ``` + """ + from torchao.float8 import convert_to_float8_training + + first_linear, last_linear = find_first_last_linear_layers(model) + if module_filter_func is None: + module_filter_func = partial(filter_linear_layers, layers_to_filter=[first_linear, last_linear]) + convert_to_float8_training(model, module_filter_fn=module_filter_func, config=config) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/bnb.py b/venv/lib/python3.10/site-packages/accelerate/utils/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..af4aa541233d21193aa5b302ee37c0b57dae4926 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/bnb.py @@ -0,0 +1,469 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging +import os +from copy import deepcopy +from typing import Optional, Union + +import torch +import torch.nn as nn + +from accelerate.utils.imports import ( + is_4bit_bnb_available, + is_8bit_bnb_available, +) + +from ..big_modeling import dispatch_model, init_empty_weights +from .dataclasses import BnbQuantizationConfig +from .modeling import ( + find_tied_parameters, + get_balanced_memory, + infer_auto_device_map, + load_checkpoint_in_model, + offload_weight, + set_module_tensor_to_device, +) + + +logger = logging.getLogger(__name__) + + +def load_and_quantize_model( + model: torch.nn.Module, + bnb_quantization_config: BnbQuantizationConfig, + weights_location: Union[str, os.PathLike] = None, + device_map: Optional[dict[str, Union[int, str, torch.device]]] = None, + no_split_module_classes: Optional[list[str]] = None, + max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + offload_state_dict: bool = False, +): + """ + This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the + model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the + model is already loaded, we will quantize the model and put the model on the GPU, + + Args: + model (`torch.nn.Module`): + Input model. The model can be already loaded or on the meta device + bnb_quantization_config (`BnbQuantizationConfig`): + The bitsandbytes quantization parameters + weights_location (`str` or `os.PathLike`): + The folder weights_location to load. It can be: + - a path to a file containing a whole model state dict + - a path to a `.json` file containing the index to a sharded checkpoint + - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. + - a path to a folder containing a unique pytorch_model.bin file. 
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. + no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_state_dict (`bool`, *optional*, defaults to `False`): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. + + Returns: + `torch.nn.Module`: The quantized model + """ + + load_in_4bit = bnb_quantization_config.load_in_4bit + load_in_8bit = bnb_quantization_config.load_in_8bit + + if load_in_8bit and not is_8bit_bnb_available(): + raise ImportError( + "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," + " make sure you have the latest version of `bitsandbytes` installed." + ) + if load_in_4bit and not is_4bit_bnb_available(): + raise ValueError( + "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," + "make sure you have the latest version of `bitsandbytes` installed." 
+ ) + + modules_on_cpu = [] + # custom device map + if isinstance(device_map, dict) and len(device_map.keys()) > 1: + modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] + + # We keep some modules such as the lm_head in their original dtype for numerical stability reasons + if bnb_quantization_config.skip_modules is None: + bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) + + # add cpu modules to skip modules only for 4-bit modules + if load_in_4bit: + bnb_quantization_config.skip_modules.extend(modules_on_cpu) + modules_to_not_convert = bnb_quantization_config.skip_modules + + # We add the modules we want to keep in full precision + if bnb_quantization_config.keep_in_fp32_modules is None: + bnb_quantization_config.keep_in_fp32_modules = [] + keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules + modules_to_not_convert.extend(keep_in_fp32_modules) + + # compatibility with peft + model.is_loaded_in_4bit = load_in_4bit + model.is_loaded_in_8bit = load_in_8bit + + model_device = get_parameter_device(model) + if model_device.type != "meta": + # quantization of an already loaded model + logger.warning( + "It is not recommended to quantize a loaded model. " + "The model should be instantiated under the `init_empty_weights` context manager." 
+ ) + model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) + # convert param to the right dtype + dtype = bnb_quantization_config.torch_dtype + for name, param in model.state_dict().items(): + if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): + param.to(torch.float32) + if param.dtype != torch.float32: + name = name.replace(".weight", "").replace(".bias", "") + param = getattr(model, name, None) + if param is not None: + param.to(torch.float32) + elif torch.is_floating_point(param): + param.to(dtype) + if model_device.type == "cuda": + model.cuda(torch.cuda.current_device()) + torch.cuda.empty_cache() + elif torch.cuda.is_available(): + model.to(torch.cuda.current_device()) + elif torch.xpu.is_available(): + model.to(torch.xpu.current_device()) + else: + raise RuntimeError("No GPU or Intel XPU found. A GPU or Intel XPU is needed for quantization.") + logger.info( + f"The model device type is {model_device.type}. However, gpu or intel xpu is needed for quantization." + "We move the model to it." 
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """
    Build (or validate) the device map used to dispatch a model that is being quantized with bitsandbytes.

    Args:
        model (`torch.nn.Module`): The (meta-initialized) model to dispatch.
        bnb_quantization_config: The `BnbQuantizationConfig` describing the 8-bit/4-bit settings.
        device_map (`str` or `dict`, *optional*): Either a strategy name (`"auto"`, `"balanced"`,
            `"balanced_low_0"`, `"sequential"`) or an explicit module-to-device mapping.
        max_memory (`dict`, *optional*): Maximum memory per device, forwarded to the memory balancer.
        no_split_module_classes (`list`, *optional*): Module class names that must not be split across devices.

    Returns:
        `dict`: The resolved device map.

    Raises:
        RuntimeError: If no accelerator (CUDA/XPU) is available and no `device_map` was given.
        ValueError: If a string `device_map` is not a supported strategy, or if a 4-bit quantized module
            would be dispatched to CPU/disk.
    """
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        elif torch.xpu.is_available():
            device_map = {"": torch.xpu.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        # Fixed typo: missing space after the period in the original message.
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        # Modules that are skipped or kept in fp32 must be sized with their real dtype,
        # not the quantized target dtype, when balancing memory across devices.
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # Check that no quantized module ends up on the cpu or disk (only valid for 8-bit offload).
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    # Fixed doubled word ("are are") in the original message.
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """
    A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules or by
    `bnb.nn.Linear4bit` modules from the `bitsandbytes` library. The replacement is performed recursively.

    Parameters:
        model (`torch.nn.Module`):
            Input model or `torch.nn.Module` as the function is run recursively.
        bnb_quantization_config:
            Quantization settings (8-bit vs 4-bit and their respective options).
        modules_to_not_convert (`List[str]`, *optional*):
            Names of the modules to not quantize. In practice the `lm_head` is kept in full precision for
            numerical stability reasons.
        current_key_name (`List[str]`, *optional*):
            Tracks the dotted path of the recursion; used to match entries of `modules_to_not_convert`.

    Returns:
        `torch.nn.Module`: The converted model.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    """
    Private method that wraps the recursion for module replacement.

    Returns the converted model and a boolean that indicates if the conversion has been successful or not.
    """
    # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
    import bitsandbytes as bnb

    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check whether the dotted path of the current module matches any entry of
            # `modules_to_not_convert` (either as a full match or as a dotted prefix).
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
+ """ + # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily + import bitsandbytes as bnb + + has_been_replaced = False + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + if isinstance(module, nn.Linear) and name not in modules_to_not_convert: + # Check if the current key is not in the `modules_to_not_convert` + current_key_name_str = ".".join(current_key_name) + proceed = True + for key in modules_to_not_convert: + if ( + (key in current_key_name_str) and (key + "." in current_key_name_str) + ) or key == current_key_name_str: + proceed = False + break + if proceed: + # Load bnb module with empty weight and replace ``nn.Linear` module + if bnb_quantization_config.load_in_8bit: + bnb_module = bnb.nn.Linear8bitLt( + module.in_features, + module.out_features, + module.bias is not None, + has_fp16_weights=False, + threshold=bnb_quantization_config.llm_int8_threshold, + ) + elif bnb_quantization_config.load_in_4bit: + bnb_module = bnb.nn.Linear4bit( + module.in_features, + module.out_features, + module.bias is not None, + bnb_quantization_config.bnb_4bit_compute_dtype, + compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, + quant_type=bnb_quantization_config.bnb_4bit_quant_type, + ) + else: + raise ValueError("load_in_8bit and load_in_4bit can't be both False") + bnb_module.weight.data = module.weight.data + if module.bias is not None: + bnb_module.bias.data = module.bias.data + bnb_module.requires_grad_(False) + setattr(model, name, bnb_module) + has_been_replaced = True + if len(list(module.children())) > 0: + _, _has_been_replaced = _replace_with_bnb_layers( + module, bnb_quantization_config, modules_to_not_convert, current_key_name + ) + has_been_replaced = has_been_replaced | _has_been_replaced + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced + + +def get_keys_to_not_convert(model): + 
r""" + An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules + we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want + to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in + int8. + + Parameters: + model (`torch.nn.Module`): + Input model + """ + # Create a copy of the model + with init_empty_weights(): + tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` + + tied_params = find_tied_parameters(tied_model) + # For compatibility with Accelerate < 0.18 + if isinstance(tied_params, dict): + tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) + else: + tied_keys = sum(tied_params, []) + has_tied_params = len(tied_keys) > 0 + + # Check if it is a base model + is_base_model = False + if hasattr(model, "base_model_prefix"): + is_base_model = not hasattr(model, model.base_model_prefix) + + # Ignore this for base models (BertModel, GPT2Model, etc.) 
+ if (not has_tied_params) and is_base_model: + return [] + + # otherwise they have an attached head + list_modules = list(model.named_children()) + list_last_module = [list_modules[-1][0]] + + # add last module together with tied weights + intersection = set(list_last_module) - set(tied_keys) + list_untouched = list(set(tied_keys)) + list(intersection) + + # remove ".weight" from the keys + names_to_remove = [".weight", ".bias"] + filtered_module_names = [] + for name in list_untouched: + for name_to_remove in names_to_remove: + if name_to_remove in name: + name = name.replace(name_to_remove, "") + filtered_module_names.append(name) + + return filtered_module_names + + +def has_4bit_bnb_layers(model): + """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model""" + # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily + import bitsandbytes as bnb + + for m in model.modules(): + if isinstance(m, bnb.nn.Linear4bit): + return True + return False + + +def get_parameter_device(parameter: nn.Module): + return next(parameter.parameters()).device + + +def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics): + # if it is not quantized, we quantize and offload the quantized weights and the SCB stats + if fp16_statistics is None: + set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param) + tensor_name = param_name + module = model + if "." 
in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + # offload weights + module._parameters[tensor_name].requires_grad = False + offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index) + if hasattr(module._parameters[tensor_name], "SCB"): + offload_weight( + module._parameters[tensor_name].SCB, + param_name.replace("weight", "SCB"), + offload_folder, + index=offload_index, + ) + else: + offload_weight(param, param_name, offload_folder, index=offload_index) + offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index) + + set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size())) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/constants.py b/venv/lib/python3.10/site-packages/accelerate/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..2e2be5434b240105b523044d53988ee498bf8605 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/constants.py @@ -0,0 +1,105 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Stand-alone constants used across Accelerate: checkpoint file names, supported
# back-end option lists, version gates, and torch.distributed launcher argument names.

import operator as op

import torch


# --- Checkpoint / state file names -------------------------------------------
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
SAFE_MODEL_NAME = "model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
SAMPLER_NAME = "sampler"
PROFILE_PATTERN_NAME = "profile_{suffix}.json"
WEIGHTS_NAME = f"{MODEL_NAME}.bin"
WEIGHTS_PATTERN_NAME = "pytorch_model{suffix}.bin"
WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json"
SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors"
SAFE_WEIGHTS_PATTERN_NAME = "model{suffix}.safetensors"
SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json"

# --- SageMaker launch defaults ------------------------------------------------
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]

# --- FSDP option names and version gates --------------------------------------
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP2_STATE_DICT_TYPE = ["SHARDED_STATE_DICT", "FULL_STATE_DICT"]
FSDP_PYTORCH_VERSION = (
    "2.1.0.a0+32f93b1"  # Technically should be 2.1.0, but MS-AMP uses this specific prerelease in their Docker image.
)
FSDP2_PYTORCH_VERSION = "2.6.0"
FSDP_MODEL_NAME = "pytorch_model_fsdp"

# --- Launcher / dynamo / profiling / parallelism version gates ----------------
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich", "nossh", "slurm"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION = "2.2.0"
XPU_PROFILING_AVAILABLE_PYTORCH_VERSION = "2.4.0"
MITA_PROFILING_AVAILABLE_PYTORCH_VERSION = "2.1.0"
BETA_TP_AVAILABLE_PYTORCH_VERSION = "2.3.0"

BETA_TP_AVAILABLE_TRANSFORMERS_VERSION = "4.52.0"
BETA_CP_AVAILABLE_PYTORCH_VERSION = "2.6.0"

# Maps a comparison-operator string (e.g. ">=") to the matching rich-comparison function.
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

# --- Distributed type groupings -----------------------------------------------
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM", "TP"]
TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + [
    "MULTI_NPU",
    "MULTI_MLU",
    "MULTI_SDAA",
    "MULTI_MUSA",
    "MULTI_XPU",
    "MULTI_CPU",
    "MULTI_HPU",
]

# Layer types whose weights may be upcast when preparing a model for mixed precision.
SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING = (
    torch.nn.Conv1d,
    torch.nn.Conv2d,
    torch.nn.Conv3d,
    torch.nn.ConvTranspose1d,
    torch.nn.ConvTranspose2d,
    torch.nn.ConvTranspose3d,
    torch.nn.Linear,
)
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +General namespace and dataclass related classes +""" + +import argparse +import copy +import enum +import functools +import logging +import os +import warnings +from collections.abc import Iterable +from contextlib import contextmanager +from dataclasses import dataclass, field +from datetime import timedelta +from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union, get_args + +import torch + +from .constants import ( + BETA_CP_AVAILABLE_PYTORCH_VERSION, + BETA_TP_AVAILABLE_PYTORCH_VERSION, + BETA_TP_AVAILABLE_TRANSFORMERS_VERSION, + FSDP2_PYTORCH_VERSION, + FSDP_AUTO_WRAP_POLICY, + FSDP_BACKWARD_PREFETCH, + FSDP_SHARDING_STRATEGY, + MITA_PROFILING_AVAILABLE_PYTORCH_VERSION, + XPU_PROFILING_AVAILABLE_PYTORCH_VERSION, +) +from .environment import parse_flag_from_env, str_to_bool +from .imports import ( + is_cuda_available, + is_hpu_available, + is_mlu_available, + is_msamp_available, + is_musa_available, + is_npu_available, + is_transformer_engine_available, + is_xpu_available, +) +from .versions import compare_versions, is_torch_version + + +if TYPE_CHECKING: + # Mock imports for type checking + from torchao.float8 import Float8LinearConfig + + +logger = logging.getLogger(__name__) + + +class KwargsHandler: + """ + Internal mixin that implements a `to_kwargs()` method for a dataclass. 
+ """ + + def to_dict(self): + return copy.deepcopy(self.__dict__) + + def to_kwargs(self): + """ + Returns a dictionary containing the attributes with values different from the default of this class. + """ + # import clear_environment here to avoid circular import problem + from .environment import clear_environment + + with clear_environment(): + default_dict = self.__class__().to_dict() + this_dict = self.to_dict() + return {k: v for k, v in this_dict.items() if default_dict[k] != v} + + +class EnumWithContains(enum.EnumMeta): + "A metaclass that adds the ability to check if `self` contains an item with the `in` operator" + + def __contains__(cls, item): + try: + cls(item) + except ValueError: + return False + return True + + +class BaseEnum(enum.Enum, metaclass=EnumWithContains): + "An enum class that can get the value of an item with `str(Enum.key)`" + + def __str__(self): + return self.value + + @classmethod + def list(cls): + "Method to list all the possible items in `cls`" + return list(map(str, cls)) + + +@dataclass +class AutocastKwargs(KwargsHandler): + """ + Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the + documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more + information on each argument. + + Example: + + ```python + from accelerate import Accelerator + from accelerate.utils import AutocastKwargs + + kwargs = AutocastKwargs(cache_enabled=True) + accelerator = Accelerator(kwargs_handlers=[kwargs]) + ``` + """ + + enabled: bool = True + cache_enabled: bool = None + + +class DDPCommunicationHookType(BaseEnum): + """ + Represents a type of communication hook used in DDP. 
@dataclass
class DistributedDataParallelKwargs(KwargsHandler):
    """
    Customizes how a model is wrapped in `torch.nn.parallel.DistributedDataParallel` by the [`Accelerator`].
    See the [DDP wrapper docs](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html)
    for the meaning of each argument.

    <Tip>

    `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.

    `static_graph` is only available in PyTorch 1.11.0 and later versions.

    </Tip>

    Example:

    ```python
    from accelerate import Accelerator
    from accelerate.utils import DistributedDataParallelKwargs

    kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[kwargs])
    ```
    """

    dim: int = 0
    broadcast_buffers: bool = True
    bucket_cap_mb: int = 25
    find_unused_parameters: bool = False
    check_reduction: bool = False
    gradient_as_bucket_view: bool = False
    static_graph: bool = False

    comm_hook: DDPCommunicationHookType = DDPCommunicationHookType.NO
    comm_wrapper: Literal[
        DDPCommunicationHookType.NO,
        DDPCommunicationHookType.FP16,
        DDPCommunicationHookType.BF16,
    ] = DDPCommunicationHookType.NO
    comm_state_option: dict = field(default_factory=dict)

    def to_dict(self, ignore_keys=("comm_hook", "comm_wrapper", "comm_state_option")):
        # The communication-hook fields are not DDP constructor kwargs, so drop them here.
        full_dict = super().to_dict()
        return {name: value for name, value in full_dict.items() if name not in ignore_keys}

    def register_comm_hook(self, model):
        """Attach the configured gradient-compression hook (if any) to a DDP-wrapped `model`."""
        from torch.distributed.algorithms.ddp_comm_hooks import (
            default_hooks,
            powerSGD_hook,
        )

        hook_map: dict[DDPCommunicationHookType, Callable] = {
            DDPCommunicationHookType.FP16: default_hooks.fp16_compress_hook,
            DDPCommunicationHookType.BF16: default_hooks.bf16_compress_hook,
            DDPCommunicationHookType.POWER_SGD: powerSGD_hook.powerSGD_hook,
            DDPCommunicationHookType.BATCHED_POWER_SGD: powerSGD_hook.batched_powerSGD_hook,
        }
        wrapper_map: dict[DDPCommunicationHookType, Callable] = {
            DDPCommunicationHookType.FP16: default_hooks.fp16_compress_wrapper,
            DDPCommunicationHookType.BF16: default_hooks.bf16_compress_wrapper,
        }

        hook = hook_map.get(self.comm_hook)
        if hook is None:
            # DDPCommunicationHookType.NO: nothing to register.
            return
        wrapper = wrapper_map.get(self.comm_wrapper)
        if wrapper is not None:
            hook = wrapper(hook)

        # The PowerSGD variants need a state object; the plain compression hooks do not.
        needs_state = self.comm_hook in (
            DDPCommunicationHookType.POWER_SGD,
            DDPCommunicationHookType.BATCHED_POWER_SGD,
        )
        state = powerSGD_hook.PowerSGDState(None, **self.comm_state_option) if needs_state else None
        model.register_comm_hook(
            state=state,
            hook=hook,
        )


@dataclass
class GradScalerKwargs(KwargsHandler):
    """
    Customizes the `torch.amp.GradScaler` / `torch.cuda.amp.GradScaler` created for mixed precision. See the
    [GradScaler docs](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for each argument.

    <Tip>

    `torch.cuda.amp.GradScaler` is only available in PyTorch 1.5.0 and later versions, and `torch.amp.GradScaler` is
    only available in PyTorch 2.4.0 and later versions.

    </Tip>

    Example:

    ```python
    from accelerate import Accelerator
    from accelerate.utils import GradScalerKwargs

    kwargs = GradScalerKwargs(backoff_factor=0.25)
    accelerator = Accelerator(kwargs_handlers=[kwargs])
    ```
    """

    init_scale: float = 65536.0
    growth_factor: float = 2.0
    backoff_factor: float = 0.5
    growth_interval: int = 2000
    enabled: bool = True


@dataclass
class InitProcessGroupKwargs(KwargsHandler):
    """
    Customizes the initialization of the distributed process group. See
    [`torch.distributed.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group)
    for the meaning of each argument.

    Note: If `timeout` is set to `None`, the default will be based upon how `backend` is set.

    ```python
    from datetime import timedelta
    from accelerate import Accelerator
    from accelerate.utils import InitProcessGroupKwargs

    kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
    accelerator = Accelerator(kwargs_handlers=[kwargs])
    ```
    """

    backend: Optional[str] = "nccl"
    init_method: Optional[str] = None
    timeout: Optional[timedelta] = None

    def __post_init__(self):
        # NCCL gets a shorter default timeout (10 min); everything else gets 30 min.
        if self.timeout is None:
            default_seconds = 600 if self.backend == "nccl" else 1800
            self.timeout = timedelta(seconds=default_seconds)
# Literals
Backend = Literal["MSAMP", "TE"]
OptLevel = Literal["O1", "O2"]
FP8Format = Literal["HYBRID", "E4M3", "E5M2"]
AmaxComputeAlgorithm = Literal["max", "most_recent"]


# FP8 training recipe kwargs
@dataclass
class AORecipeKwargs(KwargsHandler):
    """
    Recipe configuration for FP8 mixed-precision training backed by `torchao`.

    Args:
        config (`torchao.float8.Float8LinearConfig`, *optional*, default to `None`):
            The configuration for the FP8 training. In general, the default config should be sufficient.
        module_filter_func (`Callable`, *optional*, default to `None`):
            Optional function taking a module and its layer name and returning whether that module should be
            converted to FP8. Defaults to `accelerate.utils.ao.filter_linear_layers`. See it for an example.
    """

    config: Optional["Float8LinearConfig"] = None
    module_filter_func: Optional[Callable] = None


@dataclass
class TERecipeKwargs(KwargsHandler):
    """
    Recipe configuration for FP8 mixed-precision training backed by `transformer-engine`.
    See the [TE API docs](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
    for details on each argument. Every field left as `None` is resolved from the
    `ACCELERATE_FP8_*` environment variables in `__post_init__`.

    ```python
    from accelerate import Accelerator
    from accelerate.utils import TERecipeKwargs

    kwargs = TERecipeKwargs(fp8_format="HYBRID")
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
    ```

    Args:
        use_autocast_during_eval (`bool`, *optional*, default to `False`):
            Whether to use FP8 autocast during eval mode. Generally better metrics are found when this is `False`.
        margin (`int`, *optional*, default to 0):
            The margin to use for the gradient scaling.
        interval (`int`, *optional*, default to 1):
            The interval to use for how often the scaling factor is recomputed.
        fp8_format (`str`, *optional*, default to "HYBRID"):
            The format to use for the FP8 recipe. Must be one of `HYBRID`, `E4M3` or `E5M2`. (Generally `HYBRID`
            for training, `E4M3` or `E5M2` for evaluation)
        amax_history_len (`int`, *optional*, default to 1024):
            The length of the history to use for the scaling factor computation
        amax_compute_algo (`str`, *optional*, default to "most_recent"):
            The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
        override_linear_precision (`tuple` of three `bool`, *optional*, default to `(False, False, False)`):
            Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
    """

    use_autocast_during_eval: bool = None
    margin: int = None
    interval: int = None
    fp8_format: FP8Format = None
    amax_history_len: int = None
    amax_compute_algo: AmaxComputeAlgorithm = None
    override_linear_precision: tuple[bool, bool, bool] = None

    def __post_init__(self):
        env_prefix = "ACCELERATE_FP8_"

        def _env(name, default):
            # Small helper: read an `ACCELERATE_FP8_*` variable with a fallback.
            return os.environ.get(env_prefix + name, default)

        if not is_transformer_engine_available():
            raise ImportError("TransformerEngine is not available. Please install it or use a different backend.")
        if self.use_autocast_during_eval is None:
            self.use_autocast_during_eval = parse_flag_from_env(env_prefix + "USE_AUTOCAST_DURING_EVAL")
        if self.margin is None:
            self.margin = int(_env("MARGIN", 0))
        if self.interval is None:
            self.interval = int(_env("INTERVAL", 1))
        if self.fp8_format is None:
            self.fp8_format = _env("FORMAT", "HYBRID")
        self.fp8_format = self.fp8_format.upper()
        if self.fp8_format not in get_args(FP8Format):
            raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.")
        if self.amax_compute_algo is None:
            self.amax_compute_algo = _env("AMAX_COMPUTE_ALGO", "most_recent")
        self.amax_compute_algo = self.amax_compute_algo.lower()
        if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm):
            raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}")
        if self.amax_history_len is None:
            self.amax_history_len = int(_env("AMAX_HISTORY_LEN", 1024))
        if self.override_linear_precision is None:
            fprop = parse_flag_from_env(env_prefix + "OVERRIDE_FPROP")
            dgrad = parse_flag_from_env(env_prefix + "OVERRIDE_DGRAD")
            wgrad = parse_flag_from_env(env_prefix + "OVERRIDE_WGRAD")
            self.override_linear_precision = (fprop, dgrad, wgrad)
Please install it or use a different backend.") + if self.use_autocast_during_eval is None: + self.use_autocast_during_eval = parse_flag_from_env(env_prefix + "USE_AUTOCAST_DURING_EVAL") + if self.margin is None: + self.margin = int(os.environ.get(env_prefix + "MARGIN", 0)) + if self.interval is None: + self.interval = int(os.environ.get(env_prefix + "INTERVAL", 1)) + if self.fp8_format is None: + self.fp8_format = os.environ.get(env_prefix + "FORMAT", "HYBRID") + self.fp8_format = self.fp8_format.upper() + if self.fp8_format not in get_args(FP8Format): + raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.") + if self.amax_compute_algo is None: + self.amax_compute_algo = os.environ.get(env_prefix + "AMAX_COMPUTE_ALGO", "most_recent") + self.amax_compute_algo = self.amax_compute_algo.lower() + if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm): + raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}") + if self.amax_history_len is None: + self.amax_history_len = int(os.environ.get(env_prefix + "AMAX_HISTORY_LEN", 1024)) + if self.override_linear_precision is None: + fprop = parse_flag_from_env(env_prefix + "OVERRIDE_FPROP") + dgrad = parse_flag_from_env(env_prefix + "OVERRIDE_DGRAD") + wgrad = parse_flag_from_env(env_prefix + "OVERRIDE_WGRAD") + self.override_linear_precision = (fprop, dgrad, wgrad) + + +@dataclass +class MSAMPRecipeKwargs(KwargsHandler): + """ + Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision + training with `ms-amp`. 
+ """ + + opt_level: OptLevel = None + + def __post_init__(self): + env_prefix = "ACCELERATE_FP8_" + if self.opt_level is None: + self.opt_level = os.environ.get(env_prefix + "OPT_LEVEL", "O2") + if self.opt_level not in get_args(OptLevel): + raise ValueError(f"`opt_level` must be one of {' or '.join(get_args(OptLevel))}") + + +@dataclass +class FP8RecipeKwargs(TERecipeKwargs, MSAMPRecipeKwargs): + """ + Deprecated. Please use one of the proper FP8 recipe kwargs classes such as `TERecipeKwargs` or `MSAMPRecipeKwargs` + instead. + """ + + backend: Backend = None + + def __post_init__(self): + env_prefix = "ACCELERATE_FP8_" + warnings.warn( + "FP8RecipeKwargs is deprecated and will be removed in Accelerate v2.0.0. " + "Please use one of the proper FP8 recipe kwargs classes such as TERecipeKwargs or MSAMPRecipeKwargs instead.", + FutureWarning, + ) + default_backend = "msamp" if is_msamp_available() else "te" + if self.backend is None: + self.backend = os.environ.get(env_prefix + "BACKEND", default_backend) + self.backend = self.backend.upper() + if self.backend not in get_args(Backend): + raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine) to use `FP8RecipeKwargs`.") + super().__post_init__() + + +# Literal +ProfilerActivity = Literal["cpu", "xpu", "mtia", "cuda", "hpu"] + + +@dataclass +class ProfileKwargs(KwargsHandler): + """ + Use this object in your [`Accelerator`] to customize the initialization of the profiler. Please refer to the + documentation of this [context manager](https://pytorch.org/docs/stable/profiler.html#torch.profiler.profile) for + more information on each argument. + + + + `torch.profiler` is only available in PyTorch 1.8.1 and later versions. 
+ + + + Example: + + ```python + from accelerate import Accelerator + from accelerate.utils import ProfileKwargs + + kwargs = ProfileKwargs(activities=["cpu", "cuda"]) + accelerator = Accelerator(kwargs_handlers=[kwargs]) + ``` + + Args: + activities (`List[str]`, *optional*, default to `None`): + The list of activity groups to use in profiling. Must be one of `"cpu"`, `"xpu"`, `"mtia"`, "hpu" or + `"cuda"`. + schedule_option (`Dict[str, int]`, *optional*, default to `None`): + The schedule option to use for the profiler. Available keys are `wait`, `warmup`, `active`, `repeat` and + `skip_first`. The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the + warmup for the next `warmup` steps, then do the active recording for the next `active` steps and then + repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` + parameter, the zero value means that the cycles will continue until the profiling is finished. + on_trace_ready (`Callable`, *optional*, default to `None`): + Callable that is called at each step when schedule returns `ProfilerAction.RECORD_AND_SAVE` during the + profiling. + record_shapes (`bool`, *optional*, default to `False`): + Save information about operator’s input shapes. + profile_memory (`bool`, *optional*, default to `False`): + Track tensor memory allocation/deallocation + with_stack (`bool`, *optional*, default to `False`): + Record source information (file and line number) for the ops. + with_flops (`bool`, *optional*, default to `False`): + Use formula to estimate the FLOPS of specific operators + with_modules (`bool`, *optional*, default to `False`): + Record module hierarchy (including function names) corresponding to the callstack of the op. + output_trace_dir (`str`, *optional*, default to `None`): + Exports the collected trace in Chrome JSON format. Chrome use 'chrome://tracing' view json file. 
Defaults + to None, which means profiling does not store json files. + """ + + activities: Optional[list[ProfilerActivity]] = None + schedule_option: Optional[dict[str, int]] = None + on_trace_ready: Optional[Callable] = None + record_shapes: bool = False + profile_memory: bool = False + with_stack: bool = False + with_flops: bool = False + with_modules: bool = False + output_trace_dir: Optional[str] = None + + def _get_profiler_activity(self, activity: ProfilerActivity) -> torch.profiler.ProfilerActivity: + """Get the profiler activity from the string. + + Args: + activity (str): The profiler activity name. + + Returns: + torch.profiler.ProfilerActivity: The profiler activity. + """ + + profiler_activity_map: dict[str, torch.profiler.ProfilerActivity] = { + "cpu": torch.profiler.ProfilerActivity.CPU, + "cuda": torch.profiler.ProfilerActivity.CUDA, + } + + if is_hpu_available(): + profiler_activity_map["hpu"] = torch.profiler.ProfilerActivity.HPU + + if is_torch_version(">=", XPU_PROFILING_AVAILABLE_PYTORCH_VERSION): + if torch.xpu.is_available(): + profiler_activity_map["xpu"] = torch.profiler.ProfilerActivity.XPU + + if is_torch_version(">=", MITA_PROFILING_AVAILABLE_PYTORCH_VERSION): + if torch.mtia.is_available(): + profiler_activity_map["mtia"] = torch.profiler.ProfilerActivity.MTIA + + if activity not in profiler_activity_map: + raise ValueError(f"Invalid profiler activity: {activity}. Must be one of {list(profiler_activity_map)}.") + return profiler_activity_map[activity] + + def build(self) -> torch.profiler.profile: + """ + Build a profiler object with the current configuration. + + Returns: + torch.profiler.profile: The profiler object. 
class DistributedType(str, enum.Enum):
    """
    Represents a type of distributed environment.

    Values:

        - **NO** -- Not a distributed environment, just a single process.
        - **MULTI_CPU** -- Distributed on multiple CPU nodes.
        - **MULTI_GPU** -- Distributed on multiple GPUs.
        - **MULTI_MLU** -- Distributed on multiple MLUs.
        - **MULTI_SDAA** -- Distributed on multiple SDAAs.
        - **MULTI_MUSA** -- Distributed on multiple MUSAs.
        - **MULTI_NPU** -- Distributed on multiple NPUs.
        - **MULTI_XPU** -- Distributed on multiple XPUs.
        - **MULTI_HPU** -- Distributed on multiple HPUs.
        - **DEEPSPEED** -- Using DeepSpeed.
        - **FSDP** -- Using FullyShardedDataParallel.
        - **XLA** -- Using TorchXLA.
        - **MEGATRON_LM** -- Using Megatron-LM.
    """

    # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
    NO = "NO"
    MULTI_CPU = "MULTI_CPU"
    MULTI_GPU = "MULTI_GPU"
    MULTI_NPU = "MULTI_NPU"
    MULTI_MLU = "MULTI_MLU"
    MULTI_SDAA = "MULTI_SDAA"
    MULTI_MUSA = "MULTI_MUSA"
    MULTI_XPU = "MULTI_XPU"
    DEEPSPEED = "DEEPSPEED"
    FSDP = "FSDP"
    XLA = "XLA"
    MEGATRON_LM = "MEGATRON_LM"
    MULTI_HPU = "MULTI_HPU"


class SageMakerDistributedType(str, enum.Enum):
    """
    Represents a type of distributed environment.

    Values:

        - **NO** -- Not a distributed environment, just a single process.
        - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
        - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
    """

    # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
    NO = "NO"
    DATA_PARALLEL = "DATA_PARALLEL"
    MODEL_PARALLEL = "MODEL_PARALLEL"


class FP8BackendType(str, enum.Enum):
    """
    Represents the backend used for FP8.

    Values:

        - **NO** -- FP8 is not in use.
        - **TE** -- using TransformerEngine.
        - **MSAMP** -- using msamp.
        - **AO** -- using torchao.
    """

    # Subclassing str as well as Enum allows the `FP8BackendType` to be JSON-serializable out of the box.
    NO = "NO"
    TE = "TE"
    MSAMP = "MSAMP"
    AO = "AO"


class ComputeEnvironment(str, enum.Enum):
    """
    Represents a type of the compute environment.

    Values:

        - **LOCAL_MACHINE** -- private/custom cluster hardware.
        - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
    """

    # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
    LOCAL_MACHINE = "LOCAL_MACHINE"
    AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
class DynamoBackend(str, BaseEnum):
    """
    Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html).

    Values:

        - **NO** -- Do not use torch dynamo.
        - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
          issues.
        - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
          extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
        - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
          kernels. [Read
          more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
        - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
          more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
        - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
          more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
        - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
        - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
          more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
        - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
          more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
        - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
        - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
          more](https://github.com/onnx/onnx-tensorrt)
        - **AOT_TORCHXLA_TRACE_ONCE** -- Uses Pytorch/XLA with TorchDynamo optimization, for training. [Read
          more](https://github.com/pytorch/xla/blob/r2.0/docs/dynamo.md)
        - **TORCHXLA_TRACE_ONCE** -- Uses Pytorch/XLA with TorchDynamo optimization, for inference. [Read
          more](https://github.com/pytorch/xla/blob/r2.0/docs/dynamo.md)
        - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
          more](https://github.com/intel/intel-extension-for-pytorch).
        - **TVM** -- Uses Apach TVM for inference optimizations. [Read more](https://tvm.apache.org/)
        - **HPU_BACKEND** -- Uses HPU backend for inference optimizations.

    """

    # Subclassing str as well as Enum allows the `DynamoBackend` to be JSON-serializable out of the box.
    NO = "NO"
    EAGER = "EAGER"
    AOT_EAGER = "AOT_EAGER"
    INDUCTOR = "INDUCTOR"
    AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
    NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
    CUDAGRAPHS = "CUDAGRAPHS"
    OFI = "OFI"
    FX2TRT = "FX2TRT"
    ONNXRT = "ONNXRT"
    TENSORRT = "TENSORRT"
    AOT_TORCHXLA_TRACE_ONCE = "AOT_TORCHXLA_TRACE_ONCE"
    TORCHXLA_TRACE_ONCE = "TORCHXLA_TRACE_ONCE"
    IPEX = "IPEX"
    TVM = "TVM"
    HPU_BACKEND = "HPU_BACKEND"


class LoggerType(BaseEnum):
    """Represents a type of supported experiment tracker

    Values:

        - **ALL** -- all available trackers in the environment that are supported
        - **AIM** -- aim as an experiment tracker
        - **TENSORBOARD** -- TensorBoard as an experiment tracker
        - **WANDB** -- wandb as an experiment tracker
        - **TRACKIO** -- trackio as an experiment tracker
        - **COMETML** -- comet_ml as an experiment tracker
        - **MLFLOW** -- mlflow as an experiment tracker
        - **CLEARML** -- clearml as an experiment tracker
        - **DVCLIVE** -- dvclive as an experiment tracker
        - **SWANLAB** -- swanlab as an experiment tracker
    """

    ALL = "all"
    AIM = "aim"
    TENSORBOARD = "tensorboard"
    WANDB = "wandb"
    TRACKIO = "trackio"
    COMETML = "comet_ml"
    MLFLOW = "mlflow"
    CLEARML = "clearml"
    DVCLIVE = "dvclive"
    SWANLAB = "swanlab"


class PrecisionType(str, BaseEnum):
    """Represents a type of precision used on floating point values

    Values:

        - **NO** -- using full precision (FP32)
        - **FP8** -- using 8-bit floating point precision
        - **FP16** -- using half precision
        - **BF16** -- using brain floating point precision
    """

    NO = "no"
    FP8 = "fp8"
    FP16 = "fp16"
    BF16 = "bf16"


class RNGType(BaseEnum):
    # Names of random-number-generator states that accelerate can synchronize across processes.
    TORCH = "torch"
    CUDA = "cuda"
    MLU = "mlu"
    SDAA = "sdaa"
    MUSA = "musa"
    NPU = "npu"
    XLA = "xla"
    XPU = "xpu"
    HPU = "hpu"
    GENERATOR = "generator"
class CustomDtype(enum.Enum):
    r"""
    An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
    """

    FP8 = "fp8"
    INT4 = "int4"
    INT2 = "int2"


# data classes


@dataclass
class TensorInformation:
    """Lightweight description of a tensor: its shape and dtype."""

    shape: torch.Size
    dtype: torch.dtype


@dataclass
class DataLoaderConfiguration:
    """
    Configuration for dataloader-related items when calling `accelerator.prepare`.

    Args:
        split_batches (`bool`, defaults to `False`):
            Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If
            `True`, the actual batch size used will be the same on any kind of distributed processes, but it must be a
            round multiple of `num_processes` you are using. If `False`, actual batch size used will be the one set in
            your script multiplied by the number of processes.
        dispatch_batches (`bool`, defaults to `None`):
            If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process
            and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose
            underlying dataset is an `IterableDataset`, `False` otherwise.
        even_batches (`bool`, defaults to `True`):
            If set to `True`, in cases where the total batch size across all processes does not exactly divide the
            dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
            all workers.
        use_seedable_sampler (`bool`, defaults to `False`):
            Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`]). Ensures
            training results are fully reproducible using a different sampling technique. While seed-to-seed results
            may differ, on average the differences are negligible when using multiple different seeds to compare.
            Should also be run with [`~utils.set_seed`] for the best results.
        data_seed (`int`, defaults to `None`):
            The seed to use for the underlying generator when using `use_seedable_sampler`. If `None`, the generator
            will use the current default seed from torch.
        non_blocking (`bool`, defaults to `False`):
            If set to `True`, the dataloader prepared by the Accelerator will utilize non-blocking host-to-device
            transfers, allowing for better overlap between dataloader communication and computation. Recommended that
            the prepared dataloader has `pin_memory` set to `True` to work properly.
        use_stateful_dataloader (`bool`, defaults to `False`):
            If set to `True`, the dataloader prepared by the Accelerator will be backed by
            [torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader).
            This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed.
    """

    split_batches: bool = field(
        default=False,
        metadata={
            "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
            " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
            " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
            " in your script multiplied by the number of processes."
        },
    )
    # `None` (not `False`) is a meaningful default: the decision is deferred until
    # prepare-time, based on whether the underlying dataset is an `IterableDataset`.
    dispatch_batches: Optional[bool] = field(
        default=None,
        metadata={
            "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
            " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
            " underlying dataset is an `IterableDataset`, `False` otherwise."
        },
    )
    even_batches: bool = field(
        default=True,
        metadata={
            "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
            " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
            " all workers."
        },
    )
    use_seedable_sampler: bool = field(
        default=False,
        metadata={
            "help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])."
            "Ensures training results are fully reproducable using a different sampling technique. "
            "While seed-to-seed results may differ, on average the differences are neglible when using"
            "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results."
        },
    )
    data_seed: Optional[int] = field(
        default=None,
        metadata={
            "help": "The seed to use for the underlying generator when using `use_seedable_sampler`. If `None`, the generator"
            " will use the current default seed from torch."
        },
    )
    non_blocking: bool = field(
        default=False,
        metadata={
            "help": "If set to `True`, the dataloader prepared by the Accelerator will utilize non-blocking host-to-device"
            " transfers, allowing for better overlap between dataloader communication and computation. Recommended that the"
            " prepared dataloader has `pin_memory` set to `True` to work properly."
        },
    )
    use_stateful_dataloader: bool = field(
        default=False,
        metadata={
            "help": "If set to `True`, the dataloader prepared by the Accelerator will be backed by "
            "[torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed."
        },
    )


@dataclass
class ProjectConfiguration:
    """
    Configuration for the Accelerator object based on inner-project needs.

    Args:
        project_dir (`str`, defaults to `None`):
            A path to a directory for storing data.
        logging_dir (`str`, defaults to `None`):
            A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`.
        automatic_checkpoint_naming (`bool`, defaults to `False`):
            Whether saved states should be automatically iteratively named.
        total_limit (`int`, defaults to `None`):
            The maximum number of total saved states to keep.
        iteration (`int`, defaults to `0`):
            The current save iteration.
        save_on_each_node (`bool`, defaults to `False`):
            When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on
            the main one.
    """

    project_dir: Optional[str] = field(default=None, metadata={"help": "A path to a directory for storing data."})
    logging_dir: Optional[str] = field(
        default=None,
        metadata={
            "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
        },
    )
    automatic_checkpoint_naming: bool = field(
        default=False,
        metadata={"help": "Whether saved states should be automatically iteratively named."},
    )

    total_limit: Optional[int] = field(
        default=None,
        metadata={"help": "The maximum number of total saved states to keep."},
    )

    iteration: int = field(
        default=0,
        metadata={"help": "The current save iteration."},
    )

    save_on_each_node: bool = field(
        default=False,
        metadata={
            "help": (
                "When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
                " only on the main one"
            )
        },
    )

    def set_directories(self, project_dir: Optional[str] = None):
        "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
        self.project_dir = project_dir
        # Logging falls back to the project directory when not set explicitly.
        if self.logging_dir is None:
            self.logging_dir = project_dir

    def __post_init__(self):
        self.set_directories(self.project_dir)
@dataclass
class GradientAccumulationPlugin(KwargsHandler):
    """
    A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or
    `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error.

    Parameters:
        num_steps (`int`):
            The number of steps to accumulate gradients for.
        adjust_scheduler (`bool`, *optional*, defaults to `True`):
            Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be
            `True` if the used scheduler was not adjusted for gradient accumulation.
        sync_with_dataloader (`bool`, *optional*, defaults to `True`):
            Whether to synchronize setting the gradients when at the end of the dataloader.
        sync_each_batch (`bool`, *optional*):
            Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory
            requirements when using gradient accumulation with distributed training, at expense of speed.

    Example:

    ```python
    from accelerate.utils import GradientAccumulationPlugin

    gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
    accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
    ```
    """

    num_steps: Optional[int] = field(
        default=None,
        metadata={"help": "The number of steps to accumulate gradients for."},
    )
    adjust_scheduler: bool = field(
        default=True,
        metadata={
            "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
        },
    )
    sync_with_dataloader: bool = field(
        default=True,
        metadata={
            "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
        },
    )
    sync_each_batch: bool = field(
        default=False,
        metadata={
            "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed."
        },
    )
@dataclass
class TorchDynamoPlugin(KwargsHandler):
    """
    This plugin is used to compile a model with PyTorch 2.0

    Args:
        backend (`DynamoBackend`, defaults to `None`):
            A valid Dynamo backend. See https://pytorch.org/docs/stable/torch.compiler.html for more details.
        mode (`str`, defaults to `None`):
            Possible options are 'default', 'reduce-overhead' or 'max-autotune'.
        fullgraph (`bool`, defaults to `None`):
            Whether to require the whole model to be captured in a single graph (an error is raised on graph breaks
            when `True`).
        dynamic (`bool`, defaults to `None`):
            Whether to use dynamic shape for tracing.
        options (`Any`, defaults to `None`):
            A dictionary of options to pass to the backend.
        disable (`bool`, defaults to `False`):
            Turn torch.compile() into a no-op for testing
        use_regional_compilation (`bool`, defaults to `None`):
            Use it to reduce the cold start compilation time of torch.compile() by targeting repeated blocks of the
            same class and compiling them sequentially to hit the compiler's cache. For example, in `GPT2LMHeadModel`,
            the repeated block/class is `GPT2Block`, and can be accessed as `model.transformer.h[0]`. The rest of the
            model (e.g model.lm_head) is compiled separately.
    """

    backend: Optional[DynamoBackend] = field(
        default=None,
        metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
    )
    mode: Optional[str] = field(
        default=None,
        metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"},
    )
    fullgraph: Optional[bool] = field(
        default=None,
        metadata={"help": "Whether it is ok to break model into several subgraphs"},
    )
    dynamic: Optional[bool] = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
    options: Any = field(
        default=None,
        metadata={"help": "A dictionary of options to pass to the backend."},
    )
    disable: bool = field(
        default=False,
        metadata={"help": "Turn torch.compile() into a no-op for testing"},
    )

    use_regional_compilation: Optional[bool] = field(
        default=None,
        metadata={
            "help": (
                # https://pytorch.org/tutorials/recipes/regional_compilation.html
                "Use it to reduce the cold start compilation time of torch.compile() by targeting repeated "
                "blocks of the same class and compiling them sequentially to hit the compiler's cache. For "
                "example, in `GPT2LMHeadModel`, the repeated block/class is `GPT2Block`, and can be accessed "
                "as `model.transformer.h[0]`. The rest of the model (e.g model.lm_head) is compiled separately."
            )
        },
    )

    def __post_init__(self):
        """Resolve any unset fields from `ACCELERATE_DYNAMO_*` environment variables."""
        prefix = "ACCELERATE_DYNAMO_"
        if self.backend is None:
            self.backend = os.environ.get(prefix + "BACKEND", "no")
        # Normalize a plain string (from env or user) into a `DynamoBackend` member.
        self.backend = DynamoBackend(self.backend.upper())

        if self.mode is None:
            self.mode = os.environ.get(prefix + "MODE", "default")
        if self.fullgraph is None:
            self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
        if self.use_regional_compilation is None:
            self.use_regional_compilation = (
                str_to_bool(os.environ.get(prefix + "USE_REGIONAL_COMPILATION", "False")) == 1
            )

        # `dynamic` stays `None` (let torch decide) unless the env var is explicitly set.
        if self.dynamic is None and os.environ.get(prefix + "USE_DYNAMIC", None) is not None:
            self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1

    def to_dict(self):
        """Return a JSON-friendly dict of the config, with `backend` as its lowercase string value."""
        dynamo_config = copy.deepcopy(self.__dict__)
        dynamo_config["backend"] = dynamo_config["backend"].value.lower()
        return dynamo_config

    def to_kwargs(self):
        """Return kwargs suitable for `torch.compile` (drops the accelerate-only regional-compilation flag)."""
        kwargs = super().to_kwargs()
        kwargs.pop("use_regional_compilation", None)
        return kwargs
    # NOTE: nearly every `None` default below is resolved in `__post_init__`,
    # either from an `ACCELERATE_*` environment variable or from the DeepSpeed
    # config file itself.
    hf_ds_config: Any = field(
        default=None,
        metadata={
            "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
        },
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=None,
        metadata={
            "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
        },
    )
    gradient_clipping: Optional[float] = field(default=None, metadata={"help": "Enable gradient clipping with value"})
    zero_stage: Optional[int] = field(
        default=None,
        metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
    )
    is_train_batch_min: bool = field(
        default=True,
        metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
    )
    offload_optimizer_device: Optional[str] = field(
        default=None,
        metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
    )
    offload_param_device: Optional[str] = field(
        default=None,
        metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
    )
    offload_optimizer_nvme_path: Optional[str] = field(
        default=None,
        metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
    )
    offload_param_nvme_path: Optional[str] = field(
        default=None,
        metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
    )
    zero3_init_flag: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models."
            "Only applicable with ZeRO Stage-3."
        },
    )
    zero3_save_16bit_model: Optional[bool] = field(
        default=None,
        metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
    )
    transformer_moe_cls_names: Optional[str] = field(
        default=None,
        metadata={
            "help": "comma-separated list of transformers MoE layer class names (case-sensitive), e.g : "
            " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
        },
    )
    enable_msamp: Optional[bool] = field(
        default=None,
        metadata={"help": "Flag to indicate whether to enable MS-AMP backend for FP8 training."},
    )
    msamp_opt_level: Optional[Literal["O1", "O2"]] = field(
        default=None,
        metadata={
            "help": "Optimization level for MS-AMP (defaults to 'O1'). Only applicable if `enable_msamp` is True. Should be one of ['O1' or 'O2']."
        },
    )

    def __post_init__(self):
        """Resolve unset fields from the environment, then build/validate the DeepSpeed config.

        Order matters here: scalar fields are resolved from env vars first, then either an
        existing DeepSpeed config (file/dict/HfDeepSpeedConfig) is validated and used to
        back-fill the plugin attributes, or a fresh config is synthesized from the attributes.
        """
        from .deepspeed import HfDeepSpeedConfig

        if self.gradient_accumulation_steps is None:
            gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
            # May legitimately remain the string "auto"; only digits are converted.
            self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas

        if self.gradient_clipping is None:
            gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "auto")
            self.gradient_clipping = gradient_clipping if gradient_clipping == "auto" else float(gradient_clipping)

        if self.zero_stage is None:
            self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))

        if self.offload_optimizer_device is None:
            self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")

        if self.offload_param_device is None:
            self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")

        if self.offload_optimizer_nvme_path is None:
            self.offload_optimizer_nvme_path = os.environ.get(
                "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
            )

        if self.offload_param_nvme_path is None:
            self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")

        if self.zero3_save_16bit_model is None:
            self.zero3_save_16bit_model = (
                os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false").lower() == "true"
            )
        if self.enable_msamp is None:
            self.enable_msamp = os.environ.get("ACCELERATE_FP8_BACKEND", None) == "MSAMP"

        if self.msamp_opt_level is None:
            self.msamp_opt_level = os.environ.get("ACCELERATE_FP8_OPT_LEVEL", "O1")

        if self.hf_ds_config is None:
            self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
        # A user-provided config (dict, path, or HfDeepSpeedConfig) takes precedence
        # over the plugin attributes; otherwise a config is synthesized below.
        if (
            isinstance(self.hf_ds_config, dict)
            or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
            or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
        ):
            if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
                self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
            if "gradient_accumulation_steps" not in self.hf_ds_config.config:
                self.hf_ds_config.config["gradient_accumulation_steps"] = 1
            if "zero_optimization" not in self.hf_ds_config.config:
                raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")

            self._deepspeed_config_checks()
            # Maps plugin attribute names to their dotted paths inside the DeepSpeed config.
            plugin_to_config_mapping = {
                "gradient_accumulation_steps": "gradient_accumulation_steps",
                "gradient_clipping": "gradient_clipping",
                "zero_stage": "zero_optimization.stage",
                "offload_optimizer_device": "zero_optimization.offload_optimizer.device",
                "offload_param_device": "zero_optimization.offload_param.device",
                "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
                "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
                "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
            }
            # Fill any "auto" entries in the config from the plugin attributes.
            kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
            for key in kwargs.keys():
                self.fill_match(key, **kwargs, must_match=False)
            self.hf_ds_config.set_stage_and_offload()

            # filling the missing values in the class attributes from the DeepSpeed config
            # when using the DeepSpeed config file.
            for key, value in plugin_to_config_mapping.items():
                config_value = self.hf_ds_config.get_value(value)
                if config_value is not None and config_value != "auto":
                    setattr(self, key, config_value)
        else:
            # No config given: synthesize one from the resolved plugin attributes.
            config = {
                "train_batch_size": "auto",
                "train_micro_batch_size_per_gpu": "auto",
                "gradient_accumulation_steps": self.gradient_accumulation_steps,
                "zero_optimization": {
                    "stage": self.zero_stage,
                    "offload_optimizer": {
                        "device": self.offload_optimizer_device,
                        "nvme_path": (
                            self.offload_optimizer_nvme_path if self.offload_optimizer_device == "nvme" else None
                        ),
                    },
                    "offload_param": {
                        "device": self.offload_param_device,
                        "nvme_path": (self.offload_param_nvme_path if self.offload_param_device == "nvme" else None),
                    },
                    "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
                },
            }
            if self.gradient_clipping:
                config["gradient_clipping"] = self.gradient_clipping
            self.hf_ds_config = HfDeepSpeedConfig(config)

        self.deepspeed_config = self.hf_ds_config.config
        self.deepspeed_config["steps_per_print"] = float("inf")  # this will stop deepspeed from logging @ stdout
        if self.zero3_init_flag is None:
            self.zero3_init_flag = (
                str_to_bool(
                    os.environ.get(
                        "ACCELERATE_DEEPSPEED_ZERO3_INIT",
                        str(self.hf_ds_config.is_zero3()),
                    )
                )
                == 1
            )
        if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
            warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
            self.zero3_init_flag = False
        # NOTE: Set to False by default, will be set to `True` automatically if it's the first plugin passed
        # to the `Accelerator`'s `deepspeed_plugin` param, *or* `AcceleratorState().enable_deepspeed_plugin(plugin_key)` is manually called
        self._set_selected(False)

        # Ignore if it's already set
        if self.enable_msamp and "msamp" not in self.deepspeed_config:
            if self.zero_stage == 3:
                raise NotImplementedError(
                    "MS-AMP is not supported for ZeRO Stage 3. Please use ZeRO Stage 0, 1, or 2 instead."
                )
            if self.msamp_opt_level not in ["O1", "O2"]:
                raise ValueError("Invalid optimization level for MS-AMP. Please use one of ['O1' or'O2'].")
            self.deepspeed_config["msamp"] = {
                "enabled": True,
                "opt_level": self.msamp_opt_level,
            }

    def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
        """Resolve one "auto" entry at `ds_key_long` from `kwargs`, or (when `must_match`)
        record a mismatch between the config value and the corresponding kwarg into `mismatches`."""
        mismatches = [] if mismatches is None else mismatches
        config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
        if config is None:
            # Key path does not exist in the config; nothing to resolve or compare.
            return

        if config.get(ds_key) == "auto":
            if ds_key_long in kwargs:
                config[ds_key] = kwargs[ds_key_long]
                return
            else:
                raise ValueError(
                    f"`{ds_key_long}` not found in kwargs. "
                    f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or "
                    "pass it in kwargs."
                )

        if not must_match:
            return

        ds_val = config.get(ds_key)
        if ds_val is not None and ds_key_long in kwargs:
            if ds_val != kwargs[ds_key_long]:
                mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")

    def is_auto(self, ds_key_long):
        # True only when the key exists in the config and is literally set to "auto".
        val = self.hf_ds_config.get_value(ds_key_long)
        if val is None:
            return False
        else:
            return val == "auto"

    def get_value(self, ds_key_long, default=None):
        """Look up a dotted key path in the underlying DeepSpeed config."""
        return self.hf_ds_config.get_value(ds_key_long, default)

    def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
        """Process the DeepSpeed config with the values from the kwargs."""
        mismatches = [] if mismatches is None else mismatches
        if config is None:
            config = self.deepspeed_config
        # Walk the nested config, resolving/validating every leaf via `fill_match`.
        for key, value in config.items():
            if isinstance(value, dict):
                self.deepspeed_config_process(
                    prefix=prefix + key + ".",
                    mismatches=mismatches,
                    config=value,
                    must_match=must_match,
                    **kwargs,
                )
            else:
                self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)
        # Only the outermost call (empty prefix) reports the accumulated mismatches.
        if len(mismatches) > 0 and prefix == "":
            mismatches_msg = "\n".join(mismatches)
            raise ValueError(
                "Please correct the following DeepSpeed config values that mismatch kwargs "
                f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
            )

    def set_mixed_precision(self, mixed_precision):
        """Sync the `fp16`/`bf16` sections of the DeepSpeed config with the requested mixed precision."""
        ds_config = self.deepspeed_config
        kwargs = {
            "fp16.enabled": mixed_precision == "fp16",
            # When training in fp8, we still rely on bf16 autocast for the core mixed precision
            "bf16.enabled": mixed_precision in ("bf16", "fp8"),
        }
        if mixed_precision == "fp16":
            if "fp16" not in ds_config:
                ds_config["fp16"] = {"enabled": True, "auto_cast": True}
        elif mixed_precision in ("bf16", "fp8"):
            if "bf16" not in ds_config:
                ds_config["bf16"] = {"enabled": True}

        if mixed_precision == "fp8" and self.enable_msamp:
            if "msamp" not in ds_config:
                ds_config["msamp"] = {
                    "enabled": True,
                    "opt_level": self.msamp_opt_level,
                }

        if mixed_precision != "no":
            # Guard against the config enabling the *other* dtype at the same time.
            diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
            if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
                raise ValueError(
                    f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
                )
        for dtype in ["fp16", "bf16"]:
            if dtype not in ds_config:
                ds_config[dtype] = {"enabled": False}
        self.fill_match("fp16.enabled", must_match=False, **kwargs)
        self.fill_match("bf16.enabled", must_match=False, **kwargs)

    def set_deepspeed_weakref(self):
        """Register this config with Transformers' global HfDeepSpeedConfig hook so that
        `from_pretrained` can construct models under `deepspeed.zero.Init` when zero3_init is on."""
        from .imports import is_transformers_available

        ds_config = copy.deepcopy(self.deepspeed_config)
        if self.zero3_init_flag:
            if not is_transformers_available():
                raise Exception(
                    "When `zero3_init_flag` is set, it requires Transformers to be installed. "
                    "Please run `pip install transformers`."
                )
        # Transformers cannot consume "auto" placeholders here; substitute safe concrete values.
        if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto":
            ds_config["gradient_accumulation_steps"] = 1
        if "train_micro_batch_size_per_gpu" not in ds_config or ds_config["train_micro_batch_size_per_gpu"] == "auto":
            ds_config["train_micro_batch_size_per_gpu"] = 1
        if ds_config.get("train_batch_size", None) == "auto":
            del ds_config["train_batch_size"]

        # The import location moved in transformers 4.46.
        if compare_versions("transformers", "<", "4.46"):
            from transformers.deepspeed import (
                HfDeepSpeedConfig,
                unset_hf_deepspeed_config,
            )
        else:
            from transformers.integrations import (
                HfDeepSpeedConfig,
                unset_hf_deepspeed_config,
            )

        unset_hf_deepspeed_config()
        self.dschf = HfDeepSpeedConfig(ds_config)  # keep this object alive # noqa

    def is_zero3_init_enabled(self):
        """Return whether `deepspeed.zero.Init` model construction is enabled."""
        return self.zero3_init_flag

    @contextmanager
    def zero3_init_context_manager(self, enable=False):
        """Temporarily toggle `zero3_init_flag` (re-registering the weakref), restoring it on exit."""
        old = self.zero3_init_flag
        if old == enable:
            yield
        else:
            self.zero3_init_flag = enable
            self.dschf = None
            self.set_deepspeed_weakref()
            yield
            self.zero3_init_flag = old
            self.dschf = None
            self.set_deepspeed_weakref()

    def _deepspeed_config_checks(self):
        """Raise if the accelerate config sets DeepSpeed fields that a `deepspeed_config_file` would shadow."""
        env_variable_names_to_ignore = [
            "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
            "ACCELERATE_GRADIENT_CLIPPING",
            "ACCELERATE_DEEPSPEED_ZERO_STAGE",
            "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
            "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
            "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH",
            "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH",
            "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
            "ACCELERATE_MIXED_PRECISION",
        ]
        # Normalize env-var names to the accelerate-config field names they correspond to.
        env_variable_names_to_ignore = [
            name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
        ]

        deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")

        if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
            raise ValueError(
                f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
                "Please specify them appropriately in the DeepSpeed config file.\n"
                "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n"
                "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
                "It will only ask for the necessary config variables when using `deepspeed_config_file`."
            )

    def set_moe_leaf_modules(self, model):
        """Mark the configured transformers MoE layer classes as ZeRO-3 leaf modules on `model`."""
        if self.transformer_moe_cls_names is None:
            self.transformer_moe_cls_names = os.environ.get("ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES", None)
        if self.transformer_moe_cls_names is not None:
            if compare_versions("deepspeed", "<", "0.14.0"):
                raise ImportError("DeepSpeed version must be >= 0.14.0 to use MOE support. Please update DeepSpeed.")
            from deepspeed.utils import set_z3_leaf_modules

            class_names = self.transformer_moe_cls_names.split(",")
            transformer_moe_cls = []
            for layer_class in class_names:
                transformer_cls = get_module_class_from_name(model, layer_class)
                if transformer_cls is None:
                    raise Exception(
                        f"Could not find a transformer layer class called '{layer_class}' to wrap in the model."
                    )
                else:
                    transformer_moe_cls.append(transformer_cls)
            set_z3_leaf_modules(model, transformer_moe_cls)  # z3_leaf

    def select(self, _from_accelerator_state: bool = False):
        """
        Sets the HfDeepSpeedWeakref to use the current deepspeed plugin configuration
        """
        if not _from_accelerator_state:
            raise ValueError(
                "A `DeepSpeedPlugin` object must be enabled manually by calling `AcceleratorState().enable_deepspeed_plugin(plugin_key)`."
            )
        self.set_deepspeed_weakref()
        self._set_selected(True)

    def _unselect(self):
        # Inverse of `select`, but private: only state management inside accelerate should call it.
        self._set_selected(False)
+ """ + self._selected = value + + @property + def selected(self): + return self._selected + + @selected.setter + def selected(self, value): + raise NotImplementedError( + "'enabled' can only be set through calling 'AcceleratorState().enable_deepspeed_plugin(key)'." + ) + + +@dataclass +class FullyShardedDataParallelPlugin: + """ + This plugin is used to enable fully sharded data parallelism. + + Args: + fsdp_version (`int`, defaults to `1`): + The version of FSDP to use. Defaults to 1. If set to 2, launcher expects the config to be converted to + FSDP2 format. + sharding_strategy (`Union[str, torch.distributed.fsdp.ShardingStrategy]`, defaults to `'FULL_SHARD'`): + Sharding strategy to use. Should be either a `str` or an instance of + `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. Is deprecated in favor of + `reshard_after_forward`. + reshard_after_forward (`Union[str, torch.distributed.fsdp.ShardingStrategy, bool]`, defaults to `'FULL_SHARD'` for `fsdp_version=1` and `True` for `fsdp_version=2`): + Sharding strategy to use. Should be a bool if `fsdp_version` is set to 2 else a `str` or an instance of + `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. + backward_prefetch (`Union[str, torch.distributed.fsdp.BackwardPrefetch]`, defaults to `'NO_PREFETCH'`): + Backward prefetch strategy to use. Should be either a `str` or an instance of + `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`. + mixed_precision_policy (`Optional[Union[dict, torch.distributed.fsdp.MixedPrecision, torch.distributed.fsdp.MixedPrecisionPolicy]]`, defaults to `None`): + A config to enable mixed precision training with FullyShardedDataParallel. If passing in a `dict`, it + should have the following keys: `param_dtype`, `reduce_dtype`, and `buffer_dtype`, can be an instance of + `torch.distributed.fsdp.MixedPrecisionPolicy` if `fsdp_version` is set to 2. 
+ auto_wrap_policy (`Optional(Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]]), defaults to `NO_WRAP`): + A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one + of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. See + `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like. + cpu_offload (`Union[bool, torch.distributed.fsdp.CPUOffload, torch.distributed.fsdp.CPUOffloadPolicy]`, defaults to `False`): + Whether to offload parameters to CPU. Should be either a `bool` or an instance of + `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload` or + `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffloadPolicy` if `fsdp_version` is set to 2. + ignored_modules (`Optional[Union[Iterable[torch.nn.Module], str]]`, defaults to `None`): + A list of modules to ignore when wrapping with FSDP. When passing a string, will match the modules by name + using regex fullmatch. + state_dict_type (`Union[str, torch.distributed.fsdp.StateDictType]`, defaults to `'FULL_STATE_DICT'`): + State dict type to use. If a string, it must be one of `full_state_dict`, `local_state_dict`, or + `sharded_state_dict`. + state_dict_config (`Optional[Union[torch.distributed.fsdp.FullStateDictConfig, torch.distributed.fsdp.ShardedStateDictConfig]`, defaults to `None`): + State dict config to use. Is determined based on the `state_dict_type` if not passed in. + optim_state_dict_config (`Optional[Union[torch.distributed.fsdp.FullOptimStateDictConfig, torch.distributed.fsdp.ShardedOptimStateDictConfig]`, defaults to `None`): + Optim state dict config to use. Is determined based on the `state_dict_type` if not passed in. + limit_all_gathers (`bool`, defaults to `True`): + Whether to have FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers. This + bool only affects the sharded strategies that schedule all-gathers. 
Enabling this can help lower the number + of CUDA malloc retries. + use_orig_params (`bool`, defaults to `False`): + Whether to use the original parameters for the optimizer. + param_init_fn (`Optional[Callable[[torch.nn.Module], None]`, defaults to `None`): + A `Callable[torch.nn.Module] -> None` that specifies how modules that are currently on the meta device + should be initialized onto an actual device. Only applicable when `sync_module_states` is `True`. By + default is a `lambda` which calls `to_empty` on the module. + sync_module_states (`bool`, defaults to `False`): + Whether each individually wrapped FSDP unit should broadcast module parameters from rank 0 to ensure they + are the same across all ranks after initialization. Defaults to `False` unless `cpu_ram_efficient_loading` + is `True`, then will be forcibly enabled. + forward_prefetch (`bool`, defaults to `False`): + Whether to have FSDP explicitly prefetches the next upcoming all-gather while executing in the forward + pass. only use with Static graphs. + activation_checkpointing (`bool`, defaults to `False`): + A technique to reduce memory usage by clearing activations of certain layers and recomputing them during a + backward pass. Effectively, this trades extra computation time for reduced memory usage. + cpu_ram_efficient_loading (`bool`, defaults to `None`): + If True, only the first process loads the pretrained model checkoint while all other processes have empty + weights. Only applicable for Transformers. When using this, `sync_module_states` needs to be `True`. + transformer_cls_names_to_wrap (`Optional[List[str]]`, defaults to `None`): + A list of transformer layer class names to wrap. Only applicable when `auto_wrap_policy` is + `transformer_based_wrap`. + min_num_params (`Optional[int]`, defaults to `None`): + The minimum number of parameters a module must have to be wrapped. Only applicable when `auto_wrap_policy` + is `size_based_wrap`. 
+ """ + + fsdp_version: int = field( + default=None, + metadata={ + "help": "The version of FSDP to use. Defaults to 1. If set to 2, launcher expects the config to be converted to FSDP2 format." + }, + ) + + sharding_strategy: Union[str, "torch.distributed.fsdp.ShardingStrategy"] = field( + default=None, + metadata={ + "help": "Sharding strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. Defaults to 'FULL_SHARD'. Is deprecated in favor of `reshard_after_forward` " + }, + ) + + reshard_after_forward: Union[str, "torch.distributed.fsdp.ShardingStrategy", bool] = field( + default=None, + metadata={ + "help": "Sharding strategy to use. Should be a bool if `fsdp_version` is set to 2 else a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. Defaults to 'FULL_SHARD'" + }, + ) + backward_prefetch: Optional[Union[str, "torch.distributed.fsdp.BackwardPrefetch"]] = field( + default=None, + metadata={ + "help": "Backward prefetch strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`. Defaults to 'NO_PREFETCH'. This becomes obsolete in FSDP2." + }, + ) + mixed_precision_policy: Optional[ + Union[ + dict, + "torch.distributed.fsdp.MixedPrecision", + "torch.distributed.fsdp.MixedPrecisionPolicy", + ] + ] = field( + default=None, + metadata={ + "help": "A config to enable mixed precision training with FullyShardedDataParallel. " + "If passing in a `dict`, it should have the following keys: `param_dtype`, `reduce_dtype`, and `buffer_dtype`." + "Can also be an instance of `torch.distributed.fsdp.MixedPrecisionPolicy` if `fsdp_version` is set to 2." 
+ }, + ) + auto_wrap_policy: Optional[Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]]] = ( + field( + default=None, + metadata={ + "help": "A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. " + "Defaults to `NO_WRAP`. See `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like" + }, + ) + ) + cpu_offload: Union[ + bool, + "torch.distributed.fsdp.CPUOffload", + "torch.distributed.fsdp.CPUOffloadPolicy", + ] = field( + default=None, + metadata={ + "help": "Whether to offload parameters to CPU. Should be either a `bool` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload` or `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffloadPolicy` if `fsdp_version` is set to 2. Defaults to `False`" + }, + ) + ignored_modules: Optional[Union[Iterable[torch.nn.Module], str]] = field( + default=None, + metadata={"help": "A list of modules to ignore when wrapping with FSDP."}, + ) + + state_dict_type: Union[str, "torch.distributed.fsdp.StateDictType"] = field( + default=None, + metadata={ + "help": "State dict type to use. If a string, it must be one of `full_state_dict`, `local_state_dict`, or `sharded_state_dict`. Defaults to `FULL_STATE_DICT`" + }, + ) + state_dict_config: Optional[ + Union[ + "torch.distributed.fsdp.FullStateDictConfig", + "torch.distributed.fsdp.ShardedStateDictConfig", + ] + ] = field( + default=None, + metadata={"help": "State dict config to use. Is determined based on the `state_dict_type` if not passed in."}, + ) + optim_state_dict_config: Optional[ + Union[ + "torch.distributed.fsdp.FullOptimStateDictConfig", + "torch.distributed.fsdp.ShardedOptimStateDictConfig", + ] + ] = field( + default=None, + metadata={ + "help": "Optim state dict config to use. Is determined based on the `state_dict_type` if not passed in." 
+ }, + ) + limit_all_gathers: bool = field( + default=True, + metadata={ + "help": "Whether to have FSDP explicitly synchronizes the CPU thread to prevent " + "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. " + "Enabling this can help lower the number of CUDA malloc retries." + }, + ) + use_orig_params: Optional[bool] = field( + default=None, + metadata={ + "help": "Whether to use the original parameters for the optimizer. Defaults to `False`. This becomes obsolete in FSDP2." + }, + ) + param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field( + default=None, + metadata={ + "help": "A Callable[torch.nn.Module] -> None that specifies how modules " + "that are currently on the meta device should be initialized onto an actual device. " + "Only applicable when `sync_module_states` is `True`. By default is a `lambda` which calls `to_empty` on the module." + }, + ) + sync_module_states: Optional[bool] = field( + default=None, + metadata={ + "help": "Whether each individually wrapped FSDP unit should broadcast module parameters from rank 0 " + "to ensure they are the same across all ranks after initialization. Defaults to `False` unless " + "`cpu_ram_efficient_loading` is `True`, then will be forcibly enabled. This becomes obsolete in FSDP2." + }, + ) + forward_prefetch: bool = field( + default=None, + metadata={ + "help": "Whether to have FSDP explicitly prefetches the next upcoming " + "all-gather while executing in the forward pass. only use with Static graphs. Defaults to `False`" + }, + ) + activation_checkpointing: bool = field( + default=None, + metadata={ + "help": "A technique to reduce memory usage by clearing activations of " + "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time " + "for reduced memory usage. 
Defaults to `False`" + }, + ) + cpu_ram_efficient_loading: bool = field( + default=None, + metadata={ + "help": "If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " + "Only applicable for 🤗 Transformers. When using this, `sync_module_states` needs to be `True`. Defaults to `False`." + }, + ) + transformer_cls_names_to_wrap: Optional[list[str]] = field( + default=None, + metadata={ + "help": "A list of transformer layer class names to wrap. Only applicable when `auto_wrap_policy` is `transformer_based_wrap`." + }, + ) + min_num_params: Optional[int] = field( + default=None, + metadata={ + "help": "The minimum number of parameters a module must have to be wrapped. Only applicable when `auto_wrap_policy` is `size_based_wrap`." + }, + ) + + def __post_init__(self): + from torch.distributed.fsdp import BackwardPrefetch, ShardingStrategy + + _fsdp2_warnings = set() + + env_prefix = "FSDP_" + # Strategy: By default we should always assume that values are passed in, else we check the environment variables + if self.fsdp_version is None: + self.fsdp_version = int(os.environ.get(env_prefix + "VERSION", "1")) + + if self.fsdp_version == 2: + if not is_torch_version(">=", FSDP2_PYTORCH_VERSION): + raise ImportError(f"FSDP2 requires PyTorch >= {FSDP2_PYTORCH_VERSION}") + + if self.sharding_strategy is not None: + # We cannot properly detect all of the cases, as by default `args.fsdp_sharding_strategy` is set to `fully_shard` + # Therefore we issue a warning only if the user has explicitly set it inside their plugin + _fsdp2_warnings.add( + "sharding_strategy is deprecated in favor of reshard_after_forward. " + "This will be removed in a future version of Accelerate." 
+ ) + if self.fsdp_version == 1: + if self.sharding_strategy is None: + self.sharding_strategy = os.environ.get(env_prefix + "SHARDING_STRATEGY", "FULL_SHARD") + if isinstance(self.sharding_strategy, str): + if self.sharding_strategy.upper() in FSDP_SHARDING_STRATEGY: + self.sharding_strategy = FSDP_SHARDING_STRATEGY.index(self.sharding_strategy.upper()) + 1 + if isinstance(self.sharding_strategy, int) or self.sharding_strategy.isdigit(): + self.sharding_strategy = ShardingStrategy(int(self.sharding_strategy)) + else: + self.sharding_strategy = ShardingStrategy[self.sharding_strategy.upper()] + + # Fallback to `reshard_after_forward` in FSDP1 if `sharding_strategy` is not set + if self.reshard_after_forward is None and self.sharding_strategy is None: + reshard_after_forward = os.environ.get( + env_prefix + "RESHARD_AFTER_FORWARD", + "true" if self.fsdp_version == 2 else "FULL_SHARD", + ) + if self.fsdp_version == 2: + self.reshard_after_forward = str_to_bool(reshard_after_forward.lower(), to_bool=True) + else: + self.reshard_after_forward = reshard_after_forward + if isinstance(self.reshard_after_forward, str): + if self.fsdp_version == 2: + self.reshard_after_forward = str_to_bool(self.reshard_after_forward.lower(), to_bool=True) + else: + # We need to remap based on custom enum values for user readability + if self.reshard_after_forward.upper() in FSDP_SHARDING_STRATEGY: + self.reshard_after_forward = FSDP_SHARDING_STRATEGY.index(self.reshard_after_forward.upper()) + 1 + if isinstance(self.reshard_after_forward, int) or self.reshard_after_forward.isdigit(): + self.reshard_after_forward = ShardingStrategy(int(self.reshard_after_forward)) + else: + self.reshard_after_forward = ShardingStrategy[self.reshard_after_forward.upper()] + + if self.fsdp_version == 2 and not isinstance(self.reshard_after_forward, bool): + raise ValueError( + f"reshard_after_forward set to {self.reshard_after_forward}. 
This is not supported with FSDP2, please set to a `bool`" + ) + if self.fsdp_version == 1 and isinstance(self.reshard_after_forward, bool): + raise ValueError( + f"reshard_after_forward set to {self.reshard_after_forward}. This is not supported with FSDP1, please set to a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`" + ) + + if self.cpu_offload is None: + self.cpu_offload = str_to_bool(os.environ.get(env_prefix + "OFFLOAD_PARAMS", "False")) == 1 + + self.set_cpu_offload() # abstracted away to hide imports due to version checks + self.validate_cpu_offload() + + if self.backward_prefetch is None: + self.backward_prefetch = os.environ.get(env_prefix + "BACKWARD_PREFETCH", None) + if isinstance(self.backward_prefetch, str) and self.backward_prefetch.upper() == "NO_PREFETCH": + self.backward_prefetch = None + if self.backward_prefetch is not None and not isinstance(self.backward_prefetch, BackwardPrefetch): + if isinstance(self.backward_prefetch, str) and self.backward_prefetch.upper() in FSDP_BACKWARD_PREFETCH: + self.backward_prefetch = FSDP_BACKWARD_PREFETCH.index(self.backward_prefetch.upper()) + 1 + if isinstance(self.backward_prefetch, int) or self.backward_prefetch.isdigit(): + self.backward_prefetch = BackwardPrefetch(int(self.backward_prefetch)) + else: + self.backward_prefetch = BackwardPrefetch[self.backward_prefetch.upper()] + if self.fsdp_version == 2 and self.backward_prefetch is not None: + _fsdp2_warnings.add("backward_prefetch is not supported in FSDP2. Setting backward prefetch to None.") + self.backward_prefetch = None + + self.set_state_dict_type() + + if self.auto_wrap_policy is None: + self.auto_wrap_policy = os.environ.get(env_prefix + "AUTO_WRAP_POLICY", "NO_WRAP") + if isinstance(self.auto_wrap_policy, str): + if self.auto_wrap_policy.upper() not in FSDP_AUTO_WRAP_POLICY: + raise ValueError( + f"Invalid auto wrap policy: {self.auto_wrap_policy}. 
Must be one of {FSDP_AUTO_WRAP_POLICY}" + ) + from torch.distributed.fsdp.wrap import ( + size_based_auto_wrap_policy, + transformer_auto_wrap_policy, + ) + + if self.auto_wrap_policy.upper() == "TRANSFORMER_BASED_WRAP": + self.auto_wrap_policy = transformer_auto_wrap_policy + if self.transformer_cls_names_to_wrap is None: + self.transformer_cls_names_to_wrap = os.environ.get(env_prefix + "TRANSFORMER_CLS_TO_WRAP", None) + if isinstance(self.transformer_cls_names_to_wrap, str): + self.transformer_cls_names_to_wrap = self.transformer_cls_names_to_wrap.split(",") + elif self.auto_wrap_policy.upper() == "SIZE_BASED_WRAP": + self.auto_wrap_policy = size_based_auto_wrap_policy + if self.min_num_params is None: + self.min_num_params = int(os.environ.get(env_prefix + "MIN_NUM_PARAMS", 0)) + elif not isinstance(self.min_num_params, int): + raise ValueError( + f"`min_num_params` must be an integer. Got {self.min_num_params} of type {type(self.min_num_params)}" + ) + elif self.auto_wrap_policy.upper() == "NO_WRAP": + self.auto_wrap_policy = None + + if self.use_orig_params is None and self.fsdp_version == 1: + self.use_orig_params = str_to_bool(os.environ.get(env_prefix + "USE_ORIG_PARAMS", "False")) == 1 + if self.fsdp_version == 2 and self.use_orig_params is not None: + _fsdp2_warnings.add("use_orig_params is obsolete in FSDP2, as FSDP2 always uses the original parameters.") + self.use_orig_params = None + + if self.sync_module_states is None and self.fsdp_version == 1: + self.sync_module_states = str_to_bool(os.environ.get(env_prefix + "SYNC_MODULE_STATES", "False")) == 1 + if self.fsdp_version == 2 and self.sync_module_states is not None: + _fsdp2_warnings.add( + "sync_module_states is obsolete in FSDP2, as it is not needed anymore." + "Setting sync_module_states to None." 
+ ) + self.sync_module_states = None + + if self.forward_prefetch is None and self.fsdp_version == 1: + self.forward_prefetch = str_to_bool(os.environ.get(env_prefix + "FORWARD_PREFETCH", "False")) == 1 + if self.fsdp_version == 2 and self.forward_prefetch is not None: + raise ValueError("forward_prefetch is not yet implemented in FSDP2, set to None or use `fsdp_version=1`") + + if self.activation_checkpointing is None: + self.activation_checkpointing = ( + str_to_bool(os.environ.get(env_prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1 + ) + + if self.ignored_modules is None: + self.ignored_modules = os.environ.get(env_prefix + "IGNORED_MODULES", None) + + if self.cpu_ram_efficient_loading is None: + self.cpu_ram_efficient_loading = ( + str_to_bool(os.environ.get(env_prefix + "CPU_RAM_EFFICIENT_LOADING", "False")) == 1 + ) + # There's no need to specify sync_module_states in FSDP2 + if self.fsdp_version == 1 and self.cpu_ram_efficient_loading and not self.sync_module_states: + warnings.warn( + "sync_module_states cannot be False since efficient cpu ram loading enabled. " + "Setting sync_module_states to True." + ) + self.sync_module_states = True + + if self.cpu_ram_efficient_loading != bool( + str_to_bool(os.environ.get(env_prefix + "CPU_RAM_EFFICIENT_LOADING", "False")) + ): + env_var = env_prefix + "CPU_RAM_EFFICIENT_LOADING" + warnings.warn( + f"The `cpu_ram_efficient_loading` flag for `FullyShardedDataParallelPlugin` does not match the environment variable {env_var}. " + "Setting environment variable to match `cpu_ram_efficient_loading`." 
+ ) + os.environ[env_var] = str(self.cpu_ram_efficient_loading) + + if isinstance(self.mixed_precision_policy, dict): + self.set_mixed_precision(self.mixed_precision_policy) + if self.mixed_precision_policy is not None: + self.validate_mixed_precision_policy() + + if self.sync_module_states: + if is_npu_available(): + device = torch.npu.current_device() + elif is_mlu_available(): + device = torch.mlu.current_device() + elif is_musa_available(): + device = torch.musa.current_device() + elif is_cuda_available(): + device = torch.cuda.current_device() + elif is_xpu_available(): + device = torch.xpu.current_device() + elif is_hpu_available(): + device = torch.hpu.current_device() + else: + raise RuntimeError( + "There are currently no available devices found, must be one of 'XPU', 'CUDA', 'MLU', 'NPU', 'MUSA', or 'HPU'." + ) + # Create a function that will be used to initialize the parameters of the model + # when using `sync_module_states` + self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False) + + # Single warning for all deprecation warnings due to FSDP2 conversion + if _fsdp2_warnings: + logger.warning("Multiple deprecation warnings due to FSDP2 conversion:\n".join(_fsdp2_warnings)) + + def set_state_dict_type(self, state_dict_type=None): + """ + Set the state dict config based on the `StateDictType`. 
+ """ + from torch.distributed.fsdp.fully_sharded_data_parallel import ( + FullOptimStateDictConfig, + FullStateDictConfig, + ShardedOptimStateDictConfig, + ShardedStateDictConfig, + StateDictType, + ) + + # Override the state_dict_type if provided, typical use case: + # user trains with sharded, but final save is with full + if state_dict_type is not None: + self.state_dict_type = state_dict_type + + if self.state_dict_type is None: + self.state_dict_type = os.environ.get( + "FSDP_STATE_DICT_TYPE", + "FULL_STATE_DICT" if self.fsdp_version == 1 else "SHARDED_STATE_DICT", + ) + if isinstance(self.state_dict_type, str): + if self.state_dict_type.isdigit(): + self.state_dict_type = StateDictType(int(self.state_dict_type)) + else: + self.state_dict_type = StateDictType[self.state_dict_type.upper()] + + if self.state_dict_type == StateDictType.FULL_STATE_DICT: + if self.state_dict_config is None: + self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) + if self.optim_state_dict_config is None: + self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True) + elif self.state_dict_type == StateDictType.SHARDED_STATE_DICT: + if self.state_dict_config is None: + self.state_dict_config = ShardedStateDictConfig(offload_to_cpu=True) + if self.optim_state_dict_config is None: + self.optim_state_dict_config = ShardedOptimStateDictConfig(offload_to_cpu=True) + + if self.fsdp_version == 2 and self.state_dict_type == StateDictType.LOCAL_STATE_DICT: + raise ValueError( + "FSDP2 does not support LOCAL_STATE_DICT. " + "Please set `fsdp_state_dict_type` to `SHARDED_STATE_DICT` or `FULL_STATE_DICT`." 
+ ) + + def set_auto_wrap_policy(self, model): + """ + Given `model`, creates an `auto_wrap_policy` baesd on the passed in policy and if we can use the + `transformer_cls_to_wrap` + """ + from torch.distributed.fsdp.wrap import ( + size_based_auto_wrap_policy, + transformer_auto_wrap_policy, + ) + + # First base off of `_no_split_modules` + no_split_modules = getattr(model, "_no_split_modules", None) + default_transformer_cls_names_to_wrap = list(no_split_modules) if no_split_modules is not None else [] + if self.auto_wrap_policy == transformer_auto_wrap_policy: + if self.transformer_cls_names_to_wrap is None: + self.transformer_cls_names_to_wrap = default_transformer_cls_names_to_wrap + transformer_cls_to_wrap = set() + for layer_class in self.transformer_cls_names_to_wrap: + transformer_cls = get_module_class_from_name(model, layer_class) + if transformer_cls is None: + raise ValueError(f"Could not find the transformer layer class {layer_class} in the model.") + transformer_cls_to_wrap.add(transformer_cls) + # Finally we set the auto_wrap_policy to a callable + self.auto_wrap_policy = functools.partial( + self.auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap + ) + + elif self.auto_wrap_policy == size_based_auto_wrap_policy: + # If zero, we silently ignore it. + if self.min_num_params > 0: + self.auto_wrap_policy = functools.partial(self.auto_wrap_policy, min_num_params=self.min_num_params) + else: + self.auto_wrap_policy = None + + def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False): + "Sets the mixed precision policy for FSDP" + mixed_precision_mapping = { + "fp8": torch.bfloat16, + "fp16": torch.float16, + "bf16": torch.bfloat16, + "fp32": torch.float32, + } + dtype = mixed_precision + if isinstance(mixed_precision, str): + dtype = mixed_precision_mapping.get(mixed_precision, None) + if dtype is None: + raise ValueError( + f"Invalid mixed precision: {mixed_precision}. 
Must be one of {list(mixed_precision_mapping.keys())}" + ) + elif isinstance(mixed_precision, torch.dtype) and mixed_precision not in mixed_precision_mapping.values(): + raise ValueError( + f"Invalid mixed precision: {mixed_precision}. Must be one of {list(mixed_precision_mapping.values())}" + ) + + buffer_type = torch.float32 if buffer_autocast else dtype + + if self.fsdp_version == 1: + from torch.distributed.fsdp import MixedPrecision + elif self.fsdp_version == 2: + from torch.distributed.fsdp import MixedPrecisionPolicy as MixedPrecision + + if override or self.mixed_precision_policy is None: + dtype_args = {"param_dtype": dtype, "reduce_dtype": dtype} + if self.fsdp_version == 1: + dtype_args["buffer_dtype"] = buffer_type + else: + dtype_args["output_dtype"] = dtype + # TODO(s1ro1): `cast_forward_inputs` for FSDP2? + self.mixed_precision_policy = MixedPrecision(**dtype_args) + elif isinstance(self.mixed_precision_policy, dict): + # Check for incompatible types + valid_keys = ["param_dtype", "reduce_dtype"] + ( + ["buffer_dtype"] if self.fsdp_version == 1 else ["output_dtype"] + ) + missing_keys = [k for k in valid_keys if k not in self.mixed_precision_policy] + invalid_values = [ + k for k, v in self.mixed_precision_policy.items() if v not in mixed_precision_mapping.values() + ] + if missing_keys or invalid_values: + raise ValueError( + f"Invalid mixed precision policy: {self.mixed_precision_policy}. " + f"Must be a `dict` with keys {valid_keys}." + f"Values must be one of {list(mixed_precision_mapping.values())}" + ) + self.mixed_precision_policy = MixedPrecision(**self.mixed_precision_policy) + + def validate_mixed_precision_policy(self): + """ + Validates the mixed precision policy, abstracted away to not bring in the imports if not needed. 
+ """ + if self.fsdp_version == 2: + from torch.distributed.fsdp import MixedPrecisionPolicy as MixedPrecision + else: + from torch.distributed.fsdp import MixedPrecision + + if not isinstance(self.mixed_precision_policy, MixedPrecision): + required_type = ( + "`torch.distributed.fsdp.MixedPrecisionPolicy`" + if self.fsdp_version == 2 + else "`torch.distributed.fsdp.MixedPrecision`" + ) + raise ValueError(f"mixed_precision_policy must be an instance of {required_type}.") + + def set_cpu_offload(self): + if self.fsdp_version == 2: + from torch.distributed.fsdp import CPUOffloadPolicy, OffloadPolicy + else: + from torch.distributed.fsdp import CPUOffload + + if isinstance(self.cpu_offload, bool): + if self.fsdp_version == 2: + if not self.cpu_offload: + self.cpu_offload = OffloadPolicy() + else: + self.cpu_offload = CPUOffloadPolicy() + else: + self.cpu_offload = CPUOffload(offload_params=self.cpu_offload) + + def validate_cpu_offload(self): + if self.fsdp_version == 2: + from torch.distributed.fsdp import OffloadPolicy + else: + from torch.distributed.fsdp import CPUOffload + + if self.fsdp_version == 2 and not isinstance(self.cpu_offload, OffloadPolicy): + raise ValueError( + f"`cpu_offload` must be an instance of `torch.distributed.fsdp.OffloadPolicy` in FSDP2, got {self.cpu_offload}" + ) + if self.fsdp_version == 1 and not isinstance(self.cpu_offload, CPUOffload): + raise ValueError( + f"`cpu_offload` must be an instance of `torch.distributed.fsdp.CPUOffload` in FSDP1, got {self.cpu_offload}" + ) + + +@dataclass +class TorchTensorParallelPlugin: + """ + This plugin is used to enable tensor parallelism using PyTorch >= 2.0. 
+ """ + + tp_size: int = field( + default=1, + metadata={"help": "tensor parallel size will be used in the device mesh preparation"}, + ) + + # torch_device_mesh is of type "torch.distributed.DeviceMesh" + torch_device_mesh: Optional["torch.distributed.DeviceMesh"] = field(default=None) + + +@dataclass +class TorchContextParallelConfig: + """ + This class holds the configuration for context parallelism in PyTorch. + """ + + cp_comm_strategy: Optional[str] = field( + default=None, + metadata={ + "help": "Communication strategy for context parallelism. Can be one of 'allgather' or 'alltoall'. Defaults to 'allgather'." + }, + ) + + def __post_init__(self): + if not is_torch_version(">=", BETA_CP_AVAILABLE_PYTORCH_VERSION): + raise ValueError( + f"Context parallelism is only available in PyTorch {BETA_CP_AVAILABLE_PYTORCH_VERSION} and later versions. " + "Please upgrade your PyTorch version." + ) + if self.cp_comm_strategy is None: + self.cp_comm_strategy = os.environ.get("PARALLELISM_CONFIG_CP_COMM_STRATEGY", "allgather") + if self.cp_comm_strategy not in ["allgather", "alltoall"]: + raise ValueError( + f"Invalid cp_comm_strategy: {self.cp_comm_strategy}. Must be one of 'allgather' or 'alltoall'." + ) + + +@dataclass +class TorchTensorParallelConfig: + """ + Use this object in your [`Accelerator`] to customize your torch tensor parallelism. + """ + + enable_async_tp: bool = False + + def __post_init__(self): + if not is_torch_version(">=", BETA_TP_AVAILABLE_PYTORCH_VERSION): + raise ValueError( + f"Torch tensor parallelism is only available in PyTorch {BETA_TP_AVAILABLE_PYTORCH_VERSION} and later versions. " + "Please upgrade your PyTorch version." 
+ )
+
+ if not compare_versions("transformers", ">=", BETA_TP_AVAILABLE_TRANSFORMERS_VERSION):
+ raise ValueError(f"TP requires transformers >= {BETA_TP_AVAILABLE_TRANSFORMERS_VERSION}")
+
+ if self.enable_async_tp:
+ warnings.warn("Async tensor parallelism is currently not supported, ignoring this option.")
+
+
+@dataclass
+class MegatronLMPlugin:
+ """
+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
+ activation recomputation and optimized fused kernels.
+
+ Args:
+ tp_degree (`int`, defaults to `None`):
+ Tensor parallelism degree.
+ pp_degree (`int`, defaults to `None`):
+ Pipeline parallelism degree.
+ num_micro_batches (`int`, defaults to `None`):
+ Number of micro-batches.
+ gradient_clipping (`float`, defaults to `None`):
+ Gradient clipping value based on global L2 Norm (0 to disable).
+ sequence_parallelism (`bool`, defaults to `None`):
+ Enable sequence parallelism.
+ recompute_activations (`bool`, defaults to `None`):
+ Enable selective activation recomputation.
+ use_distributed_optimizer (`bool`, defaults to `None`):
+ Enable distributed optimizer.
+ pipeline_model_parallel_split_rank (`int`, defaults to `None`):
+ Rank where encoder and decoder should be split.
+ num_layers_per_virtual_pipeline_stage (`int`, defaults to `None`):
+ Number of layers per virtual pipeline stage.
+ is_train_batch_min (`str`, defaults to `True`):
+ If both train & eval dataloaders are specified, this will decide the `micro_batch_size`.
+ train_iters (`int`, defaults to `None`):
+ Total number of iterations to train over all training runs. Note that either train-iters or train-samples
+ should be provided when using `MegatronLMDummyScheduler`.
+ train_samples (`int`, defaults to `None`):
+ Total number of samples to train over all training runs. Note that either train-iters or train-samples
+ should be provided when using `MegatronLMDummyScheduler`. 
+ weight_decay_incr_style (`str`, defaults to `'constant'`):
+ Weight decay increment function. choices=["constant", "linear", "cosine"].
+ start_weight_decay (`float`, defaults to `None`):
+ Initial weight decay coefficient for L2 regularization.
+ end_weight_decay (`float`, defaults to `None`):
+ End of run weight decay coefficient for L2 regularization.
+ lr_decay_style (`str`, defaults to `'linear'`):
+ Learning rate decay function. choices=['constant', 'linear', 'cosine'].
+ lr_decay_iters (`int`, defaults to `None`):
+ Number of iterations for learning rate decay. If None defaults to `train_iters`.
+ lr_decay_samples (`int`, defaults to `None`):
+ Number of samples for learning rate decay. If None defaults to `train_samples`.
+ lr_warmup_iters (`int`, defaults to `None`):
+ Number of iterations to linearly warmup learning rate over.
+ lr_warmup_samples (`int`, defaults to `None`):
+ Number of samples to linearly warmup learning rate over.
+ lr_warmup_fraction (`float`, defaults to `None`):
+ Fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over.
+ min_lr (`float`, defaults to `0`):
+ Minimum value for learning rate. The scheduler clips values below this threshold.
+ consumed_samples (`List`, defaults to `None`):
+ Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.
+ no_wd_decay_cond (`Optional`, defaults to `None`):
+ Condition to disable weight decay.
+ scale_lr_cond (`Optional`, defaults to `None`):
+ Condition to scale learning rate.
+ lr_mult (`float`, defaults to `1.0`):
+ Learning rate multiplier.
+ megatron_dataset_flag (`bool`, defaults to `False`):
+ Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.
+ seq_length (`int`, defaults to `None`):
+ Maximum sequence length to process.
+ encoder_seq_length (`int`, defaults to `None`):
+ Maximum sequence length to process for the encoder. 
+ decoder_seq_length (`int`, defaults to `None`): + Maximum sequence length to process for the decoder. + tensorboard_dir (`str`, defaults to `None`): + Path to save tensorboard logs. + set_all_logging_options (`bool`, defaults to `False`): + Whether to set all logging options. + eval_iters (`int`, defaults to `100`): + Number of iterations to run for evaluation validation/test for. + eval_interval (`int`, defaults to `1000`): + Interval between running evaluation on validation set. + return_logits (`bool`, defaults to `False`): + Whether to return logits from the model. + custom_train_step_class (`Optional`, defaults to `None`): + Custom train step class. + custom_train_step_kwargs (`Optional`, defaults to `None`): + Custom train step kwargs. + custom_model_provider_function (`Optional`, defaults to `None`): + Custom model provider function. + custom_prepare_model_function (`Optional`, defaults to `None`): + Custom prepare model function. + custom_megatron_datasets_provider_function (`Optional`, defaults to `None`): + Custom megatron train_valid_test datasets provider function. + custom_get_batch_function (`Optional`, defaults to `None`): + Custom get batch function. + custom_loss_function (`Optional`, defaults to `None`): + Custom loss function. + other_megatron_args (`Optional`, defaults to `None`): + Other Megatron-LM arguments. Please refer Megatron-LM. 
+ """ + + tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."}) + pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."}) + num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."}) + gradient_clipping: float = field( + default=None, + metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"}, + ) + sequence_parallelism: bool = field( + default=None, + metadata={"help": "enable sequence parallelism"}, + ) + recompute_activations: bool = field( + default=None, + metadata={"help": "enable selective activation recomputation"}, + ) + use_distributed_optimizer: bool = field( + default=None, + metadata={"help": "enable distributed optimizer"}, + ) + pipeline_model_parallel_split_rank: int = field( + default=None, + metadata={"help": "Rank where encoder and decoder should be split."}, + ) + num_layers_per_virtual_pipeline_stage: int = field( + default=None, metadata={"help": "Number of layers per virtual pipeline stage."} + ) + is_train_batch_min: str = field( + default=True, + metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"}, + ) + train_iters: int = field( + default=None, + metadata={ + "help": "Total number of iterations to train over all training runs. " + "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" + }, + ) + train_samples: int = field( + default=None, + metadata={ + "help": "Total number of samples to train over all training runs. " + "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" + }, + ) + weight_decay_incr_style: str = field( + default="constant", + metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. 
'}, + ) + start_weight_decay: float = field( + default=None, + metadata={"help": "Initial weight decay coefficient for L2 regularization."}, + ) + end_weight_decay: float = field( + default=None, + metadata={"help": "End of run weight decay coefficient for L2 regularization."}, + ) + lr_decay_style: str = field( + default="linear", + metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."}, + ) + lr_decay_iters: int = field( + default=None, + metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."}, + ) + lr_decay_samples: int = field( + default=None, + metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."}, + ) + lr_warmup_iters: int = field( + default=None, + metadata={"help": "number of iterations to linearly warmup learning rate over."}, + ) + lr_warmup_samples: int = field( + default=None, + metadata={"help": "number of samples to linearly warmup learning rate over."}, + ) + lr_warmup_fraction: float = field( + default=None, + metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."}, + ) + min_lr: float = field( + default=0, + metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."}, + ) + consumed_samples: list[int] = field( + default=None, + metadata={ + "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call." 
+ }, + ) + no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."}) + scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."}) + lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."}) + megatron_dataset_flag: bool = field( + default=False, + metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."}, + ) + seq_length: int = field( + default=None, + metadata={"help": "Maximum sequence length to process."}, + ) + encoder_seq_length: int = field( + default=None, + metadata={"help": "Maximum sequence length to process for the encoder."}, + ) + decoder_seq_length: int = field( + default=None, + metadata={"help": "Maximum sequence length to process for the decoder."}, + ) + tensorboard_dir: str = field( + default=None, + metadata={"help": "Path to save tensorboard logs."}, + ) + set_all_logging_options: bool = field( + default=False, + metadata={"help": "Whether to set all logging options."}, + ) + eval_iters: int = field( + default=100, + metadata={"help": "Number of iterations to run for evaluation validation/test for."}, + ) + eval_interval: int = field( + default=1000, + metadata={"help": "Interval between running evaluation on validation set."}, + ) + return_logits: bool = field( + default=False, + metadata={"help": "Whether to return logits from the model."}, + ) + + # custom train step args + custom_train_step_class: Optional[Any] = field( + default=None, + metadata={"help": "Custom train step class."}, + ) + custom_train_step_kwargs: Optional[dict[str, Any]] = field( + default=None, + metadata={"help": "Custom train step kwargs."}, + ) + + # custom model args + custom_model_provider_function: Optional[Callable] = field( + default=None, + metadata={"help": "Custom model provider function."}, + ) + custom_prepare_model_function: Optional[Callable] = field( + default=None, + 
metadata={"help": "Custom prepare model function."}, + ) + custom_megatron_datasets_provider_function: Optional[Callable] = field( + default=None, + metadata={"help": "Custom megatron train_valid_test datasets provider function."}, + ) + custom_get_batch_function: Optional[Callable] = field( + default=None, + metadata={"help": "Custom get batch function."}, + ) + custom_loss_function: Optional[Callable] = field( + default=None, + metadata={"help": "Custom loss function."}, + ) + + # remaining args such as enabling Alibi/ROPE positional embeddings, + # wandb logging, Multi-Query Attention, etc. + other_megatron_args: Optional[dict[str, Any]] = field( + default=None, + metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"}, + ) + + def __post_init__(self): + prefix = "MEGATRON_LM_" + if self.tp_degree is None: + self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1)) + if self.pp_degree is None: + self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1)) + if self.num_micro_batches is None: + self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1)) + if self.gradient_clipping is None: + self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0)) + if self.recompute_activations is None: + self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1 + if self.use_distributed_optimizer is None: + self.use_distributed_optimizer = ( + str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1 + ) + if self.sequence_parallelism is None: + self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1 + + if self.pp_degree > 1 or self.use_distributed_optimizer: + self.DDP_impl = "local" + else: + self.DDP_impl = "torch" + + if self.consumed_samples is not None: + if len(self.consumed_samples) == 1: + self.consumed_samples.extend([0, 0]) + elif len(self.consumed_samples) == 2: + 
self.consumed_samples.append(0)
+
+ self.megatron_lm_default_args = {
+ "tensor_model_parallel_size": self.tp_degree,
+ "pipeline_model_parallel_size": self.pp_degree,
+ "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank,
+ "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage,
+ "DDP_impl": self.DDP_impl,
+ "use_distributed_optimizer": self.use_distributed_optimizer,
+ "sequence_parallel": self.sequence_parallelism,
+ "clip_grad": self.gradient_clipping,
+ "num_micro_batches": self.num_micro_batches,
+ "consumed_samples": self.consumed_samples,
+ "no_wd_decay_cond": self.no_wd_decay_cond,
+ "scale_lr_cond": self.scale_lr_cond,
+ "lr_mult": self.lr_mult,
+ "megatron_dataset_flag": self.megatron_dataset_flag,
+ "eval_iters": self.eval_iters,
+ "eval_interval": self.eval_interval,
+ }
+ if self.recompute_activations:
+ self.megatron_lm_default_args["recompute_granularity"] = "selective"
+ if self.tensorboard_dir is not None:
+ self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
+ if self.set_all_logging_options:
+ self.set_tensorboard_logging_options()
+ if self.other_megatron_args is not None:
+ self.megatron_lm_default_args.update(self.other_megatron_args)
+
+ def set_network_size_args(self, model, batch_data=None):
+ model_config_type = model.config.model_type.lower()
+ for model_type in MODEL_CONFIGS_TO_MEGATRON_PARSERS.keys():
+ if model_type in model_config_type:
+ MODEL_CONFIGS_TO_MEGATRON_PARSERS[model_type](self, model, batch_data)
+ return
+ raise ValueError(
+ f"Accelerate Megatron-LM integration does not support {model_config_type} model. "
+ "You can add your own model config parser." 
+ ) + + def set_mixed_precision(self, mixed_precision): + if mixed_precision == "fp16": + self.megatron_lm_default_args["fp16"] = True + elif mixed_precision == "bf16": + self.megatron_lm_default_args["bf16"] = True + self.DDP_impl = "local" + self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl + + def set_training_args(self, micro_batch_size, dp_degree): + self.data_parallel_size = dp_degree + self.micro_batch_size = micro_batch_size + self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches + self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size + self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size + self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size + + def set_optimizer_type(self, optimizer): + optimizer_name = optimizer.__class__.__name__.lower() + if "adam" in optimizer_name: + self.megatron_lm_default_args["optimizer"] = "adam" + self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0] + self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1] + self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"] + elif "sgd" in optimizer_name: + self.megatron_lm_default_args["optimizer"] = "sgd" + self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"] + else: + raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM") + + self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"] + self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"] + + def set_scheduler_args(self, scheduler): + if self.train_iters is None: + self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"] + if self.train_samples is not None: + self.train_samples = None + warnings.warn( + "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training." 
+ ) + if self.lr_warmup_iters is None: + self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"] + if self.lr_warmup_samples is not None: + warnings.warn( + "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training." + ) + self.lr_warmup_samples = 0 + + self.megatron_lm_default_args["train_iters"] = self.train_iters + self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters + self.megatron_lm_default_args["train_samples"] = self.train_samples + self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples + self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters + self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples + self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction + self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style + self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style + self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay + self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay + self.megatron_lm_default_args["min_lr"] = self.min_lr + + def set_tensorboard_logging_options(self): + from megatron.training.arguments import _add_logging_args + + parser = argparse.ArgumentParser() + parser = _add_logging_args(parser) + logging_args = parser.parse_known_args() + self.dataset_args = vars(logging_args[0]) + for key, value in self.dataset_args.items(): + if key.startswith("log_"): + self.megatron_lm_default_args[key] = True + elif key.startswith("no_log_"): + self.megatron_lm_default_args[key.replace("no_", "")] = True + + +MODEL_CONFIGS_TO_MEGATRON_PARSERS = {} + + +def add_model_config_to_megatron_parser(model_type: str): + def add_model_config_parser_helper(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + 
MODEL_CONFIGS_TO_MEGATRON_PARSERS[model_type] = func + return wrapper + + return add_model_config_parser_helper + + +@add_model_config_to_megatron_parser("megatron-bert") +def parse_bert_config(megatron_lm_plugin, model, batch_data): + model_type_name = "bert" + num_layers = model.config.num_hidden_layers + hidden_size = model.config.hidden_size + num_attention_heads = model.config.num_attention_heads + max_position_embeddings = model.config.max_position_embeddings + num_labels = model.config.num_labels + orig_vocab_size = model.config.vocab_size + pretraining_flag = False + if "maskedlm" in model.__class__.__name__.lower(): + pretraining_flag = True + if megatron_lm_plugin.seq_length is not None: + if megatron_lm_plugin.encoder_seq_length is not None: + warnings.warn("Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.") + megatron_lm_plugin.seq_length = megatron_lm_plugin.encoder_seq_length + elif megatron_lm_plugin.encoder_seq_length is not None: + megatron_lm_plugin.seq_length = megatron_lm_plugin.encoder_seq_length + elif batch_data is not None: + megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] + else: + megatron_lm_plugin.seq_length = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length + megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name + megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers + megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size + megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads + megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag + megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size + megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = 
model.config.return_dict + megatron_lm_plugin.megatron_lm_default_args["num_labels"] = num_labels + + +@add_model_config_to_megatron_parser("gpt2") +def parse_gpt2_config(megatron_lm_plugin, model, batch_data): + model_type_name = "gpt" + num_layers = model.config.n_layer + hidden_size = model.config.n_embd + num_attention_heads = model.config.n_head + max_position_embeddings = model.config.n_positions + orig_vocab_size = model.config.vocab_size + pretraining_flag = True + if megatron_lm_plugin.seq_length is not None: + if megatron_lm_plugin.decoder_seq_length is not None: + warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.") + megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length + elif megatron_lm_plugin.decoder_seq_length is not None: + megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length + elif batch_data is not None: + megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] + else: + megatron_lm_plugin.seq_length = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length + megatron_lm_plugin.megatron_lm_default_args["return_logits"] = megatron_lm_plugin.return_logits + megatron_lm_plugin.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer" + megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name + megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers + megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size + megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads + megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag + megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size + megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = 
model.config.return_dict + + +@add_model_config_to_megatron_parser("t5") +def parse_t5_config(megatron_lm_plugin, model, batch_data): + model_type_name = "t5" + num_layers = model.config.num_layers + hidden_size = model.config.d_model + num_attention_heads = model.config.num_heads + max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024 + orig_vocab_size = model.config.vocab_size + pretraining_flag = True + if megatron_lm_plugin.encoder_seq_length is None: + if batch_data is not None: + megatron_lm_plugin.encoder_seq_length = batch_data["input_ids"].shape[1] + else: + megatron_lm_plugin.encoder_seq_length = max_position_embeddings + if megatron_lm_plugin.decoder_seq_length is None: + if batch_data is not None: + megatron_lm_plugin.decoder_seq_length = batch_data["labels"].shape[1] + else: + megatron_lm_plugin.decoder_seq_length = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["encoder_seq_length"] = megatron_lm_plugin.encoder_seq_length + megatron_lm_plugin.megatron_lm_default_args["decoder_seq_length"] = megatron_lm_plugin.decoder_seq_length + megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name + megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers + megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size + megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads + megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag + megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size + megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict + + +@add_model_config_to_megatron_parser("llama") +def parse_llama_config(megatron_lm_plugin, model, batch_data): + model_type_name = "gpt" + num_layers = model.config.num_hidden_layers + 
pretraining_flag = True + hidden_size = model.config.hidden_size + num_attention_heads = model.config.num_attention_heads + orig_vocab_size = model.config.vocab_size + + max_position_embeddings = model.config.max_position_embeddings + seq_length = getattr(model.config, "max_sequence_length", None) + if megatron_lm_plugin.seq_length is None: + if seq_length is not None: + megatron_lm_plugin.seq_length = seq_length + elif megatron_lm_plugin.decoder_seq_length is not None: + megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length + elif batch_data is not None: + megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] + else: + megatron_lm_plugin.seq_length = max_position_embeddings + + megatron_lm_plugin.megatron_lm_default_args["return_logits"] = megatron_lm_plugin.return_logits + megatron_lm_plugin.megatron_lm_default_args["tokenizer_type"] = "Llama2Tokenizer" + megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name + megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers + megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag + megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size + megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads + megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size + megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings + megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length + megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict + + +@dataclass +class BnbQuantizationConfig: + """ + A plugin to enable BitsAndBytes 4bit and 8bit quantization + + Args: + load_in_8bit (`bool`, defaults to `False`): + Enable 8bit quantization. + llm_int8_threshold (`float`, defaults to `6.0`): + Value of the outliner threshold. Only relevant when `load_in_8bit=True`. 
+ load_in_4bit (`bool`, defaults to `False`):
+ Enable 4bit quantization.
+ bnb_4bit_quant_type (`str`, defaults to `fp4`):
+ Set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','nf4'}.
+ bnb_4bit_use_double_quant (`bool`, defaults to `False`):
+ Enable nested quantization where the quantization constants from the first quantization are quantized
+ again.
+ bnb_4bit_compute_dtype (`str`, defaults to `fp16`):
+ This sets the computational type which might be different than the input type. For example, inputs might be
+ fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}.
+ torch_dtype (`torch.dtype`, defaults to `None`):
+ This sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value
+ to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model.
+ skip_modules (`List[str]`, defaults to `None`):
+ An explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`.
+ keep_in_fp32_modules (`List`, defaults to `None`):
+ An explicit list of the modules that we don't quantize. We keep them in `torch.float32`.
+ """
+
+ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
+
+ llm_int8_threshold: float = field(
+ default=6.0,
+ metadata={"help": "value of the outlier threshold. only relevant when load_in_8bit=True"},
+ )
+
+ load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
+
+ bnb_4bit_quant_type: str = field(
+ default="fp4",
+ metadata={
+ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','nf4'}."
+ },
+ )
+
+ bnb_4bit_use_double_quant: bool = field(
+ default=False,
+ metadata={
+ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again." 
+ }, + ) + + bnb_4bit_compute_dtype: str = field( + default="fp16", + metadata={ + "help": "This sets the computational type which might be different than the input time. For example, inputs might be " + "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}." + }, + ) + + torch_dtype: torch.dtype = field( + default=None, + metadata={ + "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value" + "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model " + }, + ) + + skip_modules: list[str] = field( + default=None, + metadata={ + "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`." + }, + ) + + keep_in_fp32_modules: list[str] = field( + default=None, + metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."}, + ) + + def __post_init__(self): + """ + Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 
+ """ + if not isinstance(self.load_in_8bit, bool): + raise ValueError("load_in_8bit must be a boolean") + + if not isinstance(self.load_in_4bit, bool): + raise ValueError("load_in_4bit must be a boolean") + + if self.load_in_4bit and self.load_in_8bit: + raise ValueError("load_in_4bit and load_in_8bit can't be both True") + + if not self.load_in_4bit and not self.load_in_8bit: + raise ValueError("load_in_4bit and load_in_8bit can't be both False") + + if not isinstance(self.llm_int8_threshold, (int, float)): + raise ValueError("llm_int8_threshold must be a float or an int") + + if not isinstance(self.bnb_4bit_quant_type, str): + raise ValueError("bnb_4bit_quant_type must be a string") + elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]: + raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}") + + if not isinstance(self.bnb_4bit_use_double_quant, bool): + raise ValueError("bnb_4bit_use_double_quant must be a boolean") + + if isinstance(self.bnb_4bit_compute_dtype, str): + if self.bnb_4bit_compute_dtype == "fp32": + self.bnb_4bit_compute_dtype = torch.float32 + elif self.bnb_4bit_compute_dtype == "fp16": + self.bnb_4bit_compute_dtype = torch.float16 + elif self.bnb_4bit_compute_dtype == "bf16": + self.bnb_4bit_compute_dtype = torch.bfloat16 + else: + raise ValueError( + f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}" + ) + elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): + raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") + + if self.skip_modules is not None and not isinstance(self.skip_modules, list): + raise ValueError("skip_modules must be a list of strings") + + if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list): + raise ValueError("keep_in_fp_32_modules must be a list of strings") + + if self.load_in_4bit: + self.target_dtype = CustomDtype.INT4 + + if self.load_in_8bit: + 
self.target_dtype = torch.int8 + + if self.load_in_4bit and self.llm_int8_threshold != 6.0: + warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit") + + if isinstance(self.torch_dtype, str): + if self.torch_dtype == "fp32": + self.torch_dtype = torch.float32 + elif self.torch_dtype == "fp16": + self.torch_dtype = torch.float16 + elif self.torch_dtype == "bf16": + self.torch_dtype = torch.bfloat16 + else: + raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}") + if self.load_in_8bit and self.torch_dtype is None: + self.torch_dtype = torch.float16 + + if self.load_in_4bit and self.torch_dtype is None: + self.torch_dtype = self.bnb_4bit_compute_dtype + + if not isinstance(self.torch_dtype, torch.dtype): + raise ValueError("torch_dtype must be a torch.dtype") + + +def get_module_class_from_name(module, name): + """ + Gets a class from a module by its name. + + Args: + module (`torch.nn.Module`): The module to get the class from. + name (`str`): The name of the class. + """ + modules_children = list(module.children()) + if module.__class__.__name__ == name: + return module.__class__ + elif len(modules_children) == 0: + return + else: + for child_module in modules_children: + module_class = get_module_class_from_name(child_module, name) + if module_class is not None: + return module_class diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/deepspeed.py b/venv/lib/python3.10/site-packages/accelerate/utils/deepspeed.py new file mode 100644 index 0000000000000000000000000000000000000000..22db891c63d9bd48691acd87a15a206c270017a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/deepspeed.py @@ -0,0 +1,385 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +import os +from copy import deepcopy + +from torch import optim + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler +from .dataclasses import DistributedType +from .imports import is_bnb_available +from .versions import compare_versions + + +def map_pytorch_optim_to_deepspeed(optimizer): + """ + Args: + optimizer: torch.optim.Optimizer + + Returns the DeepSeedCPUOptimizer (deepspeed.ops) version of the optimizer. + """ + + defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]} + + # Select the DeepSpeedCPUOptimizer based on the original optimizer class. + # DeepSpeedCPUAdam is the default + from deepspeed.ops.adam import DeepSpeedCPUAdam + + optimizer_class = DeepSpeedCPUAdam + + # For DeepSpeedCPUAdam (adamw_mode) + if compare_versions("deepspeed", ">=", "0.3.1"): + defaults["adamw_mode"] = False + is_adaw = isinstance(optimizer, optim.AdamW) + + if is_bnb_available() and not is_adaw: + import bitsandbytes.optim as bnb_opt + + if isinstance(optimizer, (bnb_opt.AdamW, bnb_opt.AdamW32bit)): + try: + is_adaw = optimizer.optim_bits == 32 + except AttributeError: + is_adaw = optimizer.args.optim_bits == 32 + else: + is_adaw = False + + if is_adaw: + defaults["adamw_mode"] = True + + # For DeepSpeedCPUAdagrad + if compare_versions("deepspeed", ">=", "0.5.5"): + # Check if the optimizer is PyTorch's Adagrad. 
+ is_ada = isinstance(optimizer, optim.Adagrad) + # If not, and bitsandbytes is available, + # # check if the optimizer is the 32-bit bitsandbytes Adagrad. + if is_bnb_available() and not is_ada: + import bitsandbytes.optim as bnb_opt + + if isinstance(optimizer, (bnb_opt.Adagrad, bnb_opt.Adagrad32bit)): + try: + is_ada = optimizer.optim_bits == 32 + except AttributeError: + is_ada = optimizer.args.optim_bits == 32 + if is_ada: + from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad + + optimizer_class = DeepSpeedCPUAdagrad + + # For DeepSpeedCPULion + if is_bnb_available(min_version="0.38.0") and compare_versions("deepspeed", ">=", "0.11.0"): + from bitsandbytes.optim import Lion, Lion32bit + + if isinstance(optimizer, (Lion, Lion32bit)): + try: + is_bnb_32bits = optimizer.optim_bits == 32 + except AttributeError: + is_bnb_32bits = optimizer.args.optim_bits == 32 + if is_bnb_32bits: + from deepspeed.ops.lion import DeepSpeedCPULion + + optimizer_class = DeepSpeedCPULion + + return optimizer_class(optimizer.param_groups, **defaults) + + +def get_active_deepspeed_plugin(state): + """ + Returns the currently active DeepSpeedPlugin. + + Raises: + ValueError: If DeepSpeed was not enabled and this function is called. + """ + if state.distributed_type != DistributedType.DEEPSPEED: + raise ValueError( + "Couldn't retrieve the active `DeepSpeedPlugin` as none were enabled. " + "Please make sure that either `Accelerator` is configured for `deepspeed` " + "or make sure that the desired `DeepSpeedPlugin` has been enabled (`AcceleratorState().select_deepspeed_plugin(name)`) " + "before calling this function." + ) + if not isinstance(state.deepspeed_plugins, dict): + return state.deepspeed_plugins + return next(plugin for plugin in state.deepspeed_plugins.values() if plugin.selected) + + +class HfDeepSpeedConfig: + """ + This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. 
+ + A `weakref` of this object is stored in the module's globals to be able to access the config from areas where + things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore + it's important that this object remains alive while the program is still running. + + [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration + with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic + the DeepSpeed configuration is not modified in any way. + + Args: + config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. + + """ + + def __init__(self, config_file_or_dict): + if isinstance(config_file_or_dict, dict): + # Don't modify user's data should they want to reuse it (e.g. in tests), because once we + # modified it, it will not be accepted here again, since `auto` values would have been overridden + config = deepcopy(config_file_or_dict) + elif os.path.exists(config_file_or_dict): + with open(config_file_or_dict, encoding="utf-8") as f: + config = json.load(f) + else: + try: + try: + # First try parsing as JSON directly + config = json.loads(config_file_or_dict) + except json.JSONDecodeError: + # If that fails, try base64 decoding + config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") + config = json.loads(config_decoded) + except (UnicodeDecodeError, AttributeError, ValueError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. 
Received: {config_file_or_dict}" + ) + + self.config = config + + self.set_stage_and_offload() + + def set_stage_and_offload(self): + # zero stage - this is done as early as possible, before model is created, to allow + # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object + # during ``zero.Init()`` which needs to know the dtype, and some other hparams. + self._stage = self.get_value("zero_optimization.stage", -1) + + # offload + self._offload = False + if self.is_zero2() or self.is_zero3(): + offload_devices_valid = set(["cpu", "nvme"]) + offload_devices = set( + [ + self.get_value("zero_optimization.offload_optimizer.device"), + self.get_value("zero_optimization.offload_param.device"), + ] + ) + if len(offload_devices & offload_devices_valid) > 0: + self._offload = True + + def find_config_node(self, ds_key_long): + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + ds_key = nodes.pop() + for node in nodes: + config = config.get(node) + if config is None: + return None, ds_key + + return config, ds_key + + def get_value(self, ds_key_long, default=None): + """ + Returns the set value or `default` if no value is set + """ + config, ds_key = self.find_config_node(ds_key_long) + if config is None: + return default + return config.get(ds_key, default) + + def del_config_sub_tree(self, ds_key_long, must_exist=False): + """ + Deletes a sub-section of the config file if it's found. + + Unless `must_exist` is `True` the section doesn't have to exist. 
+ """ + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + for node in nodes: + parent_config = config + config = config.get(node) + if config is None: + if must_exist: + raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") + else: + return + + # if found remove it + if parent_config is not None: + parent_config.pop(node) + + def is_true(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). + + """ + value = self.get_value(ds_key_long) + return False if value is None else bool(value) + + def is_false(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). + """ + value = self.get_value(ds_key_long) + return False if value is None else not bool(value) + + def is_zero2(self): + return self._stage == 2 + + def is_zero3(self): + return self._stage == 3 + + def is_offload(self): + return self._offload + + +class DeepSpeedEngineWrapper: + """ + Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop. 
+ + Args: + engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap + """ + + def __init__(self, engine): + self.engine = engine + + def backward(self, loss, sync_gradients=True, **kwargs): + # Set gradient accumulation boundary based on Accelerate's sync_gradients state + # This tells DeepSpeed whether this is the final micro-batch before gradient sync + self.engine.set_gradient_accumulation_boundary(is_boundary=sync_gradients) + + # runs backpropagation and handles mixed precision + self.engine.backward(loss, **kwargs) + + # Only perform step and related operations at gradient accumulation boundaries + if sync_gradients: + # Deepspeed's `engine.step` performs the following operations: + # - gradient accumulation check + # - gradient clipping + # - optimizer step + # - zero grad + # - checking overflow + # - lr_scheduler step (only if engine.lr_scheduler is not None) + self.engine.step() + # and this plugin overrides the above calls with no-ops when Accelerate runs under + # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple + # training loop that works transparently under many training regimes. + + def get_global_grad_norm(self): + """Get the global gradient norm from DeepSpeed engine.""" + grad_norm = self.engine.get_global_grad_norm() + # Convert to scalar if it's a tensor + if hasattr(grad_norm, "item"): + return grad_norm.item() + return grad_norm + + +class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): + """ + Internal wrapper around a deepspeed optimizer. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + """ + + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + self.__has_overflow__ = hasattr(self.optimizer, "overflow") + + def zero_grad(self, set_to_none=None): + pass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + if self.__has_overflow__: + return self.optimizer.overflow + return False + + +class DeepSpeedSchedulerWrapper(AcceleratedScheduler): + """ + Internal wrapper around a deepspeed scheduler. + + Args: + scheduler (`torch.optim.lr_scheduler.LambdaLR`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + """ + + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + +class DummyOptim: + """ + Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training + loop when optimizer config is specified in the deepspeed config file. + + Args: + lr (float): + Learning rate. + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + weight_decay (float): + Weight decay. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): + self.params = params + self.lr = lr + self.weight_decay = weight_decay + self.kwargs = kwargs + + +class DummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int, *optional*): + Total number of steps. + warmup_num_steps (int, *optional*): + Number of steps for warmup. 
+ lr_scheduler_callable (callable, *optional*): + A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.lr_scheduler_callable = lr_scheduler_callable + self.kwargs = kwargs diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/environment.py b/venv/lib/python3.10/site-packages/accelerate/utils/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..c913702ef30290ed3d19c08a71213663297f32de --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/environment.py @@ -0,0 +1,421 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import math +import os +import platform +import subprocess +import sys +from contextlib import contextmanager +from dataclasses import dataclass, field +from functools import lru_cache, wraps +from shutil import which +from typing import Optional, Union + +import torch +from packaging.version import parse + + +logger = logging.getLogger(__name__) + + +def convert_dict_to_env_variables(current_env: dict): + """ + Verifies that all keys and values in `current_env` do not contain illegal keys or values, and returns a list of + strings as the result. + + Example: + ```python + >>> from accelerate.utils.environment import verify_env + + >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": ">> valid_env_items = verify_env(env) + >>> print(valid_env_items) + ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"] + ``` + """ + forbidden_chars = [";", "\n", "<", ">", " "] + valid_env_items = [] + for key, value in current_env.items(): + if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1: + valid_env_items.append(f"{key}={value}\n") + else: + logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.") + return valid_env_items + + +def str_to_bool(value, to_bool: bool = False) -> Union[int, bool]: + """ + Converts a string representation of truth to `True` (1) or `False` (0). 
+ + True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`; + """ + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 if not to_bool else True + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 if not to_bool else False + else: + raise ValueError(f"invalid truth value {value}") + + +def get_int_from_env(env_keys, default): + """Returns the first positive env value found in the `env_keys` list or the default.""" + for e in env_keys: + val = int(os.environ.get(e, -1)) + if val >= 0: + return val + return default + + +def parse_flag_from_env(key, default=False): + """Returns truthy value for `key` from the env if available else the default.""" + value = os.environ.get(key, str(default)) + return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int... + + +def parse_choice_from_env(key, default="no"): + value = os.environ.get(key, str(default)) + return value + + +def are_libraries_initialized(*library_names: str) -> list[str]: + """ + Checks if any of `library_names` are imported in the environment. Will return any names that are. + """ + return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()] + + +def _nvidia_smi(): + """ + Returns the right nvidia-smi command based on the system. + """ + if platform.system() == "Windows": + # If platform is Windows and nvidia-smi can't be found in path + # try from systemd drive with default installation path + command = which("nvidia-smi") + if command is None: + command = f"{os.environ['systemdrive']}\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" + else: + command = "nvidia-smi" + return command + + +def get_gpu_info(): + """ + Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA. + + Largely based on the `gputil` library. 
+ """ + # Returns as list of `n` GPUs and their names + output = subprocess.check_output( + [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True + ) + output = output.strip() + gpus = output.split(os.linesep) + # Get names from output + gpu_count = len(gpus) + gpu_names = [gpu.split(",")[1].strip() for gpu in gpus] + return gpu_names, gpu_count + + +def get_driver_version(): + """ + Returns the driver version + + In the case of multiple GPUs, will return the first. + """ + output = subprocess.check_output( + [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True + ) + output = output.strip() + return output.split(os.linesep)[0] + + +def check_cuda_p2p_ib_support(): + """ + Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after + the 3090. + + Noteably uses `nvidia-smi` instead of torch to not initialize CUDA. + """ + try: + device_names, device_count = get_gpu_info() + # As new consumer GPUs get released, add them to `unsupported_devices`` + unsupported_devices = {"RTX 40"} + if device_count > 1: + if any( + unsupported_device in device_name + for device_name in device_names + for unsupported_device in unsupported_devices + ): + # Check if they have the right driver version + acceptable_driver_version = "550.40.07" + current_driver_version = get_driver_version() + if parse(current_driver_version) < parse(acceptable_driver_version): + return False + return True + except Exception: + pass + return True + + +@lru_cache +def check_cuda_fp8_capability(): + """ + Checks if the current GPU available supports FP8. + + Notably might initialize `torch.cuda` to check. 
+ """ + + try: + # try to get the compute capability from nvidia-smi + output = subprocess.check_output( + [_nvidia_smi(), "--query-gpu=compute_capability", "--format=csv,noheader"], universal_newlines=True + ) + output = output.strip() + # we take the first GPU's compute capability + compute_capability = tuple(map(int, output.split(os.linesep)[0].split("."))) + except Exception: + compute_capability = torch.cuda.get_device_capability() + + return compute_capability >= (8, 9) + + +@dataclass +class CPUInformation: + """ + Stores information about the CPU in a distributed environment. It contains the following attributes: + - rank: The rank of the current process. + - world_size: The total number of processes in the world. + - local_rank: The rank of the current process on the local node. + - local_world_size: The total number of processes on the local node. + """ + + rank: int = field(default=0, metadata={"help": "The rank of the current process."}) + world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."}) + local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."}) + local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."}) + + +def get_cpu_distributed_information() -> CPUInformation: + """ + Returns various information about the environment in relation to CPU distributed training as a `CPUInformation` + dataclass. 
+ """ + information = {} + information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) + information["world_size"] = get_int_from_env( + ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1 + ) + information["local_rank"] = get_int_from_env( + ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 + ) + information["local_world_size"] = get_int_from_env( + ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], + 1, + ) + return CPUInformation(**information) + + +def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None: + """ + Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the + affinity to set, ideally you should use `utils.environment.set_numa_affinity` instead. + + Args: + local_process_index (int): + The index of the current process on the current server. + verbose (bool, *optional*): + Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True. + """ + if verbose is None: + verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False) + if torch.cuda.is_available(): + from accelerate.utils import is_pynvml_available + + if not is_pynvml_available(): + raise ImportError( + "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. 
(`pip install pynvml`)" + ) + import pynvml as nvml + + # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py + nvml.nvmlInit() + num_elements = math.ceil(os.cpu_count() / 64) + handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index) + affinity_string = "" + for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements): + # assume nvml returns list of 64 bit ints + affinity_string = f"{j:064b}{affinity_string}" + affinity_list = [int(x) for x in affinity_string] + affinity_list.reverse() # so core 0 is the 0th element + affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0] + os.sched_setaffinity(0, affinity_to_set) + if verbose: + cpu_cores = os.sched_getaffinity(0) + logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}") + + +@lru_cache +def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None: + """ + Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 cpus per node. + + This result is cached between calls. If you want to override it, please use + `accelerate.utils.environment.override_numa_afifnity`. + + Args: + local_process_index (int): + The index of the current process on the current server. + verbose (bool, *optional*): + Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will + default to True. + """ + override_numa_affinity(local_process_index=local_process_index, verbose=verbose) + + +@contextmanager +def clear_environment(): + """ + A context manager that will temporarily clear environment variables. + + When this context exits, the previous environment variables will be back. + + Example: + + ```python + >>> import os + >>> from accelerate.utils import clear_environment + + >>> os.environ["FOO"] = "bar" + >>> with clear_environment(): + ... print(os.environ) + ... 
os.environ["FOO"] = "new_bar" + ... print(os.environ["FOO"]) + {} + new_bar + + >>> print(os.environ["FOO"]) + bar + ``` + """ + _old_os_environ = os.environ.copy() + os.environ.clear() + + try: + yield + finally: + os.environ.clear() # clear any added keys, + os.environ.update(_old_os_environ) # then restore previous environment + + +@contextmanager +def patch_environment(**kwargs): + """ + A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. + + Will convert the values in `kwargs` to strings and upper-case all the keys. + + Example: + + ```python + >>> import os + >>> from accelerate.utils import patch_environment + + >>> with patch_environment(FOO="bar"): + ... print(os.environ["FOO"]) # prints "bar" + >>> print(os.environ["FOO"]) # raises KeyError + ``` + """ + existing_vars = {} + for key, value in kwargs.items(): + key = key.upper() + if key in os.environ: + existing_vars[key] = os.environ[key] + os.environ[key] = str(value) + + try: + yield + finally: + for key in kwargs: + key = key.upper() + if key in existing_vars: + # restore previous value + os.environ[key] = existing_vars[key] + else: + os.environ.pop(key, None) + + +def purge_accelerate_environment(func_or_cls): + """Decorator to clean up accelerate environment variables set by the decorated class or function. + + In some circumstances, calling certain classes or functions can result in accelerate env vars being set and not + being cleaned up afterwards. As an example, when calling: + + TrainingArguments(fp16=True, ...) + + The following env var will be set: + + ACCELERATE_MIXED_PRECISION=fp16 + + This can affect subsequent code, since the env var takes precedence over TrainingArguments(fp16=False). This is + especially relevant for unit testing, where we want to avoid the individual tests to have side effects on one + another. 
Decorate the unit test function or whole class with this decorator to ensure that after each test, the env + vars are cleaned up. This works for both unittest.TestCase and normal classes (pytest); it also works when + decorating the parent class. + + """ + prefix = "ACCELERATE_" + + @contextmanager + def env_var_context(): + # Store existing accelerate env vars + existing_vars = {k: v for k, v in os.environ.items() if k.startswith(prefix)} + try: + yield + finally: + # Restore original env vars or remove new ones + for key in [k for k in os.environ if k.startswith(prefix)]: + if key in existing_vars: + os.environ[key] = existing_vars[key] + else: + os.environ.pop(key, None) + + def wrap_function(func): + @wraps(func) + def wrapper(*args, **kwargs): + with env_var_context(): + return func(*args, **kwargs) + + wrapper._accelerate_is_purged_environment_wrapped = True + return wrapper + + if not isinstance(func_or_cls, type): + return wrap_function(func_or_cls) + + # Handle classes by wrapping test methods + def wrap_test_methods(test_class_instance): + for name in dir(test_class_instance): + if name.startswith("test"): + method = getattr(test_class_instance, name) + if callable(method) and not hasattr(method, "_accelerate_is_purged_environment_wrapped"): + setattr(test_class_instance, name, wrap_function(method)) + return test_class_instance + + # Handle inheritance + wrap_test_methods(func_or_cls) + func_or_cls.__init_subclass__ = classmethod(lambda cls, **kw: wrap_test_methods(cls)) + return func_or_cls diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py b/venv/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef1ef7372ba9a366e2655ebdce7c2e10c22f3f8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py @@ -0,0 +1,793 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import functools +import os +import shutil +import warnings +from collections import defaultdict +from contextlib import nullcontext +from pathlib import Path +from typing import Callable + +import torch + +from ..logging import get_logger +from .constants import FSDP_MODEL_NAME, OPTIMIZER_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_NAME +from .dataclasses import get_module_class_from_name +from .modeling import get_non_persistent_buffers, is_peft_model +from .other import get_module_children_bottom_up, is_compiled_module, save +from .versions import is_torch_version + + +logger = get_logger(__name__) + + +def enable_fsdp_ram_efficient_loading(): + """ + Enables RAM efficient loading of Hugging Face models for FSDP in the environment. + """ + # Sets values for `transformers.modeling_utils.is_fsdp_enabled` + if "ACCELERATE_USE_FSDP" not in os.environ: + os.environ["ACCELERATE_USE_FSDP"] = "True" + os.environ["FSDP_CPU_RAM_EFFICIENT_LOADING"] = "True" + + +def disable_fsdp_ram_efficient_loading(): + """ + Disables RAM efficient loading of Hugging Face models for FSDP in the environment. 
+ """ + os.environ["FSDP_CPU_RAM_EFFICIENT_LOADING"] = "False" + + +def _get_model_state_dict(model, adapter_only=False, sd_options=None): + if adapter_only and is_peft_model(model): + from peft import get_peft_model_state_dict + + return get_peft_model_state_dict(model, adapter_name=model.active_adapter) + + # Invariant: `sd_options` is not None only for FSDP2 + if sd_options is not None: + from torch.distributed.checkpoint.state_dict import get_model_state_dict + + return get_model_state_dict(model, options=sd_options) + else: + return model.state_dict() + + +def _set_model_state_dict(model, state_dict, adapter_only=False, sd_options=None): + if adapter_only and is_peft_model(model): + from peft import set_peft_model_state_dict + + return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter) + + # Invariant: `sd_options` is not None only for FSDP2 + if sd_options is not None: + from torch.distributed.checkpoint.state_dict import set_model_state_dict + + return set_model_state_dict(model, state_dict, options=sd_options) + else: + return model.load_state_dict(state_dict) + + +def _prepare_sd_options(fsdp_plugin): + sd_options = None + + # we use this only for FSDP2, as it requires torch >= 2.6.0 and this api requires torch >= 2.2.0 + if fsdp_plugin.fsdp_version == 2: + from torch.distributed.checkpoint.state_dict import StateDictOptions + from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType + + sd_options = StateDictOptions( + full_state_dict=fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT, + cpu_offload=getattr(fsdp_plugin.state_dict_config, "offload_to_cpu", False), + broadcast_from_rank0=getattr(fsdp_plugin.state_dict_config, "rank0_only", False), + ) + + return sd_options + + +def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False): + # Note: We import here to reduce import time from general modules, and isolate outside dependencies + import 
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False):
    """
    Save an FSDP-wrapped `model` under `output_dir`, honoring `fsdp_plugin.state_dict_type`:

    - FULL_STATE_DICT: rank 0 writes a single `.bin` file.
    - LOCAL_STATE_DICT: every rank writes its own `..._rank{i}.bin` shard.
    - SHARDED_STATE_DICT: a distributed-checkpoint directory is written collectively.

    `model_index` disambiguates filenames when several models are checkpointed together.
    """
    # Note: We import here to reduce import time from general modules, and isolate outside dependencies
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultSavePlanner
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

    os.makedirs(output_dir, exist_ok=True)
    if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
        # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
        # so, only enable it when num_processes>1
        is_multi_process = accelerator.num_processes > 1
        fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
        fsdp_plugin.state_dict_config.rank0_only = is_multi_process

    # FSDP2 needs no state_dict_type context manager; `sd_options` carries the config instead.
    ctx = (
        FSDP.state_dict_type(
            model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
        )
        if fsdp_plugin.fsdp_version == 1
        else nullcontext()
    )
    sd_options = _prepare_sd_options(fsdp_plugin)

    with ctx:
        state_dict = _get_model_state_dict(model, adapter_only=adapter_only, sd_options=sd_options)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Only rank 0 holds the full state dict here, so only it writes.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        # Invariant: `LOCAL_STATE_DICT` is never possible with `FSDP2`
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            # Collective call: every rank participates in writing its shards.
            dist_cp.save(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, adapter_only=False):
    """
    Load a checkpoint written by `save_fsdp_model` back into the FSDP-wrapped `model`,
    dispatching on `fsdp_plugin.state_dict_type`. Returns the result of the underlying
    `load_state_dict` call.
    """
    # Note: We import here to reduce import time from general modules, and isolate outside dependencies
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

    accelerator.wait_for_everyone()
    if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
        # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
        # so, only enable it when num_processes>1
        is_multi_process = accelerator.num_processes > 1
        fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
        fsdp_plugin.state_dict_config.rank0_only = is_multi_process

    ctx = (
        FSDP.state_dict_type(
            model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
        )
        if fsdp_plugin.fsdp_version == 1
        else nullcontext()
    )
    sd_options = _prepare_sd_options(fsdp_plugin)
    with ctx:
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Non-zero ranks of an unwrapped FSDP1 model rely on rank 0 broadcasting
            # the weights later via `sync_module_states`; bail out here.
            if type(model) is not FSDP and accelerator.process_index != 0 and not accelerator.is_fsdp2:
                if not fsdp_plugin.sync_module_states and fsdp_plugin.fsdp_version == 1:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            # we want an empty state dict for FSDP2 as we use `broadcast_from_rank0`
            load_model = not accelerator.is_fsdp2 or accelerator.is_main_process
            if load_model:
                state_dict = torch.load(input_model_file, weights_only=True)
            else:
                state_dict = {}
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file, weights_only=True)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # Accept either the parent checkpoint dir or the model subdir directly.
            ckpt_dir = (
                os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}")
                if f"{FSDP_MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            # Seed the dict with the current (sharded) state so the planner knows the layout.
            state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only, sd_options=sd_options)}
            dist_cp.load(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")

        load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only, sd_options=sd_options)
    return load_result
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """
    Save `optimizer`'s state for an FSDP-wrapped `model` under `output_dir`.

    FULL_STATE_DICT is written by rank 0 as a single `.bin` file; every other
    state-dict type is written collectively as a distributed-checkpoint directory.
    `optimizer_index` disambiguates filenames when several optimizers are saved.
    """
    # Note: We import here to reduce import time from general modules, and isolate outside dependencies
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultSavePlanner
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

    os.makedirs(output_dir, exist_ok=True)

    ctx = (
        FSDP.state_dict_type(
            model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
        )
        if fsdp_plugin.fsdp_version == 1
        else nullcontext()
    )

    sd_options = _prepare_sd_options(fsdp_plugin)

    with ctx:
        # FSDP2 uses the torch.distributed.checkpoint API; FSDP1 the classic FSDP one.
        if fsdp_plugin.fsdp_version == 2:
            from torch.distributed.checkpoint.state_dict import get_optimizer_state_dict

            optim_state = get_optimizer_state_dict(model, optimizer, options=sd_options)
        else:
            optim_state = FSDP.optim_state_dict(model, optimizer)

        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Only rank 0 holds the gathered full optimizer state, so only it writes.
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            # Collective call: every rank participates in writing its shards.
            dist_cp.save(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False):
    """
    Load optimizer state written by `save_fsdp_optimizer` back into `optimizer`.

    NOTE(review): `adapter_only` is accepted for API symmetry with the model
    load/save helpers but is not referenced in this body.
    """
    # Note: We import here to reduce import time from general modules, and isolate outside dependencies
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

    accelerator.wait_for_everyone()
    ctx = (
        FSDP.state_dict_type(
            model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
        )
        if fsdp_plugin.fsdp_version == 1
        else nullcontext()
    )
    sd_options = _prepare_sd_options(fsdp_plugin)
    with ctx:
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # With rank0_only, non-zero ranks skip the file read and receive state later.
            if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
                optimizer_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                input_optimizer_file = os.path.join(input_dir, optimizer_name)
                logger.info(f"Loading Optimizer state from {input_optimizer_file}")
                optim_state = torch.load(input_optimizer_file, weights_only=True)
                logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            # Accept either the parent checkpoint dir or the optimizer subdir directly.
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            # Seed with the current layout so the distributed-checkpoint planner can place shards.
            optim_state = {"optimizer": optimizer.state_dict()}
            dist_cp.load(
                optim_state,
                checkpoint_id=ckpt_dir,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")

        if fsdp_plugin.fsdp_version == 1:
            # Re-flatten the loaded state to match FSDP1's flattened parameters.
            flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state)
            optimizer.load_state_dict(flattened_osd)
        else:
            from torch.distributed.checkpoint.state_dict import set_optimizer_state_dict

            set_optimizer_state_dict(model, optimizer, optim_state, options=sd_options)
def _distributed_checkpoint_to_merged_weights(checkpoint_dir: str, save_path: str, safe_serialization: bool = True):
    """
    Passthrough to `torch.distributed.checkpoint.format_utils.dcp_to_torch_save`

    Will save under `save_path` as either `model.safetensors` or `pytorch_model.bin`.

    Returns:
        `Path`: the full path of the file the merged weights were written to.
    """
    # Note: We import here to reduce import time from general modules, and isolate outside dependencies
    import torch.distributed.checkpoint as dist_cp
    import torch.distributed.checkpoint.format_utils as dist_cp_format_utils

    state_dict = {}
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    # `no_dist=True`: this is a purely local, CPU-bound merge — no process group needed.
    dist_cp_format_utils._load_state_dict(
        state_dict,
        storage_reader=dist_cp.FileSystemReader(checkpoint_dir),
        planner=dist_cp_format_utils._EmptyStateDictLoadPlanner(),
        no_dist=True,
    )
    save_path = save_path / SAFE_WEIGHTS_NAME if safe_serialization else save_path / WEIGHTS_NAME

    # To handle if state is a dict like {model: {...}}
    if len(state_dict.keys()) == 1:
        state_dict = state_dict[list(state_dict)[0]]
    save(state_dict, save_path, safe_serialization=safe_serialization)
    return save_path


def merge_fsdp_weights(
    checkpoint_dir: str, output_path: str, safe_serialization: bool = True, remove_checkpoint_dir: bool = False
):
    """
    Merge the weights from sharded FSDP model checkpoints into a single combined checkpoint. Should be used if
    `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}/model.safetensors` if
    `safe_serialization` else `pytorch_model.bin`.

    Note: this is a CPU-bound process.

    Args:
        checkpoint_dir (`str`):
            The directory containing the FSDP checkpoints (can be either the model or optimizer).
        output_path (`str`):
            The path to save the merged checkpoint.
        safe_serialization (`bool`, *optional*, defaults to `True`):
            Whether to save the merged weights with safetensors (recommended).
        remove_checkpoint_dir (`bool`, *optional*, defaults to `False`):
            Whether to remove the checkpoint directory after merging.
    """
    checkpoint_dir = Path(checkpoint_dir)
    from accelerate.state import PartialState

    if not is_torch_version(">=", "2.3.0"):
        # Fix: error message previously ended with a stray backtick ("2.3.0`").
        raise ValueError("`merge_fsdp_weights` requires PyTorch >= 2.3.0")

    # Verify that the checkpoint directory exists
    if not checkpoint_dir.exists():
        model_path_exists = (checkpoint_dir / "pytorch_model_fsdp_0").exists()
        optimizer_path_exists = (checkpoint_dir / "optimizer_0").exists()
        err = f"Tried to load from {checkpoint_dir} but couldn't find a valid metadata file."
        # Fix: the concatenated fragments previously ran together without spaces
        # (e.g. "...exist.Please" and "...optimizer_0instead.").
        if model_path_exists and optimizer_path_exists:
            err += " However, potential model and optimizer checkpoint directories exist."
            err += f" Please pass in either {checkpoint_dir}/pytorch_model_fsdp_0 or {checkpoint_dir}/optimizer_0"
            err += " instead."
        elif model_path_exists:
            err += " However, a potential model checkpoint directory exists."
            err += f" Please try passing in {checkpoint_dir}/pytorch_model_fsdp_0 instead."
        elif optimizer_path_exists:
            err += " However, a potential optimizer checkpoint directory exists."
            err += f" Please try passing in {checkpoint_dir}/optimizer_0 instead."
        raise ValueError(err)

    # To setup `save` to work
    state = PartialState()
    if state.is_main_process:
        logger.info(f"Merging FSDP weights from {checkpoint_dir}")
        save_path = _distributed_checkpoint_to_merged_weights(checkpoint_dir, output_path, safe_serialization)
        logger.info(f"Successfully merged FSDP weights and saved to {save_path}")
        if remove_checkpoint_dir:
            logger.info(f"Removing old checkpoint directory {checkpoint_dir}")
            shutil.rmtree(checkpoint_dir)
    state.wait_for_everyone()
def ensure_weights_retied(param_init_fn, model: torch.nn.Module, device: torch.device):
    """Wrap `param_init_fn` so that weight tying declared in `model._tied_weights_keys`
    survives re-initialization.

    If the model declares no tied weights, the original `param_init_fn` is returned
    untouched. Otherwise the returned callable re-aliases every tied parameter to the
    first re-initialized instance it observes.
    """
    tied_names = getattr(model, "_tied_weights_keys", None)
    if not tied_names:
        # Nothing to re-tie — hand back the original init fn untouched.
        return param_init_fn

    # Map id(original param) -> the re-initialized parameter object to share.
    # Needed later to recognize and replace the tied parameters.
    tied_params = {}
    for fqn in tied_names:
        parts = fqn.split(".")
        module_path, leaf_name = ".".join(parts[:-1]), parts[-1]
        owner = model.get_submodule(module_path)
        tied_params[id(getattr(owner, leaf_name))] = None  # placeholder, filled lazily below

    # Build a param_init_fn that restores the ties after each module init.
    def param_init_fn_tied_param(module: torch.nn.Module):
        # Group, per original parameter identity, the attribute names on this
        # module that must share one tensor (usually just one, but handle > 1).
        to_retie = defaultdict(list)
        for attr_name, param in module.named_parameters(recurse=False):
            if id(param) in tied_params:
                to_retie[id(param)].append(attr_name)

        # The wrapped init fn potentially re-allocates the parameters.
        module = param_init_fn(module)

        # Re-establish sharing: the first sighting wins, later sightings alias it.
        for ident, attr_names in to_retie.items():
            for attr_name in attr_names:
                shared = tied_params[ident]
                if shared is None:
                    # Everything will be tied to the first time the param is observed.
                    tied_params[ident] = getattr(module, attr_name)
                else:
                    setattr(module, attr_name, shared)  # tie

        return module

    return param_init_fn_tied_param
def fsdp2_load_full_state_dict(accelerator, model: torch.nn.Module, full_sd: dict):
    """
    Loads the full state dict (could be only on rank 0) into the sharded model. This is done by broadcasting the
    parameters from rank 0 to all other ranks. This function modifies the model in-place.

    Args:
        accelerator (`Accelerator`): The accelerator instance
        model (`torch.nn.Module`):
            The model to load the state dict into, expected to be on meta device or a VRAM spike can occur
        full_sd (`dict`): The full state dict to load, can only be on rank 0
    """
    import torch.distributed as dist
    from torch.distributed.tensor import distribute_tensor

    # Model was previously copied to meta device
    meta_sharded_sd = model.state_dict()
    sharded_sd = {}

    def _infer_parameter_dtype(model, param_name, empty_param):
        """Return (should_be_contiguous, casting_dtype) for the parameter `param_name`,
        based on the parameter object currently registered on `model`."""
        try:
            old_param = model.get_parameter_or_buffer(param_name)
        except AttributeError:
            # Need this for LORA, as there some params are not *parameters* of sorts
            base_param_name, local_param_name = param_name.rsplit(".", 1)
            submodule = model.get_submodule(base_param_name)
            old_param = getattr(submodule, local_param_name)

        is_torch_e4m3fn_available = hasattr(torch, "float8_e4m3fn")
        casting_dtype = None
        is_param_float8_e4m3fn = is_torch_e4m3fn_available and empty_param.dtype == torch.float8_e4m3fn

        # Only cast floating-point params; fp8 e4m3fn stays untouched.
        if empty_param.dtype.is_floating_point and not is_param_float8_e4m3fn:
            casting_dtype = old_param.dtype

        return old_param is not None and old_param.is_contiguous(), casting_dtype

    def _cast_and_contiguous(tensor, to_contiguous, dtype):
        """Optionally cast `tensor` to `dtype` and make it contiguous."""
        if dtype is not None:
            tensor = tensor.to(dtype=dtype)
        if to_contiguous:
            tensor = tensor.contiguous()
        return tensor

    # Rank 0 distributes the full state dict to other ranks
    if accelerator.is_main_process:
        for (param_name, full_param), sharded_param in zip(full_sd.items(), meta_sharded_sd.values()):
            device_mesh = sharded_param.device_mesh
            full_param = full_param.detach().to(device_mesh.device_type)
            dist.broadcast(full_param, src=0, group=device_mesh.get_group())
            sharded_tensor = distribute_tensor(full_param, device_mesh, sharded_param.placements)
            to_contiguous, casting_dtype = _infer_parameter_dtype(
                model,
                param_name,
                full_param,
            )
            sharded_tensor = _cast_and_contiguous(sharded_tensor, to_contiguous, casting_dtype)
            sharded_sd[param_name] = sharded_tensor
    # We need this else to have a matching `broadcast` for all of the ranks, else we deadlock
    else:
        for param_name, sharded_param in meta_sharded_sd.items():
            device_mesh = sharded_param.device_mesh
            full_tensor = torch.empty(sharded_param.size(), device=device_mesh.device_type, dtype=sharded_param.dtype)
            dist.broadcast(full_tensor, src=0, group=device_mesh.get_group())
            sharded_tensor = distribute_tensor(full_tensor, device_mesh, sharded_param.placements)
            to_contiguous, casting_dtype = _infer_parameter_dtype(
                model,
                param_name,
                full_tensor,
            )
            sharded_tensor = _cast_and_contiguous(sharded_tensor, to_contiguous, casting_dtype)
            sharded_sd[param_name] = sharded_tensor

    # we set `assign=True` because our params are on meta device
    model.load_state_dict(sharded_sd, assign=True)
    return model
def fsdp2_switch_optimizer_parameters(optimizer: torch.optim.Optimizer, mapping: dict):
    """
    Switches the parameters of the optimizer to new ones (sharded parameters in usual case). This function modifies the
    optimizer in-place.

    Args:
        optimizer (`torch.optim.Optimizer`): Optimizer instance which contains the original model parameters
        mapping (`dict`): Mapping from the original parameter (specified by `data_ptr()`) to the sharded parameter

    Raises:
        KeyError:
            If a parameter in the optimizer couldn't be switched to its sharded version. This should never happen and
            indicates a bug. If we kept the original params instead of raising, the training wouldn't be numerically
            correct and weights wouldn't get updated.
    """
    # (Removed a dead `accessor_mapping` dict and its `DTensor` import that were built but never read.)
    try:
        for param_group in optimizer.param_groups:
            # Fix: key the lookup on `p.data_ptr()` (the int the mapping is built from),
            # not the bound method `p.data_ptr`, which could never match a key and made
            # every switch raise.
            param_group["params"] = [mapping[p.data_ptr()] for p in param_group["params"]]
    except KeyError:
        # This shouldn't ever happen, but we want to fail here else training wouldn't be numerically correct
        # This basically means that we're missing a mapping from the original parameter to the sharded parameter
        raise KeyError(
            "A parameter in the optimizer couldn't be switched to its sharded version. This breaks the training. Please raise an issue on GitHub."
        )


def fsdp2_apply_ac(accelerator, model: torch.nn.Module):
    """
    Applies the activation checkpointing to the model.

    Args:
        accelerator (`Accelerator`): The accelerator instance
        model (`torch.nn.Module`): The model to apply the activation checkpointing to

    Returns:
        `torch.nn.Module`: The model with the activation checkpointing applied
    """

    from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
        checkpoint_wrapper,
    )

    auto_wrap_policy_func = fsdp2_prepare_auto_wrap_policy(accelerator.state.fsdp_plugin, model)

    # Walk children bottom-up, skipping the root module itself.
    for layer_name, layer in get_module_children_bottom_up(model, return_fqns=True)[:-1]:
        if len(layer_name.split(".")) > 1:
            parent_name, child_name = layer_name.rsplit(".", 1)
        else:
            parent_name = None
            child_name = layer_name

        parent_module = model.get_submodule(parent_name) if parent_name else model
        # NOTE(review): the wrap policy is evaluated on the *parent* module here, not on
        # `layer` itself — confirm this is the intended selection criterion.
        if auto_wrap_policy_func(parent_module):
            layer = checkpoint_wrapper(layer, preserve_rng_state=False)
            parent_module.register_module(child_name, layer)

    return model
def fsdp2_prepare_model(accelerator, model: torch.nn.Module) -> torch.nn.Module:
    """Prepares the model for FSDP2 in-place. Also returns the model to avoid misuse of the original model.

    Args:
        accelerator (`Accelerator`): The accelerator instance
        model (`torch.nn.Module`): The model to prepare

    Returns:
        `torch.nn.Module`: Prepared model
    """
    from torch.distributed.fsdp import FSDPModule, MixedPrecisionPolicy, fully_shard

    # Already sharded (possibly under torch.compile)? Nothing to do.
    is_type_fsdp = isinstance(model, FSDPModule) or (
        is_compiled_module(model) and isinstance(model._orig_mod, FSDPModule)
    )
    if is_type_fsdp:
        return model

    fsdp2_plugin = accelerator.state.fsdp_plugin

    fsdp2_plugin.set_auto_wrap_policy(model)

    original_sd = model.state_dict()
    mesh = getattr(accelerator, "torch_device_mesh", None)

    fsdp2_kwargs = {
        "reshard_after_forward": fsdp2_plugin.reshard_after_forward,
        "offload_policy": fsdp2_plugin.cpu_offload,
        # `fully_shard` doesn't accept `None` in case of `MixedPrecisionPolicy`
        "mp_policy": fsdp2_plugin.mixed_precision_policy or MixedPrecisionPolicy(),
        "mesh": mesh[tuple(accelerator.parallelism_config.fsdp_dim_names)] if mesh is not None else None,
    }

    model_has_params4bit = False
    for name, param in model.named_parameters():
        # this is a temporary fix whereby loading models with bnb params cannot be moved from
        # GPU to a meta device due with FSDP2 because torch operations don't return the original class type
        # bypassing the move to meta will still cause the VRAM spike, but at least it still will load
        if param.__class__.__name__ == "Params4bit":
            model_has_params4bit = True
            break

    if fsdp2_plugin.cpu_ram_efficient_loading and not model_has_params4bit:
        # Context: `fully_shard` moves the model to GPU if it was on CPU, however it can also be on `meta` and then it stays there even after `fully_shard`
        # For this reason, we need to move the model to `meta` device, as then sharding happens on `meta` device
        # If we kept the model on CPU (`cpu_ram_efficient_loading` has model be on CPU on all ranks, though non-main ranks only have `torch.empty`), `fully_shard` would move it to GPU
        # Afterwards, when we call `fsdp2_load_full_state_dict`, us creating the state_dict would result into briefly having two copies of model state_dict on the GPU -> VRAM spike

        # We need to keep the original non-persistent buffers, as those MAY not be in the state_dict, resulting in them staying on meta device
        # Also, these buffers aren't getting sharded by default
        # We get the FQNs of all non-persistent buffers, to re-register them after
        non_persistent_buffer_fqns = get_non_persistent_buffers(model, recurse=True, fqns=True)
        original_non_persistent_buffers = copy.deepcopy(
            {k: v for k, v in model.named_buffers() if k in non_persistent_buffer_fqns}
        )
        # We move the model to meta device, as then sharding happens on meta device
        model = model.to(torch.device("meta"))
        # We need to re-tie the weights, not exactly sure why, but if we don't do this, reference to `lm_head/embed_tokens` stay hanging -> more VRAM usage
        # We assume `transformers` models have a `tie_weights` method if they support it
        if hasattr(model, "tie_weights"):
            model.tie_weights()

    auto_wrap_policy_func = fsdp2_prepare_auto_wrap_policy(fsdp2_plugin, model)
    if auto_wrap_policy_func is not None:
        # We skip the model itself, as that one is always wrapped
        for module in get_module_children_bottom_up(model)[:-1]:
            if auto_wrap_policy_func(module) and not isinstance(module, FSDPModule):
                fully_shard(module, **fsdp2_kwargs)

    if not isinstance(model, FSDPModule):
        fully_shard(model, **fsdp2_kwargs)

    if fsdp2_plugin.cpu_ram_efficient_loading:
        # If `cpu_ram_efficient_loading` is enabled, only rank 0 loads the weights
        # Other ranks have an empty model on `meta` device, so we need to distribute the weights properly
        fsdp2_load_full_state_dict(accelerator, model, original_sd)

    if fsdp2_plugin.cpu_ram_efficient_loading and not model_has_params4bit:
        # We re-register the buffers, as they may not be in the state_dict
        for fqn, buffer_tensor in original_non_persistent_buffers.items():
            buffer_tensor = buffer_tensor.to(accelerator.device)

            if "." in fqn:
                parent_fqn, local_buffer_name = fqn.rsplit(".", 1)
                parent_module = model.get_submodule(parent_fqn)
            else:
                local_buffer_name = fqn
                parent_module = model

            parent_module.register_buffer(local_buffer_name, buffer_tensor, persistent=False)

        # We need to tie the weights again, as call to `load_full_state_dict` breaks the tie
        # Needs to be called both here and above
        # Removing this call makes the model produce a slightly different loss
        # Removing the call above leads to extra memory usage as explained in the comment above
        if hasattr(model, "tie_weights"):
            model.tie_weights()

    # There is no `dtype` attribute on nn.Module itself
    # Set it to None if it doesn't exist and do the upcast always
    model_dtype = getattr(model, "dtype", None)
    if accelerator.mixed_precision != "no" and (model_dtype is None or model_dtype != torch.float32):
        # We upcast the model according to `deepspeed`'s implementation
        # More info about this can be found in `accelerator.py:prepare_model`s FSDP1 section
        model = model.to(torch.float32)
        if accelerator.is_main_process:
            # TODO(siro1): Add a warning for each parameter that was upcasted
            warnings.warn(
                "FSDP upcast of low precision parameters to fp32 (since mixed_precision != 'no') may affect the precision of model checkpoints."
            )
    return model
def fsdp2_prepare_auto_wrap_policy(fsdp2_plugin, model: torch.nn.Module) -> Callable[[torch.nn.Module], bool]:
    """Prepares the auto wrap policy based on its type, done to mimic the behaviour of FSDP1 auto wrap policy.

    Args:
        fsdp2_plugin (`FullyShardedDataParallelPlugin`):
            Instance of `FullyShardedDataParallelPlugin` containing the configuration options
        model (`torch.nn.Module`):
            The model to wrap

    Returns:
        `Callable[[torch.nn.Module], bool]`:
            The auto wrap policy function to be applied to the model, or `None` when the
            configured policy is neither transformer- nor size-based.
    """
    from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy

    policy_fn = fsdp2_plugin.auto_wrap_policy
    # The plugin typically stores a `functools.partial`; unwrap to the raw policy function.
    if isinstance(policy_fn, functools.partial):
        policy_fn = policy_fn.func

    if policy_fn is transformer_auto_wrap_policy:
        cls_names = fsdp2_plugin.transformer_cls_names_to_wrap
        if cls_names is None:
            # Fall back to the model's declared no-split modules.
            cls_names = list(getattr(model, "_no_split_modules", None) or [])

        wrap_classes = set()
        for cls_name in cls_names:
            resolved = get_module_class_from_name(model, cls_name)
            if resolved is None:
                raise ValueError(f"Could not find the transformer layer class {cls_name} in the model.")
            wrap_classes.add(resolved)

        def policy(module: torch.nn.Module) -> bool:
            # NOTE(review): mirrors the original — with no explicitly configured class
            # names the policy always answers False, even though `_no_split_modules`
            # was resolved above; confirm that is intended.
            if fsdp2_plugin.transformer_cls_names_to_wrap is None:
                return False
            return isinstance(module, tuple(wrap_classes))

        return policy

    if policy_fn is size_based_auto_wrap_policy:

        def policy(module: torch.nn.Module) -> bool:
            # Wrap any module whose total parameter count exceeds the configured floor.
            return sum(p.numel() for p in module.parameters()) > fsdp2_plugin.min_num_params

        return policy

    return None
We + need this as current `get_grad_scaler` accepts only `distributed_type` as arg, which doesn't differentiate between + FSDP1 and FSDP2 + """ + from torch.amp.grad_scaler import GradScaler + + return GradScaler(**kwargs) + + +def fsdp2_canonicalize_names(named_params: dict) -> dict: + """Removes parameter name modifiers in order to map them back to their original names. + + See huggingface/accelerate#3554 for more context. + + Args: + named_params (`dict`): The named parameters dictionary to canonicalize. + + Returns: + `dict`: The canonicalized named parameters dictionary + """ + named_params = {k.replace("._checkpoint_wrapped_module", ""): v for k, v in named_params.items()} + named_params = { + k.replace("_orig_mod.", "") if k.startswith("_orig_mod.") else k: v for k, v in named_params.items() + } + named_params = {k.replace("._orig_mod", ""): v for k, v in named_params.items()} + return named_params diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/imports.py b/venv/lib/python3.10/site-packages/accelerate/utils/imports.py new file mode 100644 index 0000000000000000000000000000000000000000..a919827994824cdbc745acac83f03e501c8688f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/imports.py @@ -0,0 +1,551 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import importlib
import importlib.metadata
import os
import sys
import warnings
from functools import lru_cache, wraps

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, patch_environment, str_to_bool
from .versions import compare_versions, is_torch_version


# Try to run Torch native job in an environment with TorchXLA installed by setting this value to 0.
USE_TORCH_XLA = parse_flag_from_env("USE_TORCH_XLA", default=True)

_torch_xla_available = False
if USE_TORCH_XLA:
    try:
        import torch_xla.core.xla_model as xm  # noqa: F401
        import torch_xla.runtime

        _torch_xla_available = True
    except ImportError:
        pass

# Keep it for is_tpu_available. It will be removed along with is_tpu_available.
_tpu_available = _torch_xla_available

# Cache this result as it's a C FFI call which can be pretty time-consuming
_torch_distributed_available = torch.distributed.is_available()


def _is_package_available(pkg_name, metadata_name=None):
    """Return True only when `pkg_name` is importable AND has installed distribution metadata."""
    # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
    package_exists = importlib.util.find_spec(pkg_name) is not None
    if package_exists:
        try:
            # Some libraries have different names in the metadata
            _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name)
            return True
        except importlib.metadata.PackageNotFoundError:
            return False
    # Fix: previously fell off the end and implicitly returned None; return an explicit bool.
    return False


def is_torch_distributed_available() -> bool:
    return _torch_distributed_available


def is_xccl_available():
    """XCCL backend availability; only meaningful on torch >= 2.7.0."""
    if is_torch_version(">=", "2.7.0"):
        return torch.distributed.distributed_c10d.is_xccl_available()
    # NOTE(review): both branches below return False, so the IPEX probe only
    # matters for its side effects (if any) — confirm upstream intent.
    if is_ipex_available():
        return False
    return False


def is_ccl_available():
    try:
        # NOTE(review): leftover guard — the oneCCL import this once wrapped is gone,
        # so the except branch is unreachable. Confirm upstream intent before removing.
        pass
    except ImportError:
        print(
            "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) XPUs, but it is not"
            " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
            " Bindings for PyTorch*."
        )
    return importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL" + " Bindings for PyTorch*." + ) + return importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None + + +def get_ccl_version(): + return importlib.metadata.version("oneccl_bind_pt") + + +def is_import_timer_available(): + return _is_package_available("import_timer") + + +def is_pynvml_available(): + return _is_package_available("pynvml") or _is_package_available("pynvml", "nvidia-ml-py") + + +def is_pytest_available(): + return _is_package_available("pytest") + + +def is_msamp_available(): + return _is_package_available("msamp", "ms-amp") + + +def is_schedulefree_available(): + return _is_package_available("schedulefree") + + +def is_transformer_engine_available(): + if is_hpu_available(): + return _is_package_available("intel_transformer_engine", "intel-transformer-engine") + else: + return _is_package_available("transformer_engine", "transformer-engine") + + +def is_lomo_available(): + return _is_package_available("lomo_optim") + + +def is_cuda_available(): + """ + Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda + uninitialized. + """ + with patch_environment(PYTORCH_NVML_BASED_CUDA_CHECK="1"): + available = torch.cuda.is_available() + + return available + + +@lru_cache +def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): + """ + Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set + the USE_TORCH_XLA to false. + """ + assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." 
+ + if not _torch_xla_available: + return False + elif check_is_gpu: + return torch_xla.runtime.device_type() in ["GPU", "CUDA"] + elif check_is_tpu: + return torch_xla.runtime.device_type() == "TPU" + + return True + + +def is_torchao_available(): + package_exists = _is_package_available("torchao") + if package_exists: + torchao_version = version.parse(importlib.metadata.version("torchao")) + return compare_versions(torchao_version, ">=", "0.6.1") + return False + + +def is_deepspeed_available(): + return _is_package_available("deepspeed") + + +def is_pippy_available(): + return is_torch_version(">=", "2.4.0") + + +def is_bf16_available(ignore_tpu=False): + "Checks if bf16 is supported, optionally ignoring the TPU" + if is_torch_xla_available(check_is_tpu=True): + return not ignore_tpu + if is_cuda_available(): + return torch.cuda.is_bf16_supported() + if is_mlu_available(): + return torch.mlu.is_bf16_supported() + if is_xpu_available(): + return torch.xpu.is_bf16_supported() + if is_mps_available(): + return False + return True + + +def is_fp16_available(): + "Checks if fp16 is supported" + if is_habana_gaudi1(): + return False + + return True + + +def is_fp8_available(): + "Checks if fp8 is supported" + return is_msamp_available() or is_transformer_engine_available() or is_torchao_available() + + +def is_4bit_bnb_available(): + package_exists = _is_package_available("bitsandbytes") + if package_exists: + bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) + return compare_versions(bnb_version, ">=", "0.39.0") + return False + + +def is_8bit_bnb_available(): + package_exists = _is_package_available("bitsandbytes") + if package_exists: + bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) + return compare_versions(bnb_version, ">=", "0.37.2") + return False + + +def is_bnb_available(min_version=None): + package_exists = _is_package_available("bitsandbytes") + if package_exists and min_version is not None: + bnb_version = 
version.parse(importlib.metadata.version("bitsandbytes")) + return compare_versions(bnb_version, ">=", min_version) + else: + return package_exists + + +def is_bitsandbytes_multi_backend_available(): + if not is_bnb_available(): + return False + import bitsandbytes as bnb + + return "multi_backend" in getattr(bnb, "features", set()) + + +def is_torchvision_available(): + return _is_package_available("torchvision") + + +def is_megatron_lm_available(): + if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1: + if importlib.util.find_spec("megatron") is not None: + try: + megatron_version = parse(importlib.metadata.version("megatron-core")) + if compare_versions(megatron_version, ">=", "0.8.0"): + return importlib.util.find_spec(".training", "megatron") + except Exception as e: + warnings.warn(f"Parse Megatron version failed. Exception:{e}") + return False + + +def is_transformers_available(): + return _is_package_available("transformers") + + +def is_datasets_available(): + return _is_package_available("datasets") + + +def is_peft_available(): + return _is_package_available("peft") + + +def is_timm_available(): + return _is_package_available("timm") + + +def is_triton_available(): + if is_xpu_available(): + return _is_package_available("triton", "pytorch-triton-xpu") + return _is_package_available("triton") + + +def is_aim_available(): + package_exists = _is_package_available("aim") + if package_exists: + aim_version = version.parse(importlib.metadata.version("aim")) + return compare_versions(aim_version, "<", "4.0.0") + return False + + +def is_tensorboard_available(): + return _is_package_available("tensorboard") or _is_package_available("tensorboardX") + + +def is_wandb_available(): + return _is_package_available("wandb") + + +def is_comet_ml_available(): + return _is_package_available("comet_ml") + + +def is_swanlab_available(): + return _is_package_available("swanlab") + + +def is_trackio_available(): + return sys.version_info >= (3, 10) and 
_is_package_available("trackio") + + +def is_boto3_available(): + return _is_package_available("boto3") + + +def is_rich_available(): + if _is_package_available("rich"): + return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False) + return False + + +def is_sagemaker_available(): + return _is_package_available("sagemaker") + + +def is_tqdm_available(): + return _is_package_available("tqdm") + + +def is_clearml_available(): + return _is_package_available("clearml") + + +def is_pandas_available(): + return _is_package_available("pandas") + + +def is_matplotlib_available(): + return _is_package_available("matplotlib") + + +def is_mlflow_available(): + if _is_package_available("mlflow"): + return True + + if importlib.util.find_spec("mlflow") is not None: + try: + _ = importlib.metadata.metadata("mlflow-skinny") + return True + except importlib.metadata.PackageNotFoundError: + return False + return False + + +def is_mps_available(min_version="1.12"): + "Checks if MPS device is available. The minimum version required is 1.12." + # With torch 1.12, you can use torch.backends.mps + # With torch 2.0.0, you can use torch.mps + return is_torch_version(">=", min_version) and torch.backends.mps.is_available() and torch.backends.mps.is_built() + + +def is_ipex_available(): + "Checks if ipex is installed." + + def get_major_and_minor_from_version(full_version): + return str(version.parse(full_version).major) + "." 
+ str(version.parse(full_version).minor) + + _torch_version = importlib.metadata.version("torch") + if importlib.util.find_spec("intel_extension_for_pytorch") is None: + return False + _ipex_version = "N/A" + try: + _ipex_version = importlib.metadata.version("intel_extension_for_pytorch") + except importlib.metadata.PackageNotFoundError: + return False + torch_major_and_minor = get_major_and_minor_from_version(_torch_version) + ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) + if torch_major_and_minor != ipex_major_and_minor: + warnings.warn( + f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," + f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again." + ) + return False + return True + + +@lru_cache +def is_mlu_available(check_device=False): + """ + Checks if `mlu` is available via an `cndev-based` check which won't trigger the drivers and leave mlu + uninitialized. + """ + if importlib.util.find_spec("torch_mlu") is None: + return False + + import torch_mlu # noqa: F401 + + with patch_environment(PYTORCH_CNDEV_BASED_MLU_CHECK="1"): + available = torch.mlu.is_available() + + return available + + +@lru_cache +def is_musa_available(check_device=False): + "Checks if `torch_musa` is installed and potentially if a MUSA is in the environment" + if importlib.util.find_spec("torch_musa") is None: + return False + + import torch_musa # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no MUSA is found + _ = torch.musa.device_count() + return torch.musa.is_available() + except RuntimeError: + return False + return hasattr(torch, "musa") and torch.musa.is_available() + + +@lru_cache +def is_npu_available(check_device=False): + "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" + if importlib.util.find_spec("torch_npu") is None: + return False + + import torch_npu # noqa: F401 + + if check_device: + try: + 
# Will raise a RuntimeError if no NPU is found + _ = torch.npu.device_count() + return torch.npu.is_available() + except RuntimeError: + return False + return hasattr(torch, "npu") and torch.npu.is_available() + + +@lru_cache +def is_sdaa_available(check_device=False): + "Checks if `torch_sdaa` is installed and potentially if a SDAA is in the environment" + if importlib.util.find_spec("torch_sdaa") is None: + return False + + import torch_sdaa # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no NPU is found + _ = torch.sdaa.device_count() + return torch.sdaa.is_available() + except RuntimeError: + return False + return hasattr(torch, "sdaa") and torch.sdaa.is_available() + + +@lru_cache +def is_hpu_available(init_hccl=False): + "Checks if `torch.hpu` is installed and potentially if a HPU is in the environment" + if ( + importlib.util.find_spec("habana_frameworks") is None + or importlib.util.find_spec("habana_frameworks.torch") is None + ): + return False + + import habana_frameworks.torch # noqa: F401 + + if init_hccl: + import habana_frameworks.torch.distributed.hccl as hccl # noqa: F401 + + return hasattr(torch, "hpu") and torch.hpu.is_available() + + +def is_habana_gaudi1(): + if is_hpu_available(): + import habana_frameworks.torch.utils.experimental as htexp # noqa: F401 + + if htexp._get_device_type() == htexp.synDeviceType.synDeviceGaudi: + return True + + return False + + +@lru_cache +def is_xpu_available(check_device=False): + """ + Checks if XPU acceleration is available either via `intel_extension_for_pytorch` or via stock PyTorch (>=2.4) and + potentially if a XPU is in the environment + """ + + if is_ipex_available(): + import intel_extension_for_pytorch # noqa: F401 + else: + if is_torch_version("<=", "2.3"): + return False + + if check_device: + try: + # Will raise a RuntimeError if no XPU is found + _ = torch.xpu.device_count() + return torch.xpu.is_available() + except RuntimeError: + return False + return hasattr(torch, 
"xpu") and torch.xpu.is_available() + + +def is_dvclive_available(): + return _is_package_available("dvclive") + + +def is_torchdata_available(): + return _is_package_available("torchdata") + + +# TODO: Remove this function once stateful_dataloader is a stable feature in torchdata. +def is_torchdata_stateful_dataloader_available(): + package_exists = _is_package_available("torchdata") + if package_exists: + torchdata_version = version.parse(importlib.metadata.version("torchdata")) + return compare_versions(torchdata_version, ">=", "0.8.0") + return False + + +def torchao_required(func): + """ + A decorator that ensures the decorated function is only called when torchao is available. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + if not is_torchao_available(): + raise ImportError( + "`torchao` is not available, please install it before calling this function via `pip install torchao`." + ) + return func(*args, **kwargs) + + return wrapper + + +# TODO: Rework this into `utils.deepspeed` and migrate the "core" chunks into `accelerate.deepspeed` +def deepspeed_required(func): + """ + A decorator that ensures the decorated function is only called when deepspeed is enabled. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + from accelerate.state import AcceleratorState + from accelerate.utils.dataclasses import DistributedType + + if AcceleratorState._shared_state != {} and AcceleratorState().distributed_type != DistributedType.DEEPSPEED: + raise ValueError( + "DeepSpeed is not enabled, please make sure that an `Accelerator` is configured for `deepspeed` " + "before calling this function." 
+ ) + return func(*args, **kwargs) + + return wrapper + + +def is_weights_only_available(): + # Weights only with allowlist was added in 2.4.0 + # ref: https://github.com/pytorch/pytorch/pull/124331 + return is_torch_version(">=", "2.4.0") + + +def is_numpy_available(min_version="1.25.0"): + numpy_version = parse(importlib.metadata.version("numpy")) + return compare_versions(numpy_version, ">=", min_version) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/launch.py b/venv/lib/python3.10/site-packages/accelerate/utils/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..020c0e820e5554d4a5f80f2a86b7e5315bc8d06a --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/launch.py @@ -0,0 +1,760 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import os +import subprocess +import sys +import warnings +from ast import literal_eval +from shutil import which +from typing import Any + +import torch + +from ..commands.config.config_args import SageMakerConfig +from ..utils import ( + DynamoBackend, + PrecisionType, + is_ccl_available, + is_fp8_available, + is_hpu_available, + is_ipex_available, + is_mlu_available, + is_musa_available, + is_npu_available, + is_sdaa_available, + is_torch_xla_available, + is_xpu_available, +) +from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS +from ..utils.other import get_free_port, is_port_in_use, merge_dicts +from ..utils.versions import compare_versions +from .dataclasses import DistributedType, SageMakerDistributedType + + +def _filter_args(args, parser, default_args=[]): + """ + Filters out all `accelerate` specific args + """ + new_args, _ = parser.parse_known_args(default_args) + for key, value in vars(args).items(): + if key in vars(new_args).keys(): + setattr(new_args, key, value) + return new_args + + +def _get_mpirun_args(): + """ + Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs + are: OpenMPI, Intel MPI, or MVAPICH. + + Returns: Program name and arg names for hostfile, num processes, and processes per node + """ + # Find the MPI program name + mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)] + + if len(mpi_apps) == 0: + raise OSError("mpirun or mpiexec were not found. 
Ensure that Intel MPI, Open MPI, or MVAPICH are installed.") + + # Call the app with the --version flag to determine which MPI app is installed + mpi_app = mpi_apps[0] + mpirun_version = subprocess.check_output([mpi_app, "--version"]) + + if b"Open MPI" in mpirun_version: + return mpi_app, "--hostfile", "-n", "--npernode", "--bind-to" + else: + # Intel MPI and MVAPICH both use the same arg names + return mpi_app, "-f", "-n", "-ppn", "" + + +def setup_fp8_env(args: argparse.Namespace, current_env: dict[str, str]): + """ + Setup the FP8 environment variables. + """ + prefix = "ACCELERATE_" + for arg in vars(args): + if arg.startswith("fp8_"): + value = getattr(args, arg) + if value is not None: + if arg == "fp8_override_linear_precision": + current_env[prefix + "FP8_OVERRIDE_FPROP"] = str(value[0]) + current_env[prefix + "FP8_OVERRIDE_DGRAD"] = str(value[1]) + current_env[prefix + "FP8_OVERRIDE_WGRAD"] = str(value[2]) + else: + current_env[f"{prefix}{arg.upper()}"] = str(getattr(args, arg)) + return current_env + + +def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> tuple[list[str], dict[str, str]]: + """ + Prepares and returns the command list and an environment with the correct simple launcher environment variables. 
+ """ + cmd = [] + if args.no_python and args.module: + raise ValueError("--module and --no_python cannot be used together") + + num_processes = getattr(args, "num_processes", None) + num_machines = args.num_machines + if args.mpirun_hostfile is not None: + mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg, bind_to_arg = _get_mpirun_args() + bind_to = getattr(args, "bind-to", "socket") + nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1" + cmd += [ + mpi_app_name, + hostfile_arg, + args.mpirun_hostfile, + proc_per_node_arg, + nproc_per_node, + ] + if num_processes: + cmd += [num_proc_arg, str(num_processes)] + if bind_to_arg: + cmd += [bind_to_arg, bind_to] + if not args.no_python: + cmd.append(sys.executable) + if args.module: + cmd.append("-m") + cmd.append(args.training_script) + cmd.extend(args.training_script_args) + + current_env = os.environ.copy() + current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + if args.gpu_ids != "all" and args.gpu_ids is not None: + if is_xpu_available(): + current_env["ZE_AFFINITY_MASK"] = args.gpu_ids + elif is_mlu_available(): + current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids + elif is_sdaa_available(): + current_env["SDAA_VISIBLE_DEVICES"] = args.gpu_ids + elif is_musa_available(): + current_env["MUSA_VISIBLE_DEVICES"] = args.gpu_ids + elif is_npu_available(): + current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids + elif is_hpu_available(): + current_env["HABANA_VISIBLE_MODULES"] = args.gpu_ids + else: + current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids + if num_machines > 1: + assert args.main_process_ip is not None, ( + "When using multiple machines, you need to specify the main process IP." + ) + assert args.main_process_port is not None, ( + "When using multiple machines, you need to specify the main process port." 
+ ) + + ccl_worker_count = getattr(args, "mpirun_ccl", 0) if is_ccl_available() else 0 + if (num_processes is not None and num_processes > 1) or num_machines > 1: + current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" + current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" + current_env["CCL_WORKER_COUNT"] = str(ccl_worker_count) + if current_env["ACCELERATE_USE_CPU"]: + current_env["KMP_AFFINITY"] = "granularity=fine,compact,1,0" + current_env["KMP_BLOCKTIME"] = str(1) + + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + if args.mixed_precision.lower() == "fp8": + if not is_fp8_available(): + raise RuntimeError( + "FP8 is not available on this machine. Please ensure that either Transformer Engine, MSAMP or torchao is installed." + ) + current_env = setup_fp8_env(args, current_env) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError( + f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
+ ) + current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value + current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode + current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) + current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) + current_env["ACCELERATE_DYNAMO_USE_REGIONAL_COMPILATION"] = str(args.dynamo_use_regional_compilation) + + current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) + if is_ipex_available(): + current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower() + if args.enable_cpu_affinity: + current_env["ACCELERATE_CPU_AFFINITY"] = "1" + return cmd, current_env + + +def prepare_multi_gpu_env(args: argparse.Namespace) -> dict[str, str]: + """ + Prepares and returns an environment with the correct multi-GPU environment variables. + """ + # get free port and update configurations + if args.main_process_port == 0: + args.main_process_port = get_free_port() + + elif args.main_process_port is None: + args.main_process_port = 29500 + + num_processes = args.num_processes + num_machines = args.num_machines + main_process_ip = args.main_process_ip + main_process_port = args.main_process_port + if num_machines > 1: + args.nproc_per_node = str(num_processes // num_machines) + args.nnodes = str(num_machines) + args.node_rank = int(args.machine_rank) + if getattr(args, "same_network", False): + args.master_addr = str(main_process_ip) + args.master_port = str(main_process_port) + else: + args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" + else: + args.nproc_per_node = str(num_processes) + if main_process_port is not None: + args.master_port = str(main_process_port) + + # only need to check port availability in main process, in case we have to start multiple launchers on the same machine + # for some reasons like splitting log files. 
+ need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 + if need_port_check and is_port_in_use(main_process_port): + if num_machines <= 1: + args.standalone = True + warnings.warn( + f"Port `{main_process_port}` is already in use. " + "Accelerate will attempt to launch in a standalone-like mode by finding an open port automatically for this session. " + "If this current attempt fails, or for more control in future runs, please specify a different port " + "(e.g., `--main_process_port `) or use `--main_process_port 0` for automatic selection " + "in your launch command or Accelerate config file." + ) + else: + raise ConnectionError( + f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " + "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" + " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." 
+ ) + + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + args.module = True + elif args.no_python: + args.no_python = True + + current_env = os.environ.copy() + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + gpu_ids = getattr(args, "gpu_ids", "all") + if gpu_ids != "all" and args.gpu_ids is not None: + if is_xpu_available(): + current_env["ZE_AFFINITY_MASK"] = gpu_ids + elif is_mlu_available(): + current_env["MLU_VISIBLE_DEVICES"] = gpu_ids + elif is_sdaa_available(): + current_env["SDAA_VISIBLE_DEVICES"] = gpu_ids + elif is_musa_available(): + current_env["MUSA_VISIBLE_DEVICES"] = gpu_ids + elif is_npu_available(): + current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids + elif is_hpu_available(): + current_env["HABANA_VISIBLE_MODULES"] = gpu_ids + else: + current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids + mixed_precision = args.mixed_precision.lower() + try: + mixed_precision = PrecisionType(mixed_precision) + except ValueError: + raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") + + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + if args.mixed_precision.lower() == "fp8": + if not is_fp8_available(): + raise RuntimeError( + "FP8 is not available on this machine. Please ensure that either Transformer Engine, MSAMP or torchao is installed." + ) + current_env = setup_fp8_env(args, current_env) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError( + f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
+ ) + current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value + current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode + current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) + current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) + current_env["ACCELERATE_DYNAMO_USE_REGIONAL_COMPILATION"] = str(args.dynamo_use_regional_compilation) + + if args.use_fsdp: + current_env["ACCELERATE_USE_FSDP"] = "true" + if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states: + raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`") + + current_env["FSDP_VERSION"] = str(args.fsdp_version) if hasattr(args, "fsdp_version") else "1" + + # For backwards compatibility, we support this in launched scripts, + # however, we do not ask users for this in `accelerate config` CLI + current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) + + current_env["FSDP_RESHARD_AFTER_FORWARD"] = str(args.fsdp_reshard_after_forward).lower() + current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() + current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) + if args.fsdp_auto_wrap_policy is not None: + current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) + if args.fsdp_transformer_layer_cls_to_wrap is not None: + current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) + if args.fsdp_backward_prefetch is not None: + current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch) + if args.fsdp_state_dict_type is not None: + current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) + current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower() + current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower() + current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower() + 
current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower() + current_env["FSDP_ACTIVATION_CHECKPOINTING"] = str(args.fsdp_activation_checkpointing).lower() + if getattr(args, "fsdp_ignored_modules", None) is not None: + current_env["FSDP_IGNORED_MODULES"] = str(args.fsdp_ignored_modules) + + if args.use_megatron_lm: + prefix = "MEGATRON_LM_" + current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" + current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) + current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) + current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) + if args.megatron_lm_num_micro_batches is not None: + current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) + if args.megatron_lm_sequence_parallelism is not None: + current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) + if args.megatron_lm_recompute_activations is not None: + current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) + if args.megatron_lm_use_distributed_optimizer is not None: + current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) + + current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) + if args.enable_cpu_affinity: + current_env["ACCELERATE_CPU_AFFINITY"] = "1" + + if not args.use_parallelism_config: + return current_env + + prefix = "PARALLELISM_CONFIG_" + if args.use_parallelism_config: + current_env["ACCELERATE_USE_PARALLELISM_CONFIG"] = "true" + current_env[prefix + "DP_REPLICATE_SIZE"] = str(args.parallelism_config_dp_replicate_size) + current_env[prefix + "TP_SIZE"] = str(args.parallelism_config_tp_size) + current_env[prefix + "CP_SIZE"] = str(args.parallelism_config_cp_size) + current_env[prefix + "DP_SHARD_SIZE"] = str(args.parallelism_config_dp_shard_size) + if args.parallelism_config_cp_size > 1: + current_env[prefix + "CP_COMM_STRATEGY"] 
= str(args.parallelism_config_cp_comm_strategy) + + return current_env + + +def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> tuple[list[str], dict[str, str]]: + """ + Prepares and returns the command list and an environment with the correct DeepSpeed environment variables. + """ + # get free port and update configurations + if args.main_process_port == 0: + args.main_process_port = get_free_port() + + elif args.main_process_port is None: + args.main_process_port = 29500 + + num_processes = args.num_processes + num_machines = args.num_machines + main_process_ip = args.main_process_ip + main_process_port = args.main_process_port + cmd = None + + # make sure launcher is not None + if args.deepspeed_multinode_launcher is None: + # set to default pdsh + args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0] + + if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + cmd = ["deepspeed"] + cmd.extend(["--hostfile", str(args.deepspeed_hostfile)]) + if args.deepspeed_multinode_launcher == "nossh": + if compare_versions("deepspeed", "<", "0.14.5"): + raise ValueError("nossh launcher requires DeepSpeed >= 0.14.5") + cmd.extend(["--node_rank", str(args.machine_rank), "--no_ssh"]) + else: + cmd.extend(["--no_local_rank", "--launcher", str(args.deepspeed_multinode_launcher)]) + if args.deepspeed_exclusion_filter is not None: + cmd.extend( + [ + "--exclude", + str(args.deepspeed_exclusion_filter), + ] + ) + elif args.deepspeed_inclusion_filter is not None: + cmd.extend( + [ + "--include", + str(args.deepspeed_inclusion_filter), + ] + ) + else: + cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) + if main_process_ip: + cmd.extend(["--master_addr", str(main_process_ip)]) + cmd.extend(["--master_port", str(main_process_port)]) + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + cmd.append("--module") + elif 
args.no_python: + cmd.append("--no_python") + cmd.append(args.training_script) + cmd.extend(args.training_script_args) + elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: + args.nproc_per_node = str(num_processes // num_machines) + args.nnodes = str(num_machines) + args.node_rank = int(args.machine_rank) + if getattr(args, "same_network", False): + args.master_addr = str(main_process_ip) + args.master_port = str(main_process_port) + else: + args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" + else: + args.nproc_per_node = str(num_processes) + if main_process_port is not None: + args.master_port = str(main_process_port) + + # only need to check port availability in main process, in case we have to start multiple launchers on the same machine + # for some reasons like splitting log files. + need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 + if need_port_check and is_port_in_use(main_process_port): + if num_machines <= 1: + args.standalone = True + warnings.warn( + f"Port `{main_process_port}` is already in use. " + "Accelerate will attempt to launch in a standalone-like mode by finding an open port automatically for this session. " + "If this current attempt fails, or for more control in future runs, please specify a different port " + "(e.g., `--main_process_port `) or use `--main_process_port 0` for automatic selection " + "in your launch command or Accelerate config file." + ) + else: + raise ConnectionError( + f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " + "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" + " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." 
+ ) + + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + args.module = True + elif args.no_python: + args.no_python = True + + current_env = os.environ.copy() + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + gpu_ids = getattr(args, "gpu_ids", "all") + if gpu_ids != "all" and args.gpu_ids is not None: + if is_xpu_available(): + current_env["ZE_AFFINITY_MASK"] = gpu_ids + elif is_mlu_available(): + current_env["MLU_VISIBLE_DEVICES"] = gpu_ids + elif is_sdaa_available(): + current_env["SDAA_VISIBLE_DEVICES"] = gpu_ids + elif is_musa_available(): + current_env["MUSA_VISIBLE_DEVICES"] = gpu_ids + elif is_npu_available(): + current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids + elif is_hpu_available(): + current_env["HABANA_VISIBLE_MODULES"] = gpu_ids + else: + current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath(".")) + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + if args.mixed_precision.lower() == "fp8": + if not is_fp8_available(): + raise RuntimeError( + "FP8 is not available on this machine. Please ensure that either Transformer Engine, MSAMP or torchao is installed." 
+ ) + current_env = setup_fp8_env(args, current_env) + current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower() + current_env["ACCELERATE_USE_DEEPSPEED"] = "true" + if args.zero_stage is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage) + if args.gradient_accumulation_steps is not None: + current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps) + if args.gradient_clipping is not None: + current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower() + if args.offload_optimizer_device is not None: + current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower() + if args.offload_param_device is not None: + current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower() + if args.zero3_init_flag is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower() + if args.zero3_save_16bit_model is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower() + if args.deepspeed_config_file is not None: + current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file) + if args.enable_cpu_affinity: + current_env["ACCELERATE_CPU_AFFINITY"] = "1" + if args.deepspeed_moe_layer_cls_names is not None: + current_env["ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES"] = str(args.deepspeed_moe_layer_cls_names) + return cmd, current_env + + +def prepare_tpu( + args: argparse.Namespace, current_env: dict[str, str], pod: bool = False +) -> tuple[argparse.Namespace, dict[str, str]]: + """ + Prepares and returns an environment with the correct TPU environment variables. 
+ """ + if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True): + if args.downcast_bf16: + current_env["XLA_DOWNCAST_BF16"] = "1" + else: + current_env["XLA_USE_BF16"] = "1" + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + if pod: + # Take explicit args and set them up for XLA + args.vm = args.tpu_vm + args.tpu = args.tpu_name + return args, current_env + + +def _convert_nargs_to_dict(nargs: list[str]) -> dict[str, str]: + if len(nargs) < 0: + return {} + # helper function to infer type for argsparser + + def _infer_type(s): + try: + s = float(s) + + if s // 1 == s: + return int(s) + return s + except ValueError: + return s + + parser = argparse.ArgumentParser() + _, unknown = parser.parse_known_args(nargs) + for index, argument in enumerate(unknown): + if argument.startswith(("-", "--")): + action = None + if index + 1 < len(unknown): # checks if next index would be in list + if unknown[index + 1].startswith(("-", "--")): # checks if next element is an key + # raise an error if element is store_true or store_false + raise ValueError( + "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types" + ) + else: # raise an error if last element is store_true or store_false + raise ValueError( + "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types" + ) + # adds argument to parser based on action_store true + if action is None: + parser.add_argument(argument, type=_infer_type) + else: + parser.add_argument(argument, action=action) + + return { + key: (literal_eval(value) if value in ("True", "False") else value) + for key, value in parser.parse_args(nargs).__dict__.items() + } + + +def prepare_sagemager_args_inputs( + sagemaker_config: SageMakerConfig, args: argparse.Namespace +) -> tuple[argparse.Namespace, dict[str, Any]]: + # configure environment + print("Configuring Amazon SageMaker environment") + os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region + + # configure credentials + if sagemaker_config.profile is not None: + os.environ["AWS_PROFILE"] = sagemaker_config.profile + elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None: + os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id + os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key + else: + raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile") + + # extract needed arguments + source_dir = os.path.dirname(args.training_script) + if not source_dir: # checks if string is empty + source_dir = "." + entry_point = os.path.basename(args.training_script) + if not entry_point.endswith(".py"): + raise ValueError(f'Your training script should be a python script and not "{entry_point}"') + + print("Converting Arguments to Hyperparameters") + hyperparameters = _convert_nargs_to_dict(args.training_script_args) + + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError( + f"Unknown dynamo backend: {args.dynamo_backend.upper()}. 
Choose between {DynamoBackend.list()}." + ) + + # Environment variables to be set for use during training job + environment = { + "ACCELERATE_USE_SAGEMAKER": "true", + "ACCELERATE_MIXED_PRECISION": str(mixed_precision), + "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value, + "ACCELERATE_DYNAMO_MODE": args.dynamo_mode, + "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph), + "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic), + "ACCELERATE_DYNAMO_USE_REGIONAL_COMPILATION": str(args.dynamo_use_regional_compilation), + "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value, + } + if args.mixed_precision.lower() == "fp8": + if not is_fp8_available(): + raise RuntimeError( + "FP8 is not available on this machine. Please ensure that either Transformer Engine, MSAMP or torchao is installed." + ) + environment = setup_fp8_env(args, environment) + # configure distribution set up + distribution = None + if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL: + distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} + + # configure sagemaker inputs + sagemaker_inputs = None + if sagemaker_config.sagemaker_inputs_file is not None: + print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file") + sagemaker_inputs = {} + with open(sagemaker_config.sagemaker_inputs_file) as file: + for i, line in enumerate(file): + if i == 0: + continue + l = line.split("\t") + sagemaker_inputs[l[0]] = l[1].strip() + print(f"Loaded SageMaker Inputs: {sagemaker_inputs}") + + # configure sagemaker metrics + sagemaker_metrics = None + if sagemaker_config.sagemaker_metrics_file is not None: + print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file") + sagemaker_metrics = [] + with open(sagemaker_config.sagemaker_metrics_file) as file: + for i, line in enumerate(file): + if i == 0: + continue + l = line.split("\t") + metric_dict = { + "Name": l[0], + "Regex": 
l[1].strip(), + } + sagemaker_metrics.append(metric_dict) + print(f"Loaded SageMaker Metrics: {sagemaker_metrics}") + + # configure session + print("Creating Estimator") + args = { + "image_uri": sagemaker_config.image_uri, + "entry_point": entry_point, + "source_dir": source_dir, + "role": sagemaker_config.iam_role_name, + "transformers_version": sagemaker_config.transformers_version, + "pytorch_version": sagemaker_config.pytorch_version, + "py_version": sagemaker_config.py_version, + "base_job_name": sagemaker_config.base_job_name, + "instance_count": sagemaker_config.num_machines, + "instance_type": sagemaker_config.ec2_instance_type, + "debugger_hook_config": False, + "distribution": distribution, + "hyperparameters": hyperparameters, + "environment": environment, + "metric_definitions": sagemaker_metrics, + } + + if sagemaker_config.additional_args is not None: + args = merge_dicts(sagemaker_config.additional_args, args) + return args, sagemaker_inputs + + +def env_var_path_add(env_var_name, path_to_add): + """ + Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the + caller to set it in os.environ. + """ + paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0] + paths.append(str(path_to_add)) + return ":".join(paths) + + +class PrepareForLaunch: + """ + Prepare a function that will launched in a distributed setup. + + Args: + launcher (`Callable`): + The function to launch. + distributed_type ([`~state.DistributedType`]): + The distributed type to prepare for. + debug (`bool`, *optional*, defaults to `False`): + Whether or not this is a debug launch. 
+ """ + + def __init__(self, launcher, distributed_type="NO", debug=False): + self.launcher = launcher + self.distributed_type = DistributedType(distributed_type) + self.debug = debug + + def __call__(self, index, *args): + if self.debug: + world_size = int(os.environ.get("WORLD_SIZE")) + rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE") + torch.distributed.init_process_group( + "gloo", + rank=index, + store=torch.distributed.FileStore(rdv_file, world_size), + world_size=world_size, + ) + elif self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + ): + # Prepare the environment for torch.distributed + os.environ["LOCAL_RANK"] = str(index) + nproc = int(os.environ.get("NPROC", 1)) + node_rank = int(os.environ.get("NODE_RANK", 0)) + os.environ["RANK"] = str(nproc * node_rank + index) + + os.environ["FORK_LAUNCHED"] = str(1) + self.launcher(*args) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py b/venv/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..9f6d87b2edb13299e0b6b34dd32851f020415242 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py @@ -0,0 +1,1424 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import math +import os +from abc import ABC +from functools import partial + +import torch +import torch.nn.functional as F +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler +from .imports import is_megatron_lm_available +from .operations import recursively_apply, send_to_device + + +if is_megatron_lm_available(): + from megatron.core import mpu, tensor_parallel + from megatron.core.distributed import DistributedDataParallel as LocalDDP + from megatron.core.distributed import finalize_model_grads + from megatron.core.enums import ModelType + from megatron.core.num_microbatches_calculator import get_num_microbatches + from megatron.core.optimizer import get_megatron_optimizer + from megatron.core.parallel_state import get_tensor_model_parallel_group, get_tensor_model_parallel_src_rank + from megatron.core.pipeline_parallel import get_forward_backward_func + from megatron.core.utils import get_model_config + from megatron.inference.text_generation.communication import broadcast_int_list, broadcast_tensor + from megatron.inference.text_generation.generation import ( + beam_search_and_return_on_first_stage, + generate_tokens_probs_and_return_on_first_stage, + ) + from megatron.legacy.data.dataset_utils import build_train_valid_test_datasets + from megatron.legacy.model import BertModel, Float16Module, GPTModel, T5Model + from megatron.legacy.model.classification import Classification + from megatron.training import ( + get_args, + get_tensorboard_writer, + get_tokenizer, + print_rank_last, + ) + from megatron.training.arguments import ( + _add_data_args, + _add_validation_args, + core_transformer_config_from_args, + parse_args, + validate_args, + ) + from megatron.training.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint + from 
megatron.training.global_vars import set_global_variables + from megatron.training.initialize import ( + _compile_dependencies, + _init_autoresume, + _initialize_distributed, + _set_random_seed, + set_jit_fusion_options, + write_args_to_tensorboard, + ) + from megatron.training.tokenizer.tokenizer import _vocab_size_with_padding + from megatron.training.training import ( + build_train_valid_test_data_iterators, + get_optimizer_param_scheduler, + num_floating_point_operations, + setup_model_and_optimizer, + train_step, + training_log, + ) + from megatron.training.utils import ( + average_losses_across_data_parallel_group, + calc_params_l2_norm, + get_ltor_masks_and_position_ids, + unwrap_model, + ) + + +# model utilities +def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): + """Build the model.""" + args = get_args() + mode = "pre-training" if args.pretraining_flag else "fine-tuning" + if args.rank == 0: + print(f"Building {args.model_type_name} model in the {mode} mode.") + print( + "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " + "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." 
+ ) + config = core_transformer_config_from_args(args) + if args.model_type_name == "bert": + if args.pretraining_flag: + num_tokentypes = 2 if args.bert_binary_head else 0 + model = BertModel( + config=config, + num_tokentypes=num_tokentypes, + add_binary_head=args.bert_binary_head, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + ) + else: + model = Classification( + config=config, + num_classes=args.num_labels, + num_tokentypes=2, + pre_process=pre_process, + post_process=post_process, + ) + elif args.model_type_name == "gpt": + model = GPTModel( + config=config, + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + ) + elif args.model_type_name == "t5": + model = T5Model( + config=config, + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + add_encoder=add_encoder, + add_decoder=add_decoder, + ) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + return model + + +def prepare_model_optimizer_scheduler(accelerator): + accelerator.print("Preparing model optimizer scheduler") + args = get_args() + if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None: + if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None: + raise ValueError( + "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`." 
+ ) + custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function + model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func) + optimizer = prepare_optimizer(accelerator, model) + scheduler = prepare_scheduler(accelerator, optimizer, scheduler=None) + else: + model_type = ModelType.encoder_or_decoder + if args.model_type_name == "t5": + model_type = ModelType.encoder_and_decoder + model_provider_func_ = model_provider_func + if accelerator.state.megatron_lm_plugin.custom_model_provider_function is not None: + model_provider_func_ = accelerator.state.megatron_lm_plugin.custom_model_provider_function + (model, optimizer, scheduler) = setup_model_and_optimizer( + model_provider_func_, + model_type, + no_wd_decay_cond=args.no_wd_decay_cond, + scale_lr_cond=args.scale_lr_cond, + lr_mult=args.lr_mult, + ) + args.model_len = len(model) + return model, optimizer, scheduler + + +# dataloader utilities +class MegatronLMDummyDataLoader: + """ + Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training + + Args: + **dataset_kwargs: Megatron data arguments. 
+ """ + + def __init__(self, **dataset_kwargs): + parser = argparse.ArgumentParser() + parser = _add_data_args(parser) + parser = _add_validation_args(parser) + data_args = parser.parse_known_args() + self.dataset_args = vars(data_args[0]) + self.dataset_args.update(dataset_kwargs) + self.dataset_args["megatron_dataset_flag"] = True + + def set_megatron_data_args(self): + args = get_args() + for key, value in self.dataset_args.items(): + old_value = getattr(args, key, "") + if old_value != value: + print( + f"WARNING: MegatronLMDummyDataLoader overriding arguments for {key}:{old_value} with {key}:{value}" + ) + setattr(args, key, value) + + def get_train_valid_test_datasets_provider(self, accelerator): + def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + dataset_args = { + "data_prefix": args.data_path if isinstance(args.data_path, (list, tuple)) else [args.data_path], + "splits_string": args.split, + "train_valid_test_num_samples": train_val_test_num_samples, + "seed": args.seed, + } + if args.model_type_name == "bert": + dataset_args.update( + { + "max_seq_length": args.seq_length, + "binary_head": args.bert_binary_head, + } + ) + elif args.model_type_name == "gpt": + dataset_args.update( + { + "max_seq_length": args.seq_length, + } + ) + elif args.model_type_name == "t5": + dataset_args.update( + { + "max_seq_length": args.encoder_seq_length, + "max_seq_length_dec": args.decoder_seq_length, + "dataset_type": "t5", + } + ) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args) + return train_ds, valid_ds, test_ds + + if accelerator.state.megatron_lm_plugin.custom_megatron_datasets_provider_function is not None: + return accelerator.state.megatron_lm_plugin.custom_megatron_datasets_provider_function + try: + args = get_args() + # Use '--no-use-pep517 -e' to pip install nvidia's 
megatron from source + if args.model_type_name == "bert": + from pretrain_bert import train_valid_test_datasets_provider + + train_valid_test_datasets_provider.is_distributed = True + return train_valid_test_datasets_provider + elif args.model_type_name == "gpt": + from pretrain_gpt import train_valid_test_datasets_provider + + train_valid_test_datasets_provider.is_distributed = True + return train_valid_test_datasets_provider + elif args.model_type_name == "t5": + from pretrain_t5 import train_valid_test_datasets_provider + + train_valid_test_datasets_provider.is_distributed = True + return train_valid_test_datasets_provider + except ImportError: + pass + return train_valid_test_datasets_provider + + def build_train_valid_test_data_iterators(self, accelerator): + args = get_args() + + train_valid_test_dataset_provider = self.get_train_valid_test_datasets_provider(accelerator) + if args.virtual_pipeline_model_parallel_size is not None: + train_data_iterator = [] + valid_data_iterator = [] + test_data_iterator = [] + for i in range(getattr(args, "model_len", 0)): + mpu.set_virtual_pipeline_model_parallel_rank(i) + iterators = build_train_valid_test_data_iterators(train_valid_test_dataset_provider) + train_data_iterator.append(iterators[0]) + valid_data_iterator.append(iterators[1]) + test_data_iterator.append(iterators[2]) + else: + train_data_iterator, valid_data_iterator, test_data_iterator = build_train_valid_test_data_iterators( + train_valid_test_dataset_provider + ) + + return train_data_iterator, valid_data_iterator, test_data_iterator + + +def _handle_megatron_data_iterator(accelerator, data_iterator): + class DummyMegatronDataloader: + def __iter__(self): + return self + + def __next__(self): + return {} + + is_data_iterator_empty = data_iterator is None + is_src_data_iterator_empty = torch.tensor(is_data_iterator_empty, dtype=torch.bool, device=accelerator.device) + torch.distributed.broadcast( + is_src_data_iterator_empty, 
get_tensor_model_parallel_src_rank(), group=get_tensor_model_parallel_group() + ) + if not is_src_data_iterator_empty and is_data_iterator_empty: + return DummyMegatronDataloader() + return data_iterator + + +def prepare_data_loader(accelerator, dataloader): + accelerator.print("Preparing dataloader") + args = get_args() + if not args.megatron_dataset_flag: + from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader + + micro_batch_size = args.micro_batch_size * args.num_micro_batches + kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS} + if kwargs["batch_size"] is None: + if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler): + kwargs["sampler"].batch_size = micro_batch_size + else: + del kwargs["sampler"] + del kwargs["shuffle"] + del kwargs["batch_size"] + kwargs["batch_sampler"].batch_size = micro_batch_size + else: + del kwargs["batch_sampler"] + kwargs["batch_size"] = micro_batch_size + + dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs) + # split_batches: + # Megatron only needs to fetch different data between different dp groups, + # and does not need to split the data within the dp group. 
+ return prepare_data_loader( + dataloader, + accelerator.device, + num_processes=mpu.get_data_parallel_world_size(), + process_index=mpu.get_data_parallel_rank(), + split_batches=False, + put_on_device=True, + rng_types=accelerator.rng_types.copy(), + dispatch_batches=accelerator.dispatch_batches, + ) + else: + if args.consumed_samples is not None: + ( + args.consumed_train_samples, + args.consumed_valid_samples, + args.consumed_test_samples, + ) = args.consumed_samples + else: + args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0 + args.micro_batch_size = args.micro_batch_size * args.num_micro_batches + # In order to be compatible with data in transform format, + # it needs to increase the size of mbs first, + # and then split the large batch data into some mbs. + ( + train_data_iterator, + valid_data_iterator, + test_data_iterator, + ) = dataloader.build_train_valid_test_data_iterators(accelerator) + args.micro_batch_size = args.micro_batch_size // args.num_micro_batches + + train_data_iterator = _handle_megatron_data_iterator( + accelerator=accelerator, data_iterator=train_data_iterator + ) + valid_data_iterator = _handle_megatron_data_iterator( + accelerator=accelerator, data_iterator=valid_data_iterator + ) + test_data_iterator = _handle_megatron_data_iterator(accelerator=accelerator, data_iterator=test_data_iterator) + + return train_data_iterator, valid_data_iterator, test_data_iterator + + +# optimizer utilities +class MegatronLMOptimizerWrapper(AcceleratedOptimizer): + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + + def zero_grad(self, set_to_none=None): + pass # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed + + def step(self): + pass # `model(**batch)` is doing that automatically. 
Therefore, its implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + return self.optimizer.skipped_iter + + +def prepare_optimizer(accelerator, model): + accelerator.print("Preparing optimizer") + args = get_args() + return get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult) + + +# scheduler utilities +class MegatronLMDummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int): + Total number of steps. + warmup_num_steps (int): + Number of steps for warmup. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.kwargs = kwargs + + +class MegatronLMSchedulerWrapper(AcceleratedScheduler): + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self, *args, **kwargs): + return # `model(**batch)` is doing that automatically. 
Therefore, its implementation is not needed + + +def prepare_scheduler(accelerator, optimizer, scheduler): + accelerator.print("Preparing scheduler") + scheduler = get_optimizer_param_scheduler(optimizer) + return scheduler + + +class AbstractTrainStep(ABC): + """Abstract class for batching, forward pass and loss handler.""" + + def __init__(self, name): + super().__init__() + self.name = name + + def get_batch_func(self, accelerator, megatron_dataset_flag): + pass + + def get_forward_step_func(self): + pass + + def get_loss_func(self, accelerator): + pass + + +class BertTrainStep(AbstractTrainStep): + """ + Bert train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. + """ + + def __init__(self, accelerator, args): + super().__init__("BertTrainStep") + self.get_batch = self.get_batch_func(accelerator, args.megatron_dataset_flag) + self.loss_func = self.get_loss_func(accelerator, args.pretraining_flag, args.num_labels) + self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head) + if not args.model_return_dict: + self.model_output_class = None + else: + from transformers.modeling_outputs import SequenceClassifierOutput + + self.model_output_class = SequenceClassifierOutput + + def get_batch_func(self, accelerator, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Build the batch.""" + + # Items and their type. + keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
+ tokens = data_b["text"].long() + types = data_b["types"].long() + sentence_order = data_b["is_random"].long() + loss_mask = data_b["loss_mask"].float() + lm_labels = data_b["labels"].long() + padding_mask = data_b["padding_mask"].long() + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + def get_batch_transformer(data_iterator): + """Build the batch.""" + data = next(data_iterator) + data = send_to_device(data, torch.cuda.current_device()) + + # Unpack. + tokens = data["input_ids"].long() + padding_mask = data["attention_mask"].long() + if "token_type_ids" in data: + types = data["token_type_ids"].long() + else: + types = None + if "labels" in data: + lm_labels = data["labels"].long() + loss_mask = (data["labels"] != -100).to(torch.float) + else: + lm_labels = None + loss_mask = None + if "next_sentence_label" in data: + sentence_order = data["next_sentence_label"].long() + else: + sentence_order = None + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + if accelerator.state.megatron_lm_plugin.custom_get_batch_function is not None: + return accelerator.state.megatron_lm_plugin.custom_get_batch_function + if megatron_dataset_flag: + try: + # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source + from pretrain_bert import get_batch + + return get_batch + except ImportError: + pass + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self, accelerator, pretraining_flag, num_labels): + def loss_func_pretrain(loss_mask, sentence_order, output_tensor): + lm_loss_, sop_logits = output_tensor + + lm_loss_ = lm_loss_.float() + loss_mask = loss_mask.float() + lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + if sop_logits is not None: + sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1) + sop_loss = sop_loss.float() + loss = lm_loss + sop_loss + averaged_losses = 
average_losses_across_data_parallel_group([lm_loss, sop_loss]) + return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]} + + else: + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + return loss, {"lm loss": averaged_losses[0]} + + def loss_func_finetune(labels, logits): + if num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)): + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, num_labels), labels.view(-1)) + else: + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + averaged_losses = average_losses_across_data_parallel_group([loss]) + return loss, {"loss": averaged_losses[0]} + + if accelerator.state.megatron_lm_plugin.custom_loss_function is not None: + return accelerator.state.megatron_lm_plugin.custom_loss_function + if pretraining_flag: + return loss_func_pretrain + else: + return loss_func_finetune + + def get_forward_step_func(self, pretraining_flag, bert_binary_head): + def forward_step(data_iterator, model): + """Forward step.""" + tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator) + if not bert_binary_head: + types = None + # Forward pass through the model. + if pretraining_flag: + output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels) + return output_tensor, partial(self.loss_func, loss_mask, sentence_order) + else: + logits = model(tokens, padding_mask, tokentype_ids=types) + return logits, partial(self.loss_func, labels) + + return forward_step + + +class GPTTrainStep(AbstractTrainStep): + """ + GPT train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. 
+ """ + + def __init__(self, accelerator, args): + super().__init__("GPTTrainStep") + self.get_batch = self.get_batch_func(accelerator, args.megatron_dataset_flag) + self.loss_func = self.get_loss_func(accelerator) + self.forward_step = self.get_forward_step_func() + self.eod_token = args.padded_vocab_size - 1 + if args.vocab_file is not None: + tokenizer = get_tokenizer() + self.eod_token = tokenizer.eod + self.reset_position_ids = args.reset_position_ids + self.reset_attention_mask = args.reset_attention_mask + self.eod_mask_loss = args.eod_mask_loss + if not args.model_return_dict: + self.model_output_class = None + else: + from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions + + self.model_output_class = CausalLMOutputWithCrossAttentions + + def get_batch_func(self, accelerator, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Generate a batch""" + # Items and their type. + keys = ["text"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_ = data_b["text"].long() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and position ids. 
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss + ) + + return tokens, labels, loss_mask, attention_mask, position_ids + + def get_batch_transformer(data_iterator): + data = next(data_iterator) + data = {"input_ids": data["input_ids"]} + data = send_to_device(data, torch.cuda.current_device()) + + tokens_ = data["input_ids"].long() + padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token + tokens_ = torch.concat([tokens_, padding], dim=1) + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + # Get the masks and position ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True + ) + return tokens, labels, loss_mask, attention_mask, position_ids + + if accelerator.state.megatron_lm_plugin.custom_get_batch_function is not None: + return accelerator.state.megatron_lm_plugin.custom_get_batch_function + if megatron_dataset_flag: + try: + # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source + from pretrain_gpt import get_batch + + return get_batch + except ImportError: + pass + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self, accelerator): + args = get_args() + + def loss_func(loss_mask, output_tensor): + if args.return_logits: + losses, logits = output_tensor + else: + losses = output_tensor + losses = losses.float() + loss_mask = loss_mask.view(-1).float() + if args.context_parallel_size > 1: + loss = torch.cat([torch.sum(losses.view(-1) * loss_mask).view(1), loss_mask.sum().view(1)]) + torch.distributed.all_reduce(loss, group=mpu.get_context_parallel_group()) + loss = loss[0] / loss[1] + else: + loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() + + # Check individual rank losses are not 
NaN prior to DP all-reduce. + if args.check_for_nan_in_loss_and_grad: + global_rank = torch.distributed.get_rank() + assert not loss.isnan(), ( + f"Rank {global_rank}: found NaN in local forward loss calculation. " + f"Device: {torch.cuda.current_device()}, node: {os.uname()[1]}" + ) + + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss]) + + output_dict = {"lm loss": averaged_loss[0]} + if args.return_logits: + output_dict.update({"logits": logits}) + return loss, output_dict + + if accelerator.state.megatron_lm_plugin.custom_loss_function is not None: + return accelerator.state.megatron_lm_plugin.custom_loss_function + return loss_func + + def get_forward_step_func(self): + def forward_step(data_iterator, model): + """Forward step.""" + # Get the batch. + tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) + output_tensor = model(tokens, position_ids, attention_mask, labels=labels) + + return output_tensor, partial(self.loss_func, loss_mask) + + return forward_step + + +class T5TrainStep(AbstractTrainStep): + """ + T5 train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. + """ + + def __init__(self, accelerator, args): + super().__init__("T5TrainStep") + self.get_batch = self.get_batch_func(accelerator, args.megatron_dataset_flag) + self.loss_func = self.get_loss_func(accelerator) + self.forward_step = self.get_forward_step_func() + if not args.model_return_dict: + self.model_output_class = None + else: + from transformers.modeling_outputs import Seq2SeqLMOutput + + self.model_output_class = Seq2SeqLMOutput + + @staticmethod + def attn_mask_postprocess(attention_mask): + # We create a 3D attention mask from a 2D tensor mask. 
+ # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = attention_mask.unsqueeze(2) + # [b, s, s] + attention_mask_bss = attention_mask_b1s * attention_mask_bs1 + # Convert attention mask to binary: + extended_attention_mask = attention_mask_bss < 0.5 + return extended_attention_mask + + @staticmethod + def get_decoder_mask(seq_length, device): + attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)) + attention_mask = attention_mask < 0.5 + return attention_mask + + @staticmethod + def get_enc_dec_mask(attention_mask, dec_seq_length, device): + batch_size, _ = attention_mask.shape + # We create a 3D attention mask from a 2D tensor mask. + # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device) + attention_mask_bss = attention_mask_bs1 * attention_mask_b1s + extended_attention_mask = attention_mask_bss < 0.5 + return extended_attention_mask + + def get_batch_func(self, accelerator, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Build the batch.""" + + keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
+ tokens_enc = data_b["text_enc"].long() + tokens_dec = data_b["text_dec"].long() + labels = data_b["labels"].long() + loss_mask = data_b["loss_mask"].float() + + enc_mask = data_b["enc_mask"] < 0.5 + dec_mask = data_b["dec_mask"] < 0.5 + enc_dec_mask = data_b["enc_dec_mask"] < 0.5 + + return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask + + def get_batch_transformer(data_iterator): + """Build the batch.""" + data = next(data_iterator) + data = send_to_device(data, torch.cuda.current_device()) + + tokens_enc = data["input_ids"].long() + labels = data["labels"].long() + loss_mask = (labels != -100).to(torch.float) + if "decoder_input_ids" in data: + tokens_dec = data["decoder_input_ids"].long() + else: + tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long) + tokens_dec[..., 1:] = labels[..., :-1].clone() + tokens_dec[..., 0] = 0 + tokens_dec.masked_fill_(tokens_dec == -100, 0) + enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long()) + dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device) + enc_dec_mask = T5TrainStep.get_enc_dec_mask( + data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device + ) + + return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask + + if accelerator.state.megatron_lm_plugin.custom_get_batch_function is not None: + return accelerator.state.megatron_lm_plugin.custom_get_batch_function + if megatron_dataset_flag: + try: + # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source + from pretrain_t5 import get_batch + + return get_batch + except ImportError: + pass + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self, accelerator): + def loss_func(loss_mask, output_tensor): + lm_loss_ = output_tensor.float() + lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + loss = lm_loss + averaged_losses = 
average_losses_across_data_parallel_group([lm_loss]) + + return loss, {"lm loss": averaged_losses[0]} + + if accelerator.state.megatron_lm_plugin.custom_loss_function is not None: + return accelerator.state.megatron_lm_plugin.custom_loss_function + return loss_func + + def get_forward_step_func(self): + def forward_step(data_iterator, model): + """Forward step.""" + # Get the batch. + tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch( + data_iterator + ) + # Forward model lm_labels + output_tensor = model( + tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels + ) + + return output_tensor, partial(self.loss_func, loss_mask) + + return forward_step + + +def finish_mpu_init(): + # torch.distributed initialization + args = get_args() + # Pytorch distributed. + _initialize_distributed() + + # Random seeds for reproducibility. + if args.rank == 0: + print(f"> setting random seeds to {args.seed} ...") + _set_random_seed(args.seed, args.data_parallel_random_init) + + +# intialize megatron setup +def initialize(accelerator, extra_args_provider=None, args_defaults={}): + accelerator.print("Initializing Megatron-LM") + assert torch.cuda.is_available(), "Megatron requires CUDA." + + # Parse arguments + args = parse_args(extra_args_provider, ignore_unknown_args=True) + + # Set defaults + for key, value in args_defaults.items(): + if getattr(args, key, None) is not None: + if args.rank == 0: + print( + f"WARNING: overriding default arguments for {key}:{getattr(args, key)} with {key}:{value}", + flush=True, + ) + setattr(args, key, value) + + if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False): + assert args.load is not None, "--use-checkpoints-args requires --load argument" + load_args_from_checkpoint(args) + + validate_args(args) + + # set global args, build tokenizer, and set adlr-autoresume, + # tensorboard-writer, and timers. 
+ set_global_variables(args) + + # Megatron's MPU is the master. Complete initialization right away. + finish_mpu_init() + + # Autoresume. + _init_autoresume() + + # Compile dependencies. + _compile_dependencies() + + # Set pytorch JIT layer fusion options and warmup JIT functions. + set_jit_fusion_options() + args = get_args() + if getattr(args, "padded_vocab_size", None) is None: + args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args) + if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2: + args.bert_binary_head = True + else: + args.bert_binary_head = False + args.iteration = 0 + + +class MegatronEngine(torch.nn.Module): + """ + Megatron-LM model wrapper + + Args: + accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use. + model: Megatron-LM model + optimizer: Megatron-LM optimizer + lr_scheduler: Megatron-LM lr scheduler + """ + + def __init__(self, accelerator, model, optimizer, scheduler): + super().__init__() + self.module = model + self.base_model = model[0] + self.optimizer = optimizer + self.scheduler = scheduler + args = get_args() + if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None: + self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class( + args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs + ) + elif args.model_type_name == "bert": + self.train_step_handler = BertTrainStep(accelerator, args) + elif args.model_type_name == "gpt": + self.train_step_handler = GPTTrainStep(accelerator, args) + elif args.model_type_name == "t5": + self.train_step_handler = T5TrainStep(accelerator, args) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + self.optimizer.skipped_iter = False + + # Tracking loss. 
+ self.total_loss_dict = {} + self.eval_total_loss_dict = {} + self.iteration = 0 + self.report_memory_flag = True + self.num_floating_point_operations_so_far = 0 + self.module_config = None + if args.tensorboard_dir is not None: + write_args_to_tensorboard() + + def get_module_config(self): + args = get_args() + config = get_model_config(self.module[0]) + # Setup some training config params + config.grad_scale_func = self.optimizer.scale_loss + if isinstance(self.module[0], LocalDDP) and args.overlap_grad_reduce: + assert config.no_sync_func is None, ( + "When overlap_grad_reduce is True, config.no_sync_func must be None; " + "a custom no_sync_func is not supported when overlapping grad-reduce" + ) + config.no_sync_func = [model_chunk.no_sync for model_chunk in self.module] + if len(self.module) == 1: + config.no_sync_func = config.no_sync_func[0] + if args.delay_grad_reduce: + config.grad_sync_func = [model_chunk.start_grad_sync for model_chunk in self.module] + if len(self.module) == 1: + config.grad_sync_func = config.grad_sync_func[0] + if args.overlap_param_gather and args.delay_param_gather: + config.param_sync_func = [ + lambda x: self.optimizer.finish_param_sync(model_index, x) for model_index in range(len(self.module)) + ] + if len(self.module) == 1: + config.param_sync_func = config.param_sync_func[0] + config.finalize_model_grads_func = finalize_model_grads + return config + + def train(self): + for model_module in self.module: + model_module.train() + + if self.module_config is None: + self.module_config = self.get_module_config() + + self.log_eval_results() + + def eval(self): + for model_module in self.module: + model_module.eval() + + if self.module_config is None: + self.module_config = self.get_module_config() + + def get_batch_data_iterator(self, batch_data): + args = get_args() + data_chunks = [] + if len(batch_data) > 0: + if args.num_micro_batches > 1: + for i in range(0, args.num_micro_batches): + data_chunks.append( + { + k: v[i * 
args.micro_batch_size : (i + 1) * args.micro_batch_size] + for k, v in batch_data.items() + } + ) + else: + data_chunks = [batch_data] + + if len(self.module) > 1: + batch_data_iterator = ( + [iter(data_chunks) for _ in range(len(self.module))] + if len(batch_data) > 0 + else [None] * len(self.module) + ) + else: + batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None + return batch_data_iterator + + def train_step(self, **batch_data): + """ + Training step for Megatron-LM + + Args: + batch_data (:obj:`dict`): The batch data to train on. + """ + + batch_data_iterator = self.get_batch_data_iterator(batch_data) + + loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad = train_step( + forward_step_func=self.train_step_handler.forward_step, + data_iterator=batch_data_iterator, + model=self.module, + optimizer=self.optimizer, + opt_param_scheduler=self.scheduler, + config=self.module_config, + ) + + self.optimizer.skipped_iter = skipped_iter == 1 + + return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad + + def eval_step(self, **batch_data): + """ + Evaluation step for Megatron-LM + + Args: + batch_data (:obj:`dict`): The batch data to evaluate on. + """ + + args = get_args() + batch_data_iterator = self.get_batch_data_iterator(batch_data) + forward_backward_func = get_forward_backward_func() + loss_dicts = forward_backward_func( + forward_step_func=self.train_step_handler.forward_step, + data_iterator=batch_data_iterator, + model=self.module, + num_microbatches=get_num_microbatches(), + seq_length=args.seq_length, + micro_batch_size=args.micro_batch_size, + forward_only=True, + ) + # Empty unused memory + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + args.consumed_valid_samples += ( + mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() + ) + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. 
+ loss_reduced = {} + for key in loss_dicts[0]: + losses_reduced_for_key = [x[key] for x in loss_dicts] + if len(losses_reduced_for_key[0].shape) == 0: + loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) + else: + loss_reduced[key] = torch.concat(losses_reduced_for_key) + return loss_reduced + return {} + + def forward(self, **batch_data): + # During training, we use train_step() + # model(**batch_data) performs following operations by delegating it to `self.train_step`: + # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism + # 2. Set grad to zero. + # 3. forward pass and backward pass using Pipeline Parallelism + # 4. Empty unused memory. + # 5. Reduce gradients. + # 6. Update parameters. + # 7. Gather params when using Distributed Optimizer (Data Parallelism). + # 8. Update learning rate if scheduler is specified. + # 9. Empty unused memory. + # 10. Average loss across microbatches and across DP ranks. + # + # During evaluation, we use eval_step() + args = get_args() + if self.module[0].training: + loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data) + self.iteration += 1 + batch_size = mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() + args.consumed_train_samples += batch_size + self.num_floating_point_operations_so_far += num_floating_point_operations(args, batch_size) + if args.tensorboard_dir is not None: + # Logging. 
+ loss_scale = self.optimizer.get_loss_scale().item() + params_norm = None + if args.log_params_norm: + params_norm = calc_params_l2_norm(self.model) + self.report_memory_flag = training_log( + loss_dict, + self.total_loss_dict, + self.optimizer.param_groups[0]["lr"], + self.iteration, + loss_scale, + self.report_memory_flag, + skipped_iter, + grad_norm, + params_norm, + num_zeros_in_grad, + ) + else: + loss_dict = self.eval_step(**batch_data) + if args.tensorboard_dir is not None: + for key in loss_dict: + self.eval_total_loss_dict[key] = ( + self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] + ) + self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get( + key + "_num_iters", torch.cuda.FloatTensor([0.0]) + ) + torch.cuda.FloatTensor([1.0]) + + loss = torch.tensor(0.0, device=torch.cuda.current_device()) + for key in loss_dict: + if len(loss_dict[key].shape) == 0: + loss += loss_dict[key] + + logits = None + if "logits" in loss_dict: + logits = loss_dict["logits"] + if self.train_step_handler.model_output_class is not None: + return self.train_step_handler.model_output_class(loss=loss, logits=logits) + return loss + + def log_eval_results(self): + args = get_args() + if args.tensorboard_dir is None or self.iteration == 0: + return + args = get_args() + writer = get_tensorboard_writer() + string = f"validation loss at iteration {self.iteration} | " + for key in self.eval_total_loss_dict: + if key.endswith("_num_iters"): + continue + value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"] + string += f"{key} value: {value} | " + ppl = math.exp(min(20, value.item())) + if args.pretraining_flag: + string += f"{key} PPL: {ppl} | " + if writer: + writer.add_scalar(f"{key} validation", value.item(), self.iteration) + if args.pretraining_flag: + writer.add_scalar(f"{key} validation ppl", ppl, self.iteration) + + length = len(string) + 1 + print_rank_last("-" * length) + 
print_rank_last(string) + print_rank_last("-" * length) + self.eval_total_loss_dict = {} + + def save_checkpoint(self, output_dir): + self.log_eval_results() + args = get_args() + args.save = output_dir + torch.distributed.barrier() + save_checkpoint( + self.iteration, + self.module, + self.optimizer, + self.scheduler, + num_floating_point_operations_so_far=self.num_floating_point_operations_so_far, + ) + torch.distributed.barrier() + + def load_checkpoint(self, input_dir): + args = get_args() + args.load = input_dir + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + torch.distributed.barrier() + iteration, num_floating_point_operations_so_far = load_checkpoint(self.module, self.optimizer, self.scheduler) + torch.distributed.barrier() + self.iteration = iteration + self.num_floating_point_operations_so_far = num_floating_point_operations_so_far + if args.fp16 and self.iteration == 0: + self.optimizer.reload_model_params() + + def megatron_generate( + self, + inputs, + attention_mask=None, + max_length=None, + max_new_tokens=None, + num_beams=None, + temperature=None, + top_k=None, + top_p=None, + length_penalty=None, + **kwargs, + ): + """ + Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along + with sampling. Refer the Megatron-LM repo for more details + + Args: + inputs (torch.Tensor): input ids + attention_mask (torch.Tensor, optional): attention mask. Defaults to None. + max_length (int, optional): max length of the generated sequence. Defaults to None. + Either this or max_new_tokens should be provided. + max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None. + Either this or max_length should be provided. + num_beams (int, optional): number of beams to use for beam search. Defaults to None. + temperature (float, optional): temperature for sampling. Defaults to 1.0. + top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0. 
+ top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0. + length_penalty (float, optional): length penalty for beam search. Defaults to None. + kwargs: additional key-value arguments + """ + + # checking if required arguments are passed + args = get_args() + if args.model_type_name != "gpt": + raise NotImplementedError("Generate method is not implemented for this model") + + if args.data_parallel_size > 1: + raise ValueError("Generate method requires data parallelism to be 1") + + if args.sequence_parallel: + raise ValueError("Generate method requires sequence parallelism to be False") + + if args.recompute_granularity is not None: + raise ValueError("Checkpoint activations cannot be set for inference") + + if args.vocab_file is None: + raise ValueError("Vocab file is required for inference") + + # Prepare inputs + if max_length is None and max_new_tokens is None: + raise ValueError("`max_length` or `max_new_tokens` are required for inference") + + if temperature is None: + temperature = 1.0 + elif not (0.0 < temperature <= 100.0): + raise ValueError("temperature must be a positive number less than or equal to 100.0") + + if top_k is None: + top_k = 0 + elif not (0 <= top_k <= 1000): + raise ValueError("top_k must be a positive number less than or equal to 1000") + + if top_p is None: + top_p = 0.0 + elif top_p > 0.0 and top_k > 0.0: + raise ValueError("top_p and top_k sampling cannot be set together") + else: + if not (0.0 <= top_p <= 1.0): + raise ValueError("top_p must be less than or equal to 1.0") + + top_p_decay = kwargs.get("top_p_decay", 0.0) + if not (0.0 <= top_p_decay <= 1.0): + raise ValueError("top_p_decay must be less than or equal to 1.0") + + top_p_bound = kwargs.get("top_p_bound", 0.0) + if not (0.0 <= top_p_bound <= 1.0): + raise ValueError("top_p_bound must be less than or equal to 1.0") + + add_BOS = kwargs.get("add_BOS", False) + if not (isinstance(add_BOS, bool)): + raise ValueError("add_BOS must be 
a boolean") + + beam_width = num_beams + if beam_width is not None: + if not isinstance(beam_width, int): + raise ValueError("beam_width must be an integer") + if beam_width < 1: + raise ValueError("beam_width must be greater than 0") + if inputs.shape[0] > 1: + return "When doing beam_search, batch size must be 1" + + tokenizer = get_tokenizer() + + stop_token = kwargs.get("stop_token", tokenizer.eod) + if stop_token is not None: + if not isinstance(stop_token, int): + raise ValueError("stop_token must be an integer") + + if length_penalty is None: + length_penalty = 1.0 + + sizes_list = None + prompts_tokens_tensor = None + prompts_length_tensor = None + if torch.distributed.get_rank() == 0: + # Get the prompts length. + if attention_mask is None: + prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0]) + else: + prompts_length_tensor = attention_mask.sum(axis=-1).cuda() + + if max_new_tokens is None: + max_new_tokens = max_length - inputs.shape[1] + if max_new_tokens <= 0: + raise ValueError("max_new_tokens must be greater than 0") + + if add_BOS: + max_length = max_new_tokens + inputs.shape[1] + 1 + # making sure that `max_length` is a multiple of 4 to leverage fused kernels + max_length = 4 * math.ceil(max_length / 4) + max_new_tokens = max_length - (inputs.shape[1] + 1) + padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) + prompts_tokens_tensor = torch.concat( + [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1 + ) + else: + # making sure that `max_length` is a multiple of 4 to leverage fused kernels + max_length = max_new_tokens + inputs.shape[1] + max_length = 4 * math.ceil(max_length / 4) + max_new_tokens = max_length - inputs.shape[1] + padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) + prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1) + + # We need the sizes of these tensors for the boradcast + sizes_list = [ 
+ prompts_tokens_tensor.size(0), # Batch size + prompts_tokens_tensor.size(1), + ] # Sequence length + + # First, broadcast the sizes. + sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0) + + # Now that we have the sizes, we can boradcast the tokens + # and length tensors. + sizes = sizes_tensor.tolist() + context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0) + context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0) + + # Run the inference + random_seed = kwargs.get("random_seed", 0) + torch.random.manual_seed(random_seed) + unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module)) + if beam_width is not None: + tokens, _ = beam_search_and_return_on_first_stage( + unwrapped_model, + context_tokens_tensor, + context_length_tensor, + beam_width, + stop_token=stop_token, + num_return_gen=1, + length_penalty=length_penalty, + ) + else: + tokens, _, _ = generate_tokens_probs_and_return_on_first_stage( + unwrapped_model, + context_tokens_tensor, + context_length_tensor, + return_output_log_probs=False, + top_k=top_k, + top_p=top_p, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + use_eod_token_for_early_termination=True, + ) + return tokens + + +# other utilities +def avg_losses_across_data_parallel_group(losses): + """ + Average losses across data parallel group. + + Args: + losses (List[Tensor]): List of losses to average across data parallel group. + """ + + return average_losses_across_data_parallel_group(losses) + + +def gather_across_data_parallel_groups(tensor): + """ + Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather across data parallel ranks. 
+ + """ + + def _gpu_gather_one(tensor): + if tensor.ndim == 0: + tensor = tensor.clone()[None] + output_tensors = [ + torch.empty_like(tensor) + for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) + ] + torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) + return torch.cat(output_tensors, dim=0) + + return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/memory.py b/venv/lib/python3.10/site-packages/accelerate/utils/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..e25649168ab85f189c9b115898611840858389fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/memory.py @@ -0,0 +1,207 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A collection of utilities for ensuring that training can always occur. Heavily influenced by the +[toma](https://github.com/BlackHC/toma) library. 
+""" + +import functools +import gc +import importlib +import inspect +import warnings + +import torch +from packaging import version + +from .imports import ( + is_cuda_available, + is_hpu_available, + is_ipex_available, + is_mlu_available, + is_mps_available, + is_musa_available, + is_npu_available, + is_sdaa_available, + is_xpu_available, +) +from .versions import compare_versions + + +def clear_device_cache(garbage_collection=False): + """ + Clears the device cache by calling `torch.{backend}.empty_cache`. Can also run `gc.collect()`, but do note that + this is a *considerable* slowdown and should be used sparingly. + """ + if garbage_collection: + gc.collect() + + if is_xpu_available(): + torch.xpu.empty_cache() + elif is_mlu_available(): + torch.mlu.empty_cache() + elif is_sdaa_available(): + torch.sdaa.empty_cache() + elif is_musa_available(): + torch.musa.empty_cache() + elif is_npu_available(): + torch.npu.empty_cache() + elif is_mps_available(min_version="2.0"): + torch.mps.empty_cache() + elif is_cuda_available(): + torch.cuda.empty_cache() + elif is_hpu_available(): + # torch.hpu.empty_cache() # not available on hpu as it reserves all device memory for the current process + pass + + +def release_memory(*objects): + """ + Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. + Returned objects should be reassigned to the same variables. 
+ + Args: + objects (`Iterable`): + An iterable of objects + Returns: + A list of `None` objects to replace `objects` + + Example: + + ```python + >>> import torch + >>> from accelerate.utils import release_memory + + >>> a = torch.ones(1000, 1000).cuda() + >>> b = torch.ones(1000, 1000).cuda() + >>> a, b = release_memory(a, b) + ``` + """ + if not isinstance(objects, list): + objects = list(objects) + for i in range(len(objects)): + objects[i] = None + clear_device_cache(garbage_collection=True) + return objects + + +def should_reduce_batch_size(exception: Exception) -> bool: + """ + Checks if `exception` relates to CUDA out-of-memory, XPU out-of-memory, CUDNN not supported, or CPU out-of-memory + + Args: + exception (`Exception`): + An exception + """ + _statements = [ + " out of memory.", # OOM for CUDA, HIP, XPU + "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU + "DefaultCPUAllocator: can't allocate memory", # CPU OOM + "FATAL ERROR :: MODULE:PT_DEVMEM Allocation failed", # HPU OOM + ] + if isinstance(exception, RuntimeError) and len(exception.args) == 1: + return any(err in exception.args[0] for err in _statements) + return False + + +def find_executable_batch_size( + function: callable = None, starting_batch_size: int = 128, reduce_batch_size_fn: callable = None +): + """ + A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or + CUDNN, the batch size is multiplied by 0.9 and passed to `function` + + `function` must take in a `batch_size` parameter as its first argument. + + Args: + function (`callable`, *optional*): + A function to wrap + starting_batch_size (`int`, *optional*): + The batch size to try and fit into memory + + Example: + + ```python + >>> from accelerate.utils import find_executable_batch_size + + + >>> @find_executable_batch_size(starting_batch_size=128) + ... def train(batch_size, model, optimizer): + ... ... 
+ + + >>> train(model, optimizer) + ``` + """ + if function is None: + return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size) + + batch_size = starting_batch_size + if reduce_batch_size_fn is None: + + def reduce_batch_size_fn(): + nonlocal batch_size + batch_size = int(batch_size * 0.9) + return batch_size + + def decorator(*args, **kwargs): + nonlocal batch_size + clear_device_cache(garbage_collection=True) + params = list(inspect.signature(function).parameters.keys()) + # Guard against user error + if len(params) < (len(args) + 1): + arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]) + raise TypeError( + f"Batch size was passed into `{function.__name__}` as the first argument when called." + f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" + ) + while True: + if batch_size == 0: + raise RuntimeError("No executable batch size found, reached zero.") + try: + return function(batch_size, *args, **kwargs) + except Exception as e: + if should_reduce_batch_size(e): + clear_device_cache(garbage_collection=True) + batch_size = reduce_batch_size_fn() + else: + raise + + return decorator + + +def get_xpu_available_memory(device_index: int): + if version.parse(torch.__version__).release >= version.parse("2.6").release: + # torch.xpu.mem_get_info API is available starting from PyTorch 2.6 + # It further requires PyTorch built with the SYCL runtime which supports API + # to query available device memory. If not available, exception will be + # raised. Version of SYCL runtime used to build PyTorch is being reported + # with print(torch.version.xpu) and corresponds to the version of Intel DPC++ + # SYCL compiler. First version to support required feature is 20250001. 
+ try: + return torch.xpu.mem_get_info(device_index)[0] + except Exception: + pass + elif is_ipex_available(): + ipex_version = version.parse(importlib.metadata.version("intel_extension_for_pytorch")) + if compare_versions(ipex_version, ">=", "2.5"): + from intel_extension_for_pytorch.xpu import mem_get_info + + return mem_get_info(device_index)[0] + + warnings.warn( + "The XPU `mem_get_info` API is available in IPEX version >=2.5 or PyTorch >=2.6. The current returned available memory is incorrect. Please consider upgrading your IPEX or PyTorch version." + ) + return torch.xpu.max_memory_allocated(device_index) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py b/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..dccac1a8c5add94299a13f33005b0263c7f01ca3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py @@ -0,0 +1,2198 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import gc +import inspect +import json +import logging +import os +import re +import shutil +import tempfile +import warnings +from collections import OrderedDict, defaultdict +from typing import Optional, Union + +import torch +from torch import distributed as dist +from torch import nn + +from ..state import AcceleratorState +from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME +from .dataclasses import AutocastKwargs, CustomDtype, DistributedType +from .imports import ( + is_hpu_available, + is_mlu_available, + is_mps_available, + is_musa_available, + is_npu_available, + is_peft_available, + is_sdaa_available, + is_torch_xla_available, + is_xpu_available, +) +from .memory import clear_device_cache, get_xpu_available_memory +from .offload import load_offloaded_weight, offload_weight, save_offload_index +from .tqdm import is_tqdm_available, tqdm +from .versions import is_torch_version + + +if is_npu_available(check_device=False): + import torch_npu # noqa: F401 + +if is_mlu_available(check_device=False): + import torch_mlu # noqa: F401 + +if is_sdaa_available(check_device=False): + import torch_sdaa # noqa: F401 + +if is_musa_available(check_device=False): + import torch_musa # noqa: F401 + +from safetensors import safe_open +from safetensors.torch import load_file as safe_load_file + + +WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" + +logger = logging.getLogger(__name__) + + +def is_peft_model(model): + from .other import extract_model_from_parallel + + if is_peft_available(): + from peft import PeftModel + + return is_peft_available() and isinstance(extract_model_from_parallel(model), PeftModel) + + +def check_device_same(first_device, second_device): + """ + Utility method to check if two `torch` devices are similar. 
When dealing with CUDA devices, torch throws `False` + for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same + + Args: + first_device (`torch.device`): + First device to check + second_device (`torch.device`): + Second device to check + """ + if first_device.type != second_device.type: + return False + + if first_device.type != "cpu" and first_device.index is None: + # In case the first_device is a cuda device and have + # the index attribute set to `None`, default it to `0` + first_device = torch.device(first_device.type, index=0) + + if second_device.type != "cpu" and second_device.index is None: + # In case the second_device is a cuda device and have + # the index attribute set to `None`, default it to `0` + second_device = torch.device(second_device.type, index=0) + + return first_device == second_device + + +def convert_file_size_to_int(size: Union[int, str]): + """ + Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). + + Args: + size (`int` or `str`): The size to convert. Will be directly returned if an `int`. + + Example: + + ```py + >>> convert_file_size_to_int("1MiB") + 1048576 + ``` + """ + mem_size = -1 + err_msg = ( + f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')." 
+ ) + try: + if isinstance(size, int): + mem_size = size + elif size.upper().endswith("GIB"): + mem_size = int(float(size[:-3]) * (2**30)) + elif size.upper().endswith("MIB"): + mem_size = int(float(size[:-3]) * (2**20)) + elif size.upper().endswith("KIB"): + mem_size = int(float(size[:-3]) * (2**10)) + elif size.upper().endswith("GB"): + int_size = int(float(size[:-2]) * (10**9)) + mem_size = int_size // 8 if size.endswith("b") else int_size + elif size.upper().endswith("MB"): + int_size = int(float(size[:-2]) * (10**6)) + mem_size = int_size // 8 if size.endswith("b") else int_size + elif size.upper().endswith("KB"): + int_size = int(float(size[:-2]) * (10**3)) + mem_size = int_size // 8 if size.endswith("b") else int_size + except ValueError: + raise ValueError(err_msg) + + if mem_size < 0: + raise ValueError(err_msg) + return mem_size + + +def dtype_byte_size(dtype: torch.dtype): + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. + + Example: + + ```py + >>> dtype_byte_size(torch.float32) + 4 + ``` + """ + if dtype == torch.bool: + return 1 / 8 + elif dtype == CustomDtype.INT2: + return 1 / 4 + elif dtype == CustomDtype.INT4: + return 1 / 2 + elif dtype == CustomDtype.FP8: + return 1 + elif is_torch_version(">=", "2.1.0") and dtype in [torch.float8_e4m3fn, torch.float8_e5m2]: + return 1 + bit_search = re.search(r"[^\d](\d+)$", str(dtype)) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 + + +def id_tensor_storage(tensor: torch.Tensor) -> tuple[torch.device, int, int]: + """ + Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For + example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is + guaranteed to be unique and constant for this tensor's storage during its lifetime. 
Two tensor storages with + non-overlapping lifetimes may have the same id. + """ + _SIZE = { + torch.int64: 8, + torch.float32: 4, + torch.int32: 4, + torch.bfloat16: 2, + torch.float16: 2, + torch.int16: 2, + torch.uint8: 1, + torch.int8: 1, + torch.bool: 1, + torch.float64: 8, + } + try: + storage_ptr = tensor.untyped_storage().data_ptr() + storage_size = tensor.untyped_storage().nbytes() + except Exception: + try: + # Fallback for torch==1.10 + storage_ptr = tensor.storage().data_ptr() + storage_size = tensor.storage().size() * _SIZE[tensor.dtype] + except NotImplementedError: + # Fallback for meta storage + storage_ptr = 0 + # On torch >=2.0 this is the tensor size + storage_size = tensor.nelement() * _SIZE[tensor.dtype] + + return tensor.device, storage_ptr, storage_size + + +def set_module_tensor_to_device( + module: nn.Module, + tensor_name: str, + device: Union[int, str, torch.device], + value: Optional[torch.Tensor] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + fp16_statistics: Optional[torch.HalfTensor] = None, + tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None, + non_blocking: bool = False, + clear_cache: bool = True, +): + """ + A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing + `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). + + Args: + module (`torch.nn.Module`): + The module in which the tensor we want to move lives. + tensor_name (`str`): + The full name of the parameter/buffer. + device (`int`, `str` or `torch.device`): + The device on which to set the tensor. + value (`torch.Tensor`, *optional*): + The value of the tensor (useful when going from the meta device to any other device). + dtype (`torch.dtype`, *optional*): + If passed along the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to + the dtype of the existing parameter in the model. 
+ fp16_statistics (`torch.HalfTensor`, *optional*): + The list of fp16 statistics to set on the module, used for 8 bit model serialization. + tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`): + A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given + execution device, this parameter is useful to reuse the first available pointer of a shared weight on the + device for all others, instead of duplicating memory. + non_blocking (`bool`, *optional*, defaults to `False`): + If `True`, the device transfer will be asynchronous with respect to the host, if possible. + clear_cache (`bool`, *optional*, defaults to `True`): + Whether or not to clear the device cache after setting the tensor on the device. + """ + # Recurse if needed + if "." in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + + if tensor_name not in module._parameters and tensor_name not in module._buffers: + raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") + is_buffer = tensor_name in module._buffers + old_value = getattr(module, tensor_name) + + # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight + # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer. 
+ if ( + value is not None + and tied_params_map is not None + and value.data_ptr() in tied_params_map + and device in tied_params_map[value.data_ptr()] + ): + module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device] + return + elif ( + tied_params_map is not None + and old_value.data_ptr() in tied_params_map + and device in tied_params_map[old_value.data_ptr()] + ): + module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device] + return + + if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: + raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") + + param = module._parameters[tensor_name] if tensor_name in module._parameters else None + param_cls = type(param) + + if value is not None: + # We can expect mismatches when using bnb 4bit since Params4bit will reshape and pack the weights. + # In other cases, we want to make sure we're not loading checkpoints that do not match the config. + if old_value.shape != value.shape and param_cls.__name__ != "Params4bit": + raise ValueError( + f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this looks incorrect.' 
+ ) + + if dtype is None: + # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model + value = value.to(old_value.dtype, non_blocking=non_blocking) + elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + value = value.to(dtype, non_blocking=non_blocking) + + device_quantization = None + with torch.no_grad(): + # leave it on cpu first before moving them to cuda + # # fix the case where the device is meta, we don't want to put it on cpu because there is no data =0 + if ( + param is not None + and param.device.type not in ("cuda", "xpu") + and torch.device(device).type in ("cuda", "xpu") + and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"] + ): + device_quantization = device + device = "cpu" + # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). + if isinstance(device, int): + if is_npu_available(): + device = f"npu:{device}" + elif is_mlu_available(): + device = f"mlu:{device}" + elif is_sdaa_available(): + device = f"sdaa:{device}" + elif is_musa_available(): + device = f"musa:{device}" + elif is_hpu_available(): + device = "hpu" + if "xpu" in str(device) and not is_xpu_available(): + raise ValueError(f'{device} is not available, you should use device="cpu" instead') + if value is None: + new_value = old_value.to(device, non_blocking=non_blocking) + if dtype is not None and device in ["meta", torch.device("meta")]: + if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + new_value = new_value.to(dtype, non_blocking=non_blocking) + + if not is_buffer: + module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad) + elif isinstance(value, torch.Tensor): + new_value = value.to(device, non_blocking=non_blocking) + else: + new_value = torch.tensor(value, device=device) + if device_quantization is not None: + device = device_quantization + if 
is_buffer: + module._buffers[tensor_name] = new_value + elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device): + param_cls = type(module._parameters[tensor_name]) + kwargs = module._parameters[tensor_name].__dict__ + if param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"]: + if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32: + # downcast to fp16 if any - needed for 8bit serialization + new_value = new_value.to(torch.float16, non_blocking=non_blocking) + # quantize module that are going to stay on the cpu so that we offload quantized weights + if device == "cpu" and param_cls.__name__ == "Int8Params": + new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu") + new_value.CB = new_value.CB.to("cpu") + new_value.SCB = new_value.SCB.to("cpu") + else: + new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to( + device, non_blocking=non_blocking + ) + elif param_cls.__name__ in ["QTensor", "QBitsTensor"]: + new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to( + device, non_blocking=non_blocking + ) + elif param_cls.__name__ in ["AffineQuantizedTensor"]: + new_value = new_value.to(device, non_blocking=non_blocking) + else: + new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to( + device, non_blocking=non_blocking + ) + + module._parameters[tensor_name] = new_value + if fp16_statistics is not None: + module._parameters[tensor_name].SCB = fp16_statistics.to(device, non_blocking=non_blocking) + del fp16_statistics + # as we put the weight to meta, it doesn't have SCB attr anymore. 
make sure that it is not a meta weight + if ( + module.__class__.__name__ == "Linear8bitLt" + and getattr(module.weight, "SCB", None) is None + and str(module.weight.device) != "meta" + ): + # quantize only if necessary + device_index = torch.device(device).index if torch.device(device).type == "cuda" else None + if not getattr(module.weight, "SCB", None) and device_index is not None: + if module.bias is not None and module.bias.device.type != "meta": + # if a bias exists, we need to wait until the bias is set on the correct device + module = module.cuda(device_index) + elif module.bias is None: + # if no bias exists, we can quantize right away + module = module.cuda(device_index) + elif ( + module.__class__.__name__ == "Linear4bit" + and getattr(module.weight, "quant_state", None) is None + and str(module.weight.device) != "meta" + ): + # quantize only if necessary + device_index = torch.device(device).index if torch.device(device).type == "cuda" else None + if not getattr(module.weight, "quant_state", None) and device_index is not None: + module.weight = module.weight.cuda(device_index) + + # clean pre and post forward hook + if clear_cache and device != "cpu": + clear_device_cache() + + # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in + # order to avoid duplicating memory, see above. 
+ if ( + tied_params_map is not None + and old_value.data_ptr() in tied_params_map + and device not in tied_params_map[old_value.data_ptr()] + ): + tied_params_map[old_value.data_ptr()][device] = new_value + elif ( + value is not None + and tied_params_map is not None + and value.data_ptr() in tied_params_map + and device not in tied_params_map[value.data_ptr()] + ): + tied_params_map[value.data_ptr()][device] = new_value + + +def named_module_tensors( + module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False +): + """ + A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True` + it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`. + + Args: + module (`torch.nn.Module`): + The module we want the tensors on. + include_buffer (`bool`, *optional*, defaults to `True`): + Whether or not to include the buffers in the result. + recurse (`bool`, *optional`, defaults to `False`): + Whether or not to go look in every submodule or just return the direct parameters and buffers. + remove_non_persistent (`bool`, *optional*, defaults to `False`): + Whether or not to remove the non persistent buffer from the buffers. Useful only when include_buffers = + True + """ + yield from module.named_parameters(recurse=recurse) + + if include_buffers: + non_persistent_buffers = set() + if remove_non_persistent: + non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse) + for named_buffer in module.named_buffers(recurse=recurse): + name, _ = named_buffer + if name not in non_persistent_buffers: + yield named_buffer + + +def get_non_persistent_buffers(module: nn.Module, recurse: bool = False, fqns: bool = False): + """ + Gather all non persistent buffers of a given modules into a set + + Args: + module (`nn.Module`): + The module we want the non persistent buffers on. 
+ recurse (`bool`, *optional*, defaults to `False`): + Whether or not to go look in every submodule or just return the direct non persistent buffers. + fqns (`bool`, *optional*, defaults to `False`): + Whether or not to return the fully-qualified names of the non persistent buffers. + """ + + non_persistent_buffers_set = module._non_persistent_buffers_set + if recurse: + for n, m in module.named_modules(): + if fqns: + non_persistent_buffers_set |= {n + "." + b for b in m._non_persistent_buffers_set} + else: + non_persistent_buffers_set |= m._non_persistent_buffers_set + + return non_persistent_buffers_set + + +class FindTiedParametersResult(list): + """ + This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not + a list or on the `values` method as in the future this will be removed. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def values(self): + warnings.warn( + "The 'values' method of FindTiedParametersResult is deprecated and will be removed in Accelerate v1.3.0. ", + FutureWarning, + ) + return sum([x[1:] for x in self], []) + + +def check_tied_parameters_in_config(model: nn.Module): + """ + Check if there is any indication in the given model that some weights should be tied. 
+ + Args: + model (`torch.nn.Module`): The model to inspect + + Returns: + bool: True if the model needs to have tied weights + """ + + # based on model.tie_weights() method + has_tied_word_embedding = False + has_tied_encoder_decoder = False + has_tied_module = False + + if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]: + has_tied_word_embedding = False + model_decoder_config = None + if hasattr(model, "config"): + model_decoder_config = ( + model.config.get_text_config(decoder=True) + if hasattr(model.config, "get_text_config") + else model.config + ) + has_tied_word_embedding = ( + model_decoder_config is not None + and getattr(model_decoder_config, "tie_word_embeddings", False) + and model.get_output_embeddings() + ) + + has_tied_encoder_decoder = ( + hasattr(model, "config") + and getattr(model.config, "is_encoder_decoder", False) + and getattr(model.config, "tie_encoder_decoder", False) + ) + has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules()) + return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module]) + + +def _get_param_device(param, device_map): + if param in device_map: + return device_map[param] + parent_param = ".".join(param.split(".")[:-1]) + if parent_param == param: + raise ValueError(f"The `device_map` does not contain the module {param}.") + else: + return _get_param_device(parent_param, device_map) + + +def check_tied_parameters_on_same_device(tied_params, device_map): + """ + Check if tied parameters are on the same device + + Args: + tied_params (`List[List[str]]`): + A list of lists of parameter names being all tied together. + + device_map (`Dict[str, Union[int, str, torch.device]]`): + A map that specifies where each submodule should go. 
+ + """ + for tie_param in tied_params: + tie_param_devices = {} + for param in tie_param: + tie_param_devices[param] = _get_param_device(param, device_map) + if len(set(tie_param_devices.values())) > 1: + logger.warning( + f"Tied parameters are on different devices: {tie_param_devices}. " + "Please modify your custom device map or set `device_map='auto'`. " + ) + + +def find_tied_parameters(model: torch.nn.Module, **kwargs): + """ + Find the tied parameters in a given model. + + + + The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore + them. + + + + Args: + model (`torch.nn.Module`): The model to inspect. + + Returns: + List[List[str]]: A list of lists of parameter names being all tied together. + + Example: + + ```py + >>> from collections import OrderedDict + >>> import torch.nn as nn + + >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) + >>> model.linear2.weight = model.linear1.weight + >>> find_tied_parameters(model) + [['linear1.weight', 'linear2.weight']] + ``` + """ + + # get ALL model parameters and their names + all_named_parameters = {name: param for name, param in model.named_parameters(remove_duplicate=False)} + + # get ONLY unique named parameters, + # if parameter is tied and have multiple names, it will be included only once + no_duplicate_named_parameters = {name: param for name, param in model.named_parameters(remove_duplicate=True)} + + # the difference of the two sets will give us the tied parameters + tied_param_names = set(all_named_parameters.keys()) - set(no_duplicate_named_parameters.keys()) + + # 'tied_param_names' contains the names of parameters that are tied in the model, but we do not know + # which names refer to the same parameter. To identify this, we need to group them together. 
+ tied_param_groups = {} + for tied_param_name in tied_param_names: + tied_param = all_named_parameters[tied_param_name] + for param_name, param in no_duplicate_named_parameters.items(): + # compare if parameters are the same, if so, group their names together + if param is tied_param: + if param_name not in tied_param_groups: + tied_param_groups[param_name] = [] + tied_param_groups[param_name].append(tied_param_name) + + return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in tied_param_groups.items()]) + + +def retie_parameters(model, tied_params): + """ + Reties tied parameters in a given model if the link was broken (for instance when adding hooks). + + Args: + model (`torch.nn.Module`): + The model in which to retie parameters. + tied_params (`List[List[str]]`): + A mapping parameter name to tied parameter name as obtained by `find_tied_parameters`. + """ + for tied_group in tied_params: + param_to_tie = None + # two loops : the first one to set param_to_tie , the second one to change the values of tied_group + for param_name in tied_group: + module = model + splits = param_name.split(".") + for split in splits[:-1]: + module = getattr(module, split) + param = getattr(module, splits[-1]) + if param_to_tie is None and param.device != torch.device("meta"): + param_to_tie = param + break + if param_to_tie is not None: + for param_name in tied_group: + module = model + splits = param_name.split(".") + for split in splits[:-1]: + module = getattr(module, split) + setattr(module, splits[-1], param_to_tie) + + +def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype: + """ + Just does torch.dtype(dtype) if necessary. 
+ """ + if isinstance(dtype, str): + # We accept "torch.float16" or just "float16" + dtype = dtype.replace("torch.", "") + dtype = getattr(torch, dtype) + return dtype + + +def compute_module_sizes( + model: nn.Module, + dtype: Optional[Union[str, torch.device]] = None, + special_dtypes: Optional[dict[str, Union[str, torch.device]]] = None, + buffers_only: bool = False, +): + """ + Compute the size of each submodule of a given model. + """ + if dtype is not None: + dtype = _get_proper_dtype(dtype) + dtype_size = dtype_byte_size(dtype) + if special_dtypes is not None: + special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} + special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} + module_sizes = defaultdict(int) + + module_list = [] + + if not buffers_only: + module_list = named_module_tensors(model, recurse=True) + else: + module_list = model.named_buffers(recurse=True) + + for name, tensor in module_list: + if special_dtypes is not None and name in special_dtypes: + size = tensor.numel() * special_dtypes_size[name] + elif dtype is None: + size = tensor.numel() * dtype_byte_size(tensor.dtype) + elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + # According to the code in set_module_tensor_to_device, these types won't be converted + # so use their original size here + size = tensor.numel() * dtype_byte_size(tensor.dtype) + else: + size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) + name_parts = name.split(".") + for idx in range(len(name_parts) + 1): + module_sizes[".".join(name_parts[:idx])] += size + + return module_sizes + + +def compute_module_total_buffer_size( + model: nn.Module, + dtype: Optional[Union[str, torch.device]] = None, + special_dtypes: Optional[dict[str, Union[str, torch.device]]] = None, +): + """ + Compute the total size of buffers in each submodule of a given model. 
    """
    module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True)
    return module_sizes.get("", 0)


def get_max_layer_size(
    modules: list[tuple[str, torch.nn.Module]], module_sizes: dict[str, int], no_split_module_classes: list[str]
):
    """
    Utility function that will scan a list of named modules and return the maximum size used by one full layer. The
    definition of a layer being:
    - a module with no direct children (just parameters and buffers)
    - a module whose class name is in the list `no_split_module_classes`

    Args:
        modules (`List[Tuple[str, torch.nn.Module]]`):
            The list of named modules where we want to determine the maximum layer size.
        module_sizes (`Dict[str, int]`):
            A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
        no_split_module_classes (`List[str]`):
            A list of class names for layers we don't want to be split.

    Returns:
        `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
    """
    max_size = 0
    layer_names = []
    # Work on a copy: entries are popped and replaced by their children as we descend.
    modules_to_treat = modules.copy()
    while len(modules_to_treat) > 0:
        module_name, module = modules_to_treat.pop(0)
        # Plain parameters/buffers in the list are not nn.Module and have no children.
        modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []
        if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
            # No splitting this one so we compare to the max_size
            size = module_sizes[module_name]
            if size > max_size:
                max_size = size
                layer_names = [module_name]
            elif size == max_size:
                layer_names.append(module_name)
        else:
            # Depth-first: prepend children (with fully-qualified names) so they are treated next.
            modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat
    return max_size, layer_names


def get_max_memory(max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None):
    """
    Get the maximum memory available if nothing is passed, converts string to int otherwise.

    When `max_memory` is `None`, probes every visible accelerator (NPU/MLU/SDAA/MUSA/XPU/HPU/CUDA, in that priority
    order) and reports the free memory in bytes for each, plus the available CPU (or MPS) RAM. When a dict is passed,
    string sizes (e.g. `"1GB"`) are converted to ints and the keys are validated and re-ordered so integer GPU ids
    come first, followed by "mps"/"cpu"/"disk".
    """
    import psutil

    if max_memory is None:
        max_memory = {}
        # Make sure CUDA is initialized on each GPU to have the right memory info.
        if is_npu_available():
            for i in range(torch.npu.device_count()):
                try:
                    _ = torch.tensor(0, device=torch.device("npu", i))
                    max_memory[i] = torch.npu.mem_get_info(i)[0]
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        elif is_mlu_available():
            for i in range(torch.mlu.device_count()):
                try:
                    _ = torch.tensor(0, device=torch.device("mlu", i))
                    max_memory[i] = torch.mlu.mem_get_info(i)[0]
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        elif is_sdaa_available():
            for i in range(torch.sdaa.device_count()):
                try:
                    _ = torch.tensor(0, device=torch.device("sdaa", i))
                    max_memory[i] = torch.sdaa.mem_get_info(i)[0]
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        elif is_musa_available():
            for i in range(torch.musa.device_count()):
                try:
                    _ = torch.tensor(0, device=torch.device("musa", i))
                    max_memory[i] = torch.musa.mem_get_info(i)[0]
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        elif is_xpu_available():
            for i in range(torch.xpu.device_count()):
                try:
                    _ = torch.tensor(0, device=torch.device("xpu", i))
                    # XPU free memory goes through a helper rather than mem_get_info.
                    max_memory[i] = get_xpu_available_memory(i)
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        elif is_hpu_available():
            for i in range(torch.hpu.device_count()):
                try:
                    _ = torch.tensor(0, device=torch.device("hpu", i))
                    max_memory[i] = torch.hpu.mem_get_info(i)[0]
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        else:
            for i in range(torch.cuda.device_count()):
                try:
                    _ = torch.tensor([0], device=i)
                    max_memory[i] = torch.cuda.mem_get_info(i)[0]
                except Exception:
                    logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.")
                    continue
        # allocate everything in the mps device as the RAM is shared
        if is_mps_available():
            max_memory["mps"] = psutil.virtual_memory().available
        else:
            max_memory["cpu"] = psutil.virtual_memory().available
        return max_memory

    for key in max_memory:
        if isinstance(max_memory[key], str):
            max_memory[key] = convert_file_size_to_int(max_memory[key])

    # Need to sort the device by type to make sure that we allocate the gpu first.
    # As gpu/npu/xpu are represented by int, we need to sort them first.
    gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)]
    gpu_devices.sort()
    # check if gpu/npu/xpu devices are available and if not, throw a warning
    if is_npu_available():
        num_devices = torch.npu.device_count()
    elif is_mlu_available():
        num_devices = torch.mlu.device_count()
    elif is_sdaa_available():
        num_devices = torch.sdaa.device_count()
    elif is_musa_available():
        num_devices = torch.musa.device_count()
    elif is_xpu_available():
        num_devices = torch.xpu.device_count()
    elif is_hpu_available():
        num_devices = torch.hpu.device_count()
    else:
        num_devices = torch.cuda.device_count()
    for device in gpu_devices:
        if device >= num_devices or device < 0:
            logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}")
    # Add the other devices in the preset order if they are available
    all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()]
    # Raise an error if a device is not recognized
    for k in max_memory.keys():
        if k not in all_devices:
            raise ValueError(
                f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'"
            )
    # Rebuild the dict so iteration order is: GPUs (ascending), then mps/cpu/disk.
    max_memory = {k: max_memory[k] for k in all_devices}

    return max_memory


def clean_device_map(device_map: dict[str, Union[int, str, torch.device]], module_name: str = ""):
    """
    Cleans a device_map by grouping all submodules that go on the same device together.

    Operates in place (and also returns the same dict): if every key under `module_name` maps to the same device, the
    per-submodule entries are collapsed into a single entry for `module_name`, then the same compaction is applied
    recursively to each remaining child prefix.
    """
    # Get the value of the current module and if there is only one split across several keys, regroup it.
    prefix = "" if module_name == "" else f"{module_name}."
    values = [v for k, v in device_map.items() if k.startswith(prefix)]
    if len(set(values)) == 1 and len(values) > 1:
        for k in [k for k in device_map if k.startswith(prefix)]:
            del device_map[k]
        device_map[module_name] = values[0]

    # Recurse over the children
    children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)]
    idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
    # Keep only the next path component under module_name (deduplicated).
    children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
    for child in children_modules:
        clean_device_map(device_map, module_name=child)

    return device_map


def load_offloaded_weights(model, index, offload_folder):
    """
    Loads the weights from the offload folder into the model.

    Args:
        model (`torch.nn.Module`):
            The model to load the weights into.
        index (`dict`):
            A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the
            model.
        offload_folder (`str`):
            The folder where the offloaded weights are stored.
    """
    if index is None or len(index) == 0:
        # Nothing to do
        return
    for param_name, metadata in index.items():
        # "SCB" entries are bitsandbytes int8 scale statistics; they are loaded alongside
        # their matching weight below, not as standalone tensors.
        if "SCB" in param_name:
            continue
        fp16_statistics = None
        if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys():
            weight_name = param_name.replace("weight", "SCB")
            fp16_statistics = load_offloaded_weight(
                os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name]
            )
        tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
        weight = load_offloaded_weight(tensor_file, metadata)
        set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics)


def get_module_leaves(module_sizes: dict[str, int]) -> list[str]:
    """
    Return the names in `module_sizes` that have no child entry (i.e. no other key nests under them).

    The root key `""` is never considered a leaf.
    """
    # Count how many entries have each name as their direct parent.
    module_children = {}
    for module in module_sizes:
        if module == "" or "." not in module:
            continue
        parent = module.rsplit(".", 1)[0]
        module_children[parent] = module_children.get(parent, 0) + 1
    leaves = [module for module in module_sizes if module_children.get(module, 0) == 0 and module != ""]
    return leaves


def get_balanced_memory(
    model: nn.Module,
    max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[list[str]] = None,
    dtype: Optional[Union[str, torch.dtype]] = None,
    special_dtypes: Optional[dict[str, Union[str, torch.device]]] = None,
    low_zero: bool = False,
):
    """
    Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.



    All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
    meta device (as it would if initialized within the `init_empty_weights` context manager).



    Args:
        model (`torch.nn.Module`):
            The model to analyze.
        max_memory (`Dict`, *optional*):
            A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
            Example: `max_memory={0: "1GB"}`.
        no_split_module_classes (`List[str]`, *optional*):
            A list of layer class names that should never be split across device (for instance any layer that has a
            residual connection).
        dtype (`str` or `torch.dtype`, *optional*):
            If provided, the weights will be converted to that type when loaded.
        special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*):
            If provided, special dtypes to consider for some specific weights (will override dtype used as default for
            all weights).
        low_zero (`bool`, *optional*):
            Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the
            Transformers generate function).
    """
    # Get default / clean up max_memory
    user_not_set_max_memory = max_memory is None
    max_memory = get_max_memory(max_memory)

    # Determine which accelerator backend is active so we can count its devices in max_memory.
    if is_npu_available():
        expected_device_type = "npu"
    elif is_mlu_available():
        expected_device_type = "mlu"
    elif is_sdaa_available():
        expected_device_type = "sdaa"
    elif is_musa_available():
        expected_device_type = "musa"
    elif is_xpu_available():
        expected_device_type = "xpu"
    elif is_hpu_available():
        expected_device_type = "hpu"
    elif is_mps_available():
        expected_device_type = "mps"
    else:
        expected_device_type = "cuda"
    num_devices = len([d for d in max_memory if torch.device(d).type == expected_device_type and max_memory[d] > 0])

    if num_devices == 0:
        return max_memory

    if num_devices == 1:
        # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
        low_zero = False
        # If user just asked us to handle memory usage, we should avoid OOM
        if user_not_set_max_memory:
            for key in max_memory.keys():
                if isinstance(key, int):
                    max_memory[key] *= 0.9  # 90% is a good compromise
                    logger.info(
                        f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. "
                        "You can set `max_memory` in to a higher value to use more memory (at your own risk)."
                    )
                    break  # only one device

    module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
    # Even split of the total model size; with low_zero, GPU 0 is excluded from the split.
    per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)

    # We can't just set the memory to model_size // num_devices as it will end being too small: each GPU will get
    # slightly less layers and some layers will end up offload at the end. So this function computes a buffer size to
    # add which is the biggest of:
    # - the size of no split block (if applicable)
    # - the mean of the layer sizes
    if no_split_module_classes is None:
        no_split_module_classes = []
    elif not isinstance(no_split_module_classes, (list, tuple)):
        no_split_module_classes = [no_split_module_classes]

    # Identify the size of the no_split_block modules
    if len(no_split_module_classes) > 0:
        no_split_children = {}
        for name, size in module_sizes.items():
            if name == "":
                continue
            submodule = model
            for submodule_name in name.split("."):
                submodule = getattr(submodule, submodule_name)
            class_name = submodule.__class__.__name__
            if class_name in no_split_module_classes and class_name not in no_split_children:
                no_split_children[class_name] = size

            # Stop scanning once one representative size per no-split class has been found.
            if set(no_split_children.keys()) == set(no_split_module_classes):
                break
        buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
    else:
        buffer = 0

    # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
    leaves = get_module_leaves(module_sizes)
    module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
    # Once removed, leaves are the final modules.
    leaves = get_module_leaves(module_sizes)
    mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1))
    # 25% safety margin on top of the larger of the two estimates.
    buffer = int(1.25 * max(buffer, mean_leaves))
    per_gpu += buffer

    # Sorted list of GPUs id (we may have some gpu ids not included in the our max_memory list - let's ignore them)
    gpus_idx_list = list(
        sorted(
            device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0
        )
    )
    # The last device is left with max_memory just in case the buffer is not enough.
    for idx in gpus_idx_list[:-1]:
        max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])

    if low_zero:
        # GPU 0 only gets what doesn't fit on the other devices.
        min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
        max_memory[0] = min(min_zero, max_memory[0])

    return max_memory


def calculate_maximum_sizes(model: torch.nn.Module):
    "Computes the total size of the model and its largest layer"
    sizes = compute_module_sizes(model)
    # `transformers` models store this information for us
    no_split_modules = getattr(model, "_no_split_modules", None)
    if no_split_modules is None:
        no_split_modules = []

    modules_to_treat = (
        list(model.named_parameters(recurse=False))
        + list(model.named_children())
        + list(model.named_buffers(recurse=False))
    )
    # largest_layer is a (size, [names]) tuple as returned by get_max_layer_size.
    largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
    total_size = sizes[""]
    return total_size, largest_layer


def _init_infer_auto_device_map(
    model: nn.Module,
    max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[list[str]] = None,
    dtype: Optional[Union[str, torch.dtype]] = None,
    special_dtypes: Optional[dict[str, Union[str, torch.device]]] = None,
) -> tuple[
    list[Union[int, str]],
    dict[Union[int, str], Union[int, str]],
    list[Union[int, str]],
    list[int],
    dict[str, int],
    list[list[str]],
    list[str],
    list[tuple[str, nn.Module]],
]:
    """
    Initialize variables required for computing the device map for model allocation.
    """
    max_memory = get_max_memory(max_memory)
    if no_split_module_classes is None:
        no_split_module_classes = []
    elif not isinstance(no_split_module_classes, (list, tuple)):
        no_split_module_classes = [no_split_module_classes]

    # "disk" is always the last-resort device, even if the caller didn't list it.
    devices = list(max_memory.keys())
    if "disk" not in devices:
        devices.append("disk")
    gpus = [device for device in devices if device not in ["cpu", "disk"]]

    # Devices that need to keep space for a potential offloaded layer.
    if "mps" in gpus:
        main_devices = ["mps"]
    elif len(gpus) > 0:
        main_devices = [gpus[0], "cpu"]
    else:
        main_devices = ["cpu"]

    module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
    tied_parameters = find_tied_parameters(model)
    if check_tied_parameters_in_config(model) and len(tied_parameters) == 0:
        logger.warning(
            "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
        )

    # Direct submodules and parameters
    modules_to_treat = (
        list(model.named_parameters(recurse=False))
        + list(model.named_children())
        + list(model.named_buffers(recurse=False))
    )

    return (
        devices,
        max_memory,
        main_devices,
        gpus,
        module_sizes,
        tied_parameters,
        no_split_module_classes,
        modules_to_treat,
    )


def get_module_size_with_ties(
    tied_params,
    module_size,
    module_sizes,
    modules_to_treat,
) -> tuple[int, list[str], list[nn.Module]]:
    """
    Calculate the total size of a module, including its tied parameters.

    Args:
        tied_params (`List[str]`): The list of tied parameters.
        module_size (`int`): The size of the module without tied parameters.
        module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size.
        modules_to_treat (`List[Tuple[str, nn.Module]]`): The list of named modules to treat.

    Returns:
        `Tuple[int, List[str], List[nn.Module]]`: The total size of the module, the names of the tied modules, and the
        tied modules.
    """
    if len(tied_params) < 1:
        return module_size, [], []
    tied_module_names = []
    tied_modules = []

    for tied_param in tied_params:
        # First module in modules_to_treat that contains the tied parameter.
        # NOTE(review): assumes every tied_param has a matching module in the list — [0] raises IndexError otherwise.
        tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if tied_param.startswith(n + ".")][0]
        tied_module_names.append(modules_to_treat[tied_module_index][0])
        tied_modules.append(modules_to_treat[tied_module_index][1])

    module_size_with_ties = module_size
    for tied_param, tied_module_name in zip(tied_params, tied_module_names):
        # Add the tied module's size minus the shared parameter (counted once in module_size already).
        module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param]

    return module_size_with_ties, tied_module_names, tied_modules


def fallback_allocate(
    modules: list[tuple[str, nn.Module]],
    module_sizes: dict[str, int],
    size_limit: Union[int, str],
    no_split_module_classes: Optional[list[str]] = None,
    tied_parameters: Optional[list[list[str]]] = None,
) -> tuple[Optional[str], Optional[nn.Module], list[tuple[str, nn.Module]]]:
    """
    Find a module that fits in the size limit using BFS and return it with its name and the remaining modules.

    Args:
        modules (`List[Tuple[str, nn.Module]]`):
            The list of named modules to search in.
        module_sizes (`Dict[str, int]`):
            A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
        size_limit (`Union[int, str]`):
            The maximum size a module can have.
        no_split_module_classes (`Optional[List[str]]`, *optional*):
            A list of class names for layers we don't want to be split.
        tied_parameters (`Optional[List[List[str]]`, *optional*):
            A list of lists of parameter names being all tied together.

    Returns:
        `Tuple[Optional[str], Optional[nn.Module], List[Tuple[str, nn.Module]]]`: A tuple containing:
            - The name of the module that fits within the size limit.
            - The module itself.
            - The list of remaining modules after the found module is removed.
    """
    try:
        size_limit = convert_file_size_to_int(size_limit)
    except ValueError:
        # Unparsable size limit: report "nothing found" rather than raising.
        return None, None, modules

    if no_split_module_classes is None:
        no_split_module_classes = []

    if tied_parameters is None:
        tied_parameters = []

    modules_to_search = modules.copy()
    module_found = False

    while modules_to_search:
        name, module = modules_to_search.pop(0)

        # Tied groups that are only partially inside the current module (the "." suffix avoids
        # prefix collisions such as `compute.weight` vs `compute.weight_submodule`).
        tied_param_groups = [
            tied_group
            for tied_group in tied_parameters
            if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group)
        ]

        # Parameters tied to this module but living outside of it.
        tied_params = sum(
            [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_groups], []
        )

        module_size_with_ties, _, _ = get_module_size_with_ties(
            tied_params, module_sizes[name], module_sizes, modules_to_search
        )

        # If the module fits in the size limit, we found it.
        if module_size_with_ties <= size_limit:
            module_found = True
            break

        # The module is too big, we need to split it if possible.
        modules_children = (
            []
            if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor)
            else list(module.named_children())
        )

        # Split fails, move to the next module
        if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
            continue

        # split is possible, add the children to the list of modules to search
        modules_children = list(module.named_parameters(recurse=False)) + modules_children
        modules_to_search = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_search

    if not module_found:
        return None, None, modules

    # Prepare the module list for removal of the found module
    current_names = [n for n, _ in modules]
    dot_idx = [i for i, c in enumerate(name) if c == "."]

    # Expand every ancestor of the found module in `modules` so the found module itself
    # appears as a direct entry that can be popped.
    for dot_index in dot_idx:
        parent_name = name[:dot_index]
        if parent_name in current_names:
            parent_module_idx = current_names.index(parent_name)
            _, parent_module = modules[parent_module_idx]
            module_children = list(parent_module.named_parameters(recurse=False)) + list(
                parent_module.named_children()
            )
            modules = (
                modules[:parent_module_idx]
                + [(f"{parent_name}.{n}", v) for n, v in module_children]
                + modules[parent_module_idx + 1 :]
            )
            current_names = [n for n, _ in modules]

    # Now the target module should be directly in the list
    target_idx = current_names.index(name)
    name, module = modules.pop(target_idx)

    return name, module, modules


def infer_auto_device_map(
    model: nn.Module,
    max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[list[str]] = None,
    dtype: Optional[Union[str, torch.dtype]] = None,
    special_dtypes: Optional[dict[str, Union[str, torch.dtype]]] = None,
    verbose: bool = False,
    clean_result: bool = True,
    offload_buffers: bool = False,
    fallback_allocation: bool = False,
):
    """
    Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
    such that:
    - we don't exceed the memory available of any of the GPU.
    - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
      has the largest size.
    - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
    - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
      that has the largest size.



    All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
    meta device (as it would if initialized within the `init_empty_weights` context manager).



    Args:
        model (`torch.nn.Module`):
            The model to analyze.
        max_memory (`Dict`, *optional*):
            A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
            Example: `max_memory={0: "1GB"}`.
        no_split_module_classes (`List[str]`, *optional*):
            A list of layer class names that should never be split across device (for instance any layer that has a
            residual connection).
        dtype (`str` or `torch.dtype`, *optional*):
            If provided, the weights will be converted to that type when loaded.
        special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*):
            If provided, special dtypes to consider for some specific weights (will override dtype used as default for
            all weights).
        verbose (`bool`, *optional*, defaults to `False`):
            Whether or not to provide debugging statements as the function builds the device_map.
        clean_result (`bool`, *optional*, defaults to `True`):
            Clean the resulting device_map by grouping all submodules that go on the same device together.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
            well as the parameters.
        fallback_allocation (`bool`, *optional*, defaults to `False`):
            When regular allocation fails, try to allocate a module that fits in the size limit using BFS.
    """

    # Initialize the variables
    (
        devices,
        max_memory,
        main_devices,
        gpus,
        module_sizes,
        tied_parameters,
        no_split_module_classes,
        modules_to_treat,
    ) = _init_infer_auto_device_map(model, max_memory, no_split_module_classes, dtype, special_dtypes)

    device_map = OrderedDict()
    current_device = 0
    device_memory_used = {device: 0 for device in devices}
    device_buffer_sizes = {}
    device_minimum_assignment_memory = {}

    # Initialize maximum largest layer, to know which space to keep in memory
    max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)

    # Ready ? This is going to be a bit messy.
    while len(modules_to_treat) > 0:
        name, module = modules_to_treat.pop(0)
        if verbose:
            print(f"\nTreating module {name}.")
        # Max size in the remaining layers may have changed since we took one, so we maybe update it.
        max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")]
        if len(max_layer_names) == 0:
            max_layer_size, max_layer_names = get_max_layer_size(
                [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
                module_sizes,
                no_split_module_classes,
            )
        # Assess size needed
        module_size = module_sizes[name]

        # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module
        # and the other is not.
        # Note: If we are currently processing the name `compute.weight`, an other parameter named
        # e.g. `compute.weight_submodule.parameter`
        # needs to be considered outside the current module, hence the check with additional dots.
        tied_param_groups = [
            tied_group
            for tied_group in tied_parameters
            if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group)
        ]

        if verbose and len(tied_param_groups) > 0:
            print(f"  Found the relevant tied param groups {tied_param_groups}")

        # Then we keep track of all the parameters that are tied to the current module, but not in the current module
        tied_params = sum(
            [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_groups], []
        )

        if verbose and len(tied_params) > 0:
            print(f"  So those parameters need to be taken into account {tied_params}")

        device = devices[current_device]
        current_max_size = max_memory[device] if device != "disk" else None
        current_memory_reserved = 0
        # Reduce max size available by the largest layer.
        if devices[current_device] in main_devices:
            current_max_size = current_max_size - max_layer_size
            current_memory_reserved = max_layer_size

        module_size_with_ties, tied_module_names, tied_modules = get_module_size_with_ties(
            tied_params, module_size, module_sizes, modules_to_treat
        )

        # The module and its tied modules fit on the current device.
        if current_max_size is None or device_memory_used[device] + module_size_with_ties <= current_max_size:
            if verbose:
                output = f"Putting {name}"

                if tied_module_names:
                    output += f" and {tied_module_names}"
                else:
                    output += f" (size={module_size})"

                if current_max_size is not None:
                    output += f" (available={current_max_size - device_memory_used[device]})"

                output += f" on {device}."
                print(output)

            device_memory_used[device] += module_size_with_ties

            # Assign the primary module to the device.
            device_map[name] = device

            # Assign tied modules if any.
            for tied_module_name in tied_module_names:
                if tied_module_name in [m[0] for m in modules_to_treat]:
                    # Find the index of the tied module in the list
                    tied_module_index = next(i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name)
                    # Remove the tied module from the list to prevent reprocessing
                    modules_to_treat.pop(tied_module_index)

                # Assign the tied module to the device
                device_map[tied_module_name] = device

            # Buffer Handling
            if not offload_buffers and isinstance(module, nn.Module):
                # Compute the total buffer size for the module
                current_buffer_size = compute_module_total_buffer_size(
                    module, dtype=dtype, special_dtypes=special_dtypes
                )
                # Update the buffer size on the device
                device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size

            continue

        # The current module itself fits, so we try to split the tied modules.
        if len(tied_params) > 0 and device_memory_used[device] + module_size <= current_max_size:
            # can we split one of the tied modules to make it smaller or do we need to go on the next device?
            if verbose:
                print(
                    f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space "
                    f"available {current_max_size - device_memory_used[device]}, needed size {module_size_with_ties})."
                )
            split_happened = False
            for tied_module_name, tied_module in zip(tied_module_names, tied_modules):
                tied_module_children = list(tied_module.named_children())
                if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
                    # can't break this one.
                    continue

                if verbose:
                    print(f"Splitting {tied_module_name}.")
                tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
                tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children]
                tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0]

                # Re-queue the current module first, then the tied module's children in place of it.
                modules_to_treat = (
                    [(name, module)]
                    + modules_to_treat[:tied_module_index]
                    + tied_module_children
                    + modules_to_treat[tied_module_index + 1 :]
                )
                # Update the max layer size.
                max_layer_size, max_layer_names = get_max_layer_size(
                    [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
                    module_sizes,
                    no_split_module_classes,
                )
                split_happened = True
                break

            if split_happened:
                continue

            # If the tied module is not split, we go to the next device
            if verbose:
                print("None of the tied module can be split, going to the next device.")

        # The current module itself doesn't fit, so we have to split it or go to the next device.
        if device_memory_used[device] + module_size >= current_max_size:
            # Split or not split?
            modules_children = (
                []
                if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor)
                else list(module.named_children())
            )
            if verbose:
                print(
                    f"Not enough space on {devices[current_device]} to put {name} (space available "
                    f"{current_max_size - device_memory_used[device]}, module size {module_size})."
                )
            if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
                # -> no split, we go to the next device
                if verbose:
                    print("This module cannot be split, going to the next device.")

            else:
                # -> split, we replace the module studied by its children + parameters
                if verbose:
                    print(f"Splitting {name}.")
                modules_children = list(module.named_parameters(recurse=False)) + modules_children
                modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat
                # Update the max layer size.
                max_layer_size, max_layer_names = get_max_layer_size(
                    [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
                    module_sizes,
                    no_split_module_classes,
                )
                continue

        # If no module is assigned to the current device, we attempt to allocate a fallback module
        # if fallback_allocation is enabled.
        if device_memory_used[device] == 0 and fallback_allocation and device != "disk":
            # We try to allocate a module that fits in the size limit using BFS.
            # Recompute the current max size as we need to consider the current module as well.
            current_max_size = max_memory[device] - max(max_layer_size, module_size_with_ties)

            fallback_module_name, fallback_module, remaining_modules = fallback_allocate(
                modules_to_treat,
                module_sizes,
                current_max_size - device_memory_used[device],
                no_split_module_classes,
                tied_parameters,
            )
            # use the next iteration to put the fallback module on the next device to avoid code duplication
            if fallback_module is not None:
                modules_to_treat = [(fallback_module_name, fallback_module)] + [(name, module)] + remaining_modules
                continue

        if device_memory_used[device] == 0:
            # Record the minimum memory that would have been needed to place anything on this device.
            device_minimum_assignment_memory[device] = module_size_with_ties + current_memory_reserved

        # Neither the current module nor any tied modules can be split, so we move to the next device.
        device_memory_used[device] = device_memory_used[device] + current_memory_reserved
        current_device += 1
        modules_to_treat = [(name, module)] + modules_to_treat

    # Drop devices that received nothing.
    device_memory_used = {device: mem for device, mem in device_memory_used.items() if mem > 0}

    if clean_result:
        device_map = clean_device_map(device_map)

    non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0)
    if non_gpu_buffer_size > 0 and not offload_buffers:
        is_buffer_fit_any_gpu = False
        for gpu_device, gpu_max_memory in max_memory.items():
            if gpu_device == "cpu" or gpu_device == "disk":
                continue

            if not is_buffer_fit_any_gpu:
                gpu_memory_used = device_memory_used.get(gpu_device, 0)

                if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used:
                    is_buffer_fit_any_gpu = True

        if len(gpus) > 0 and not is_buffer_fit_any_gpu:
            warnings.warn(
                f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which seems does "
                f"not fit any GPU's remaining memory. If you are experiencing a OOM later, please consider using "
                f"offload_buffers=True."
            )

    if device_minimum_assignment_memory:
        devices_info = "\n".join(
            f"  - {device}: {mem} bytes required" for device, mem in device_minimum_assignment_memory.items()
        )
        logger.info(
            f"Based on the current allocation process, no modules could be assigned to the following devices due to "
            f"insufficient memory:\n"
            f"{devices_info}\n"
            f"These minimum requirements are specific to this allocation attempt and may vary. Consider increasing "
            f"the available memory for these devices to at least the specified minimum, or adjusting the model config."
        )
    return device_map


def check_device_map(model: nn.Module, device_map: dict[str, Union[int, str, torch.device]]):
    """
    Checks a device map covers everything in a given model.

    Args:
        model (`torch.nn.Module`): The model to check the device map against.
        device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
    """
    all_module_names = dict(model.named_modules())
    invalid_keys = [k for k in device_map if k != "" and k not in all_module_names]

    if invalid_keys:
        warnings.warn(
            f"The following device_map keys do not match any submodules in the model: {invalid_keys}", UserWarning
        )

    # Cross every tensor name off the list as a device_map entry covers it; whatever is
    # left at the end has no device assigned.
    all_model_tensors = [name for name, _ in model.state_dict().items()]
    for module_name in device_map.keys():
        if module_name == "":
            # The empty key covers the whole model.
            all_model_tensors.clear()
            break
        else:
            all_model_tensors = [
                name
                for name in all_model_tensors
                if not name == module_name and not name.startswith(module_name + ".")
            ]
    if len(all_model_tensors) > 0:
        non_covered_params = ", ".join(all_model_tensors)
        raise ValueError(
            f"The device_map provided does not give any device for the following parameters: {non_covered_params}"
        )


def load_state_dict(checkpoint_file, device_map=None):
    """
    Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the
    weights can be fast-loaded directly on the GPU.

    Args:
        checkpoint_file (`str`): The path to the checkpoint to load.
        device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
            A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
            name, once a given module name is inside, every submodule of it will be sent to the same device.
    """
    if checkpoint_file.endswith(".safetensors"):
        with safe_open(checkpoint_file, framework="pt") as f:
            metadata = f.metadata()
            weight_names = f.keys()

            if metadata is None:
                logger.warning(
                    f"The safetensors archive passed at {checkpoint_file} does not contain metadata. "
                    "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata."
                )
                metadata = {"format": "pt"}

            if metadata.get("format") not in ["pt", "tf", "flax"]:
                raise OSError(
                    f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
                    "you save your model with the `save_pretrained` method."
                )
            elif metadata["format"] != "pt":
                raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.")
        if device_map is None:
            return safe_load_file(checkpoint_file)
        else:
            # if we only have one device we can load everything directly
            if len(set(device_map.values())) == 1:
                device = list(device_map.values())[0]
                target_device = device
                if isinstance(device, int):
                    if is_npu_available():
                        target_device = f"npu:{device}"
                    elif is_hpu_available():
                        target_device = "hpu"

                return safe_load_file(checkpoint_file, device=target_device)

            devices = list(set(device_map.values()) - {"disk"})
            # cpu device should always exist as fallback option
            if "cpu" not in devices:
                devices.append("cpu")

            # For each device, get the weights that go there
            device_weights = {device: [] for device in devices}
            for module_name, device in device_map.items():
                if device in devices:
                    device_weights[device].extend(
                        [k for k in weight_names if k == module_name or k.startswith(module_name + ".")]
                    )

            # all weights that haven't defined a device should be loaded on CPU
            device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])])
            tensors = {}
            if is_tqdm_available():
                progress_bar = tqdm(
                    main_process_only=False,
                    total=sum([len(device_weights[device]) for device in devices]),
                    unit="w",
                    smoothing=0,
                    leave=False,
                )
            else:
                progress_bar = None
            # Open the archive once per target device and pull only that device's tensors.
            for device in devices:
                target_device = device
                if isinstance(device, int):
                    if is_npu_available():
                        target_device = f"npu:{device}"
                    elif is_hpu_available():
                        target_device = "hpu"

                with safe_open(checkpoint_file, framework="pt", device=target_device) as f:
                    for key in device_weights[device]:
                        if progress_bar is not None:
                            progress_bar.set_postfix(dev=device, refresh=False)
                            progress_bar.set_description(key)
                        tensors[key] = f.get_tensor(key)
                        if progress_bar is not None:
                            progress_bar.update()
            if progress_bar is not None:
                progress_bar.close()

            return tensors
    else:
        return torch.load(checkpoint_file, map_location=torch.device("cpu"), weights_only=True)


def get_state_dict_offloaded_model(model: nn.Module):
    """
    Returns the state dictionary for an offloaded model via iterative onloading

    Args:
        model (`torch.nn.Module`):
            The offloaded model we want to save
    """

    state_dict = {}
    placeholders = set()
    for name, module in model.named_modules():
        if name == "":
            continue

        try:
            # Temporarily onload the module's weights to CPU to read them.
            with align_module_device(module, "cpu"):
                module_state_dict = module.state_dict()
        except MemoryError:
            raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None

        for key in module_state_dict:
            # ignore placeholder parameters that are still on the meta device
            if module_state_dict[key].device == torch.device("meta"):
                placeholders.add(name + f".{key}")
                continue
            params = module_state_dict[key]
            state_dict[name + f".{key}"] = params.to("cpu")  # move buffers to cpu
    # A placeholder seen on one module may have been materialized via another (e.g. shared
    # weights); only warn about names that never made it into the state dict.
    for key in placeholders.copy():
        if key in state_dict:
            placeholders.remove(key)
    if placeholders:
        logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}")

    return state_dict


def get_state_dict_from_offload(
    module: nn.Module,
    module_name: str,
    state_dict: dict[str, Union[str, torch.tensor]],
    device_to_put_offload: Union[int, str, torch.device] = "cpu",
):
    """
    Retrieve the state dictionary (with parameters) from an offloaded module and load into a specified device (defaults
    to cpu).
+ + Args: + module: (`torch.nn.Module`): + The module we want to retrieve a state dictionary from + module_name: (`str`): + The name of the module of interest + state_dict (`Dict[str, Union[int, str, torch.device]]`): + Dictionary of {module names: parameters} + device_to_put_offload (`Union[int, str, torch.device]`): + Device to load offloaded parameters into, defaults to the cpu. + """ + + root = module_name[: module_name.rfind(".")] # module name without .weight or .bias + + # do not move parameters if the module is not offloaded + if not has_offloaded_params(module): + device_to_put_offload = None + + # assign the device to which the offloaded parameters will be sent + with align_module_device(module, device_to_put_offload): + for m_key, params in module.state_dict().items(): + if (root + f".{m_key}") in state_dict: + state_dict[root + f".{m_key}"] = params + + return state_dict + + +def load_checkpoint_in_model( + model: nn.Module, + checkpoint: Union[str, os.PathLike], + device_map: Optional[dict[str, Union[int, str, torch.device]]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + offload_state_dict: bool = False, + offload_buffers: bool = False, + keep_in_fp32_modules: list[str] = None, + offload_8bit_bnb: bool = False, + strict: bool = False, + full_state_dict: bool = True, + broadcast_from_rank0: bool = False, +): + """ + Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are + loaded. + + + + Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To + group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. + + + + Args: + model (`torch.nn.Module`): + The model in which we want to load a checkpoint. + checkpoint (`str` or `os.PathLike`): + The folder checkpoint to load. 
It can be: + - a path to a file containing a whole model state dict + - a path to a `.json` file containing the index to a sharded checkpoint + - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. + - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. + device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + offload_state_dict (`bool`, *optional*, defaults to `False`): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the buffers in the weights offloaded to disk. + keep_in_fp32_modules(`List[str]`, *optional*): + A list of the modules that we keep in `torch.float32` dtype. + offload_8bit_bnb (`bool`, *optional*): + Whether or not to enable offload of 8-bit modules on cpu/disk. + strict (`bool`, *optional*, defaults to `False`): + Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's + state_dict. + full_state_dict (`bool`, *optional*, defaults to `True`): if this is set to `True`, all the tensors in the + loaded state_dict will be gathered. No ShardedTensor and DTensor will be in the loaded state_dict. + broadcast_from_rank0 (`False`, *optional*, defaults to `False`): when the option is `True`, a distributed + `ProcessGroup` must be initialized. 
rank0 should receive a full state_dict and will broadcast the tensors + in the state_dict one by one to other ranks. Other ranks will receive the tensors and shard (if applicable) + according to the local shards in the model. + + """ + if offload_8bit_bnb: + from .bnb import quantize_and_offload_8bit + + tied_params = find_tied_parameters(model) + + if check_tied_parameters_in_config(model) and len(tied_params) == 0: + logger.warning( + "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." + ) + if device_map is not None: + check_tied_parameters_on_same_device(tied_params, device_map) + + if offload_folder is None and device_map is not None and "disk" in device_map.values(): + raise ValueError( + "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`." + ) + elif offload_folder is not None and device_map is not None and "disk" in device_map.values(): + os.makedirs(offload_folder, exist_ok=True) + + if isinstance(dtype, str): + # We accept "torch.float16" or just "float16" + dtype = dtype.replace("torch.", "") + dtype = getattr(torch, dtype) + + checkpoint_files = None + index_filename = None + if os.path.isfile(checkpoint): + if str(checkpoint).endswith(".json"): + index_filename = checkpoint + else: + checkpoint_files = [checkpoint] + elif os.path.isdir(checkpoint): + # check if the whole state dict is present + potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME] + potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME] + if len(potential_state_bin) == 1: + checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])] + elif len(potential_state_safetensor) == 1: + checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])] + else: + # otherwise check for sharded checkpoints + potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")] + if 
len(potential_index) == 0: + raise ValueError( + f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file" + ) + elif len(potential_index) == 1: + index_filename = os.path.join(checkpoint, potential_index[0]) + else: + raise ValueError( + f"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones." + ) + else: + raise ValueError( + "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded " + f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}." + ) + + if index_filename is not None: + checkpoint_folder = os.path.split(index_filename)[0] + with open(index_filename) as f: + index = json.loads(f.read()) + + if "weight_map" in index: + index = index["weight_map"] + checkpoint_files = sorted(list(set(index.values()))) + checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files] + + # Logic for missing/unexepected keys goes here. 
+ + offload_index = {} + if offload_state_dict: + state_dict_folder = tempfile.mkdtemp() + state_dict_index = {} + + unexpected_keys = set() + model_keys = set(model.state_dict().keys()) + buffer_names = [name for name, _ in model.named_buffers()] + model_devices = {t.device for t in model.state_dict().values() if isinstance(t, torch.Tensor)} + model_physical_devices = model_devices - {torch.device("meta")} + for checkpoint_file in checkpoint_files: + if device_map is None: + # exception for multi-device loading was made for the meta device in torch v2.7.0 + # https://github.com/pytorch/pytorch/blob/v2.6.0/torch/distributed/checkpoint/state_dict.py#L557-L563 + # https://github.com/pytorch/pytorch/blob/v2.7.0-rc2/torch/distributed/checkpoint/state_dict.py#L575-L587 + if is_torch_version(">=", "2.2.0") and ( + (is_torch_version(">=", "2.7.0") and len(model_physical_devices) <= 1) or len(model_devices) <= 1 + ): + from torch.distributed.checkpoint.state_dict import StateDictOptions, set_model_state_dict + + broadcast_from_rank0 &= is_torch_version(">=", "2.4.0") + loaded_checkpoint = ( + load_state_dict(checkpoint_file, device_map=device_map) + if not broadcast_from_rank0 or dist.get_rank() == 0 + else {} + ) + set_model_state_dict( + model, + loaded_checkpoint, + options=StateDictOptions( + full_state_dict=full_state_dict, + strict=strict, + **({"broadcast_from_rank0": broadcast_from_rank0} if is_torch_version(">=", "2.4.0") else {}), + ), + ) + else: + loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map) + model.load_state_dict(loaded_checkpoint, strict=strict) + + unexpected_keys.update(set(loaded_checkpoint.keys()) - model_keys) + else: + loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map) + + for param_name, param in loaded_checkpoint.items(): + # skip SCB parameter (for 8-bit serialization) + if "SCB" in param_name: + continue + + if param_name not in model_keys: + unexpected_keys.add(param_name) + if not strict: + 
continue # Skip loading this parameter. + + module_name = param_name + + while len(module_name) > 0 and module_name not in device_map: + module_name = ".".join(module_name.split(".")[:-1]) + if module_name == "" and "" not in device_map: + # TODO: group all errors and raise at the end. + raise ValueError(f"{param_name} doesn't have any device set.") + param_device = device_map[module_name] + new_dtype = dtype + if dtype is not None and torch.is_floating_point(param): + if keep_in_fp32_modules is not None and dtype == torch.float16: + proceed = False + for key in keep_in_fp32_modules: + if ((key in param_name) and (key + "." in param_name)) or key == param_name: + proceed = True + break + if proceed: + new_dtype = torch.float32 + + if "weight" in param_name and param_name.replace("weight", "SCB") in loaded_checkpoint.keys(): + if param.dtype == torch.int8: + fp16_statistics = loaded_checkpoint[param_name.replace("weight", "SCB")] + else: + fp16_statistics = None + + if param_device == "disk": + if offload_buffers or param_name not in buffer_names: + if new_dtype is None: + new_dtype = param.dtype + if offload_8bit_bnb: + quantize_and_offload_8bit( + model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics + ) + continue + else: + set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) + offload_weight(param, param_name, offload_folder, index=offload_index) + elif param_device == "cpu" and offload_state_dict: + if new_dtype is None: + new_dtype = param.dtype + if offload_8bit_bnb: + quantize_and_offload_8bit( + model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics + ) + else: + set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) + offload_weight(param, param_name, state_dict_folder, index=state_dict_index) + else: + set_module_tensor_to_device( + model, + param_name, + param_device, + value=param, + dtype=new_dtype, + fp16_statistics=fp16_statistics, + ) + + # Force 
Python to clean up. + del loaded_checkpoint + gc.collect() + + if not strict and len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint at {checkpoint} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}. This may or may not be an issue - make sure that the checkpoint does not have unnecessary parameters, or that the model definition correctly corresponds to the checkpoint." + ) + + save_offload_index(offload_index, offload_folder) + + # Load back offloaded state dict on CPU + if offload_state_dict: + load_offloaded_weights(model, state_dict_index, state_dict_folder) + shutil.rmtree(state_dict_folder) + + retie_parameters(model, tied_params) + + +def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None): + """ + Return a context manager for autocasting mixed precision + + Args: + native_amp (`bool`, *optional*, defaults to False): + Whether mixed precision is actually enabled. + cache_enabled (`bool`, *optional*, defaults to True): + Whether the weight cache inside autocast should be enabled. 
+ """ + state = AcceleratorState() + if autocast_kwargs is None: + autocast_kwargs = {} + else: + autocast_kwargs = autocast_kwargs.to_kwargs() + if native_amp: + device_type = ( + "cuda" + if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True)) + else state.device.type + ) + if state.mixed_precision == "fp16": + return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs) + elif state.mixed_precision in ["bf16", "fp8"] and state.distributed_type in [ + DistributedType.NO, + DistributedType.MULTI_CPU, + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_SDAA, + DistributedType.MULTI_MUSA, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_HPU, + DistributedType.FSDP, + DistributedType.XLA, + ]: + return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs) + else: + return torch.autocast(device_type=device_type, **autocast_kwargs) + else: + return contextlib.nullcontext() + + +def get_grad_scaler(distributed_type: DistributedType = None, **kwargs): + """ + A generic helper which will initialize the correct `GradScaler` implementation based on the environment and return + it. + + Args: + distributed_type (`DistributedType`, *optional*, defaults to None): + The type of distributed environment. + kwargs: + Additional arguments for the utilized `GradScaler` constructor. 
+ """ + if distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler + + return ShardedGradScaler(**kwargs) + if is_torch_xla_available(check_is_gpu=True): + import torch_xla.amp as xamp + + return xamp.GradScaler(**kwargs) + elif is_mlu_available(): + return torch.mlu.amp.GradScaler(**kwargs) + elif is_sdaa_available(): + return torch.sdaa.amp.GradScaler(**kwargs) + elif is_musa_available(): + return torch.musa.amp.GradScaler(**kwargs) + elif is_npu_available(): + return torch.npu.amp.GradScaler(**kwargs) + elif is_hpu_available(): + return torch.amp.GradScaler("hpu", **kwargs) + elif is_xpu_available(): + return torch.amp.GradScaler("xpu", **kwargs) + else: + if is_torch_version(">=", "2.3"): + return torch.amp.GradScaler("cuda", **kwargs) + else: + return torch.cuda.amp.GradScaler(**kwargs) + + +def has_offloaded_params(module: torch.nn.Module) -> bool: + """ + Checks if a module has offloaded parameters by checking if the given module has a AlignDevicesHook attached with + offloading enabled + + Args: + module (`torch.nn.Module`): The module to check for an offload hook. + + Returns: + bool: `True` if the module has an offload hook and offloading is enabled, `False` otherwise. + """ + from ..hooks import AlignDevicesHook # avoid circular import + + return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload + + +@contextlib.contextmanager +def align_module_device(module: torch.nn.Module, execution_device: Optional[torch.device] = None): + """ + Context manager that moves a module's parameters to the specified execution device. + + Args: + module (`torch.nn.Module`): + Module with parameters to align. + execution_device (`torch.device`, *optional*): + If provided, overrides the module's execution device within the context. 
Otherwise, use hook execution + device or pass + """ + if has_offloaded_params(module): + if execution_device is not None: + original_device = module._hf_hook.execution_device + module._hf_hook.execution_device = execution_device + + try: + module._hf_hook.pre_forward(module) + yield + finally: + module._hf_hook.post_forward(module, None) + if execution_device is not None: + module._hf_hook.execution_device = original_device + + elif execution_device is not None: + devices = {name: param.device for name, param in module.named_parameters(recurse=False)} + try: + for name in devices: + set_module_tensor_to_device(module, name, execution_device) + yield + finally: + for name, device in devices.items(): + set_module_tensor_to_device(module, name, device) + + else: + yield diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/offload.py b/venv/lib/python3.10/site-packages/accelerate/utils/offload.py new file mode 100644 index 0000000000000000000000000000000000000000..d8bff7dc6ad41ffc7f14555261d115d51b76ccec --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/offload.py @@ -0,0 +1,213 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +from collections.abc import Mapping +from typing import Optional, Union + +import numpy as np +import torch +from safetensors import safe_open + + +def offload_weight(weight, weight_name, offload_folder, index=None): + dtype = None + # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16. + if str(weight.dtype) == "torch.bfloat16": + # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s. + weight = weight.view(torch.int16) + dtype = "bfloat16" + array = weight.cpu().numpy() + tensor_file = os.path.join(offload_folder, f"{weight_name}.dat") + if index is not None: + if dtype is None: + dtype = str(array.dtype) + index[weight_name] = {"dtype": dtype, "shape": list(array.shape)} + if array.ndim == 0: + array = array[None] + file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape) + file_array[:] = array[:] + file_array.flush() + return index + + +def load_offloaded_weight(weight_file, weight_info): + shape = tuple(weight_info["shape"]) + if shape == (): + # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor + shape = (1,) + + dtype = weight_info["dtype"] + if dtype == "bfloat16": + # NumPy does not support bfloat16 so this was saved as a int16 + dtype = "int16" + + weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r") + + if len(weight_info["shape"]) == 0: + weight = weight[0] + weight = torch.tensor(weight) + if weight_info["dtype"] == "bfloat16": + weight = weight.view(torch.bfloat16) + + return weight + + +def save_offload_index(index, offload_folder): + if index is None or len(index) == 0: + # Nothing to save + return + + offload_index_file = os.path.join(offload_folder, "index.json") + if os.path.isfile(offload_index_file): + with open(offload_index_file, encoding="utf-8") as f: + current_index = json.load(f) + else: + current_index = {} + current_index.update(index) + + with 
open(offload_index_file, "w", encoding="utf-8") as f: + json.dump(current_index, f, indent=2) + + +def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: dict[str, torch.Tensor]): + """ + Offload a state dict in a given folder. + + Args: + save_dir (`str` or `os.PathLike`): + The directory in which to offload the state dict. + state_dict (`Dict[str, torch.Tensor]`): + The dictionary of tensors to offload. + """ + os.makedirs(save_dir, exist_ok=True) + index = {} + for name, parameter in state_dict.items(): + index = offload_weight(parameter, name, save_dir, index=index) + + # Update index + save_offload_index(index, save_dir) + + +class PrefixedDataset(Mapping): + """ + Will access keys in a given dataset by adding a prefix. + + Args: + dataset (`Mapping`): Any map with string keys. + prefix (`str`): A prefix to add when trying to access any element in the underlying dataset. + """ + + def __init__(self, dataset: Mapping, prefix: str): + self.dataset = dataset + self.prefix = prefix + + def __getitem__(self, key): + return self.dataset[f"{self.prefix}{key}"] + + def __iter__(self): + return iter([key for key in self.dataset if key.startswith(self.prefix)]) + + def __len__(self): + return len(self.dataset) + + +class OffloadedWeightsLoader(Mapping): + """ + A collection that loads weights stored in a given state dict or memory-mapped on disk. + + Args: + state_dict (`Dict[str, torch.Tensor]`, *optional*): + A dictionary parameter name to tensor. + save_folder (`str` or `os.PathLike`, *optional*): + The directory in which the weights are stored (by `offload_state_dict` for instance). + index (`Dict`, *optional*): + A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default + to the index saved in `save_folder`. 
+ """ + + def __init__( + self, + state_dict: dict[str, torch.Tensor] = None, + save_folder: Optional[Union[str, os.PathLike]] = None, + index: Mapping = None, + device=None, + ): + if state_dict is None and save_folder is None and index is None: + raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.") + + self.state_dict = {} if state_dict is None else state_dict + self.save_folder = save_folder + if index is None and save_folder is not None: + with open(os.path.join(save_folder, "index.json")) as f: + index = json.load(f) + self.index = {} if index is None else index + self.all_keys = list(self.state_dict.keys()) + self.all_keys.extend([key for key in self.index if key not in self.all_keys]) + self.device = device + + def __getitem__(self, key: str): + # State dict gets priority + if key in self.state_dict: + return self.state_dict[key] + weight_info = self.index[key] + if weight_info.get("safetensors_file") is not None: + device = "cpu" if self.device is None else self.device + tensor = None + try: + with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f: + tensor = f.get_tensor(weight_info.get("weight_name", key)) + except TypeError: + # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first + with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f: + tensor = f.get_tensor(weight_info.get("weight_name", key)) + + if "dtype" in weight_info: + tensor = tensor.to(getattr(torch, weight_info["dtype"])) + + if tensor.device != torch.device(device): + tensor = tensor.to(device) + return tensor + + weight_file = os.path.join(self.save_folder, f"{key}.dat") + return load_offloaded_weight(weight_file, weight_info) + + def __iter__(self): + return iter(self.all_keys) + + def __len__(self): + return len(self.all_keys) + + +def extract_submodules_state_dict(state_dict: dict[str, torch.Tensor], submodule_names: list[str]): + """ + 
Extract the sub state-dict corresponding to a list of given submodules. + + Args: + state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. + submodule_names (`List[str]`): The list of submodule names we want to extract. + """ + result = {} + for module_name in submodule_names: + # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the + # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance) + result.update( + { + key: param + for key, param in state_dict.items() + if key == module_name or key.startswith(module_name + ".") + } + ) + return result diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/operations.py b/venv/lib/python3.10/site-packages/accelerate/utils/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..088c0f6efd2a8d7e64a778d70450dd0966984446 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/operations.py @@ -0,0 +1,866 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""
+A set of basic tensor ops compatible with tpu, gpu, and multigpu
+"""
+
+import pickle
+import warnings
+from collections.abc import Mapping
+from contextlib import contextmanager, nullcontext
+from functools import update_wrapper, wraps
+from typing import Any
+
+import torch
+
+from ..state import AcceleratorState, PartialState
+from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
+from .dataclasses import DistributedType, TensorInformation
+from .imports import (
+    is_npu_available,
+    is_torch_distributed_available,
+    is_torch_xla_available,
+)
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+if is_torch_distributed_available():
+    from torch.distributed import ReduceOp
+
+
+def is_torch_tensor(tensor):
+    return isinstance(tensor, torch.Tensor)
+
+
+def is_torch_xpu_tensor(tensor):
+    return isinstance(
+        tensor,
+        (torch.xpu.FloatTensor,  # FIX: classes must be one tuple arg; isinstance() with 9 positional args raises TypeError
+        torch.xpu.ByteTensor,
+        torch.xpu.IntTensor,
+        torch.xpu.LongTensor,
+        torch.xpu.HalfTensor,
+        torch.xpu.DoubleTensor,
+        torch.xpu.BFloat16Tensor),
+    )
+
+
+def is_tensor_information(tensor_info):
+    return isinstance(tensor_info, TensorInformation)
+
+
+def is_namedtuple(data):
+    """
+    Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
+    `namedtuple` perfectly.
+    """
+    return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
+
+
+def honor_type(obj, generator):
+    """
+    Cast a generator to the same type as obj (list, tuple, or namedtuple)
+    """
+    # Some objects may not be able to instantiate from a generator directly
+    if is_namedtuple(obj):
+        return type(obj)(*list(generator))
+    else:
+        return type(obj)(generator)
+
+
+def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
+    """
+    Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
+
+    Args:
+        func (`callable`):
+            The function to recursively apply.
+        data (nested list/tuple/dictionary of `main_type`):
+            The data on which to apply `func`
+        *args:
+            Positional arguments that will be passed to `func` when applied on the unpacked data.
+        main_type (`type`, *optional*, defaults to `torch.Tensor`):
+            The base type of the objects to which apply `func`.
+        error_on_other_type (`bool`, *optional*, defaults to `False`):
+            Whether to return an error or not if after unpacking `data`, we get on an object that is not of type
+            `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged.
+        **kwargs (additional keyword arguments, *optional*):
+            Keyword arguments that will be passed to `func` when applied on the unpacked data.
+
+    Returns:
+        The same data structure as `data` with `func` applied to every object of type `main_type`.
+    """
+    if isinstance(data, (tuple, list)):
+        return honor_type(
+            data,
+            (
+                recursively_apply(
+                    func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
+                )
+                for o in data
+            ),
+        )
+    elif isinstance(data, Mapping):
+        return type(data)(
+            {
+                k: recursively_apply(
+                    func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
+                )
+                for k, v in data.items()
+            }
+        )
+    elif test_type(data):
+        return func(data, *args, **kwargs)
+    elif error_on_other_type:
+        raise TypeError(
+            f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
+            f"objects that are valid for `{test_type.__name__}` should be passed."
+        )
+    return data
+
+
+def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
+    """
+    Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to send to a given device.
+        device (`torch.device`):
+            The device to send the data to.
+
+    Returns:
+        The same data structure as `tensor` with all tensors sent to the proper device.
+    """
+    if is_torch_tensor(tensor) or hasattr(tensor, "to"):
+        # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)).
+        if device == "npu":
+            device = "npu:0"
+        try:
+            return tensor.to(device, non_blocking=non_blocking)
+        except TypeError:  # .to() doesn't accept non_blocking as kwarg
+            return tensor.to(device)
+        except AssertionError as error:
+            # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+            # This call is inside the try-block since is_npu_available is not supported by torch.compile.
+            if is_npu_available():
+                if isinstance(device, int):
+                    device = f"npu:{device}"
+            else:
+                raise error
+            try:
+                return tensor.to(device, non_blocking=non_blocking)
+            except TypeError:  # .to() doesn't accept non_blocking as kwarg
+                return tensor.to(device)
+    elif isinstance(tensor, (tuple, list)):
+        return honor_type(
+            tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
+        )
+    elif isinstance(tensor, Mapping):
+        if isinstance(skip_keys, str):
+            skip_keys = [skip_keys]
+        elif skip_keys is None:
+            skip_keys = []
+        return type(tensor)(
+            {
+                k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)
+                for k, t in tensor.items()
+            }
+        )
+    else:
+        return tensor
+
+
+def get_data_structure(data):
+    """
+    Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
+
+    Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to send to analyze.
+
+    Returns:
+        The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
+    """
+
+    def _get_data_structure(tensor):
+        return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
+
+    return recursively_apply(_get_data_structure, data)
+
+
+def get_shape(data):
+    """
+    Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
+
+    Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to send to analyze.
+
+    Returns:
+        The same data structure as `data` with lists of tensor shapes instead of tensors.
+    """
+
+    def _get_shape(tensor):
+        return list(tensor.shape)
+
+    return recursively_apply(_get_shape, data)
+
+
+def initialize_tensors(data_structure):
+    """
+    Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
+
+    Returns:
+        The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
+    """
+
+    def _initialize_tensor(tensor_info):
+        return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
+
+    return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
+
+
+def find_batch_size(data):
+    """
+    Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
+
+    Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
+
+    Returns:
+        `int`: The batch size.
+    """
+    if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
+        raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
+
+    if isinstance(data, (tuple, list)):
+        return find_batch_size(data[0])
+    elif isinstance(data, Mapping):
+        for k in data.keys():
+            return find_batch_size(data[k])
+    elif not isinstance(data, torch.Tensor):
+        raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
+    return data.shape[0]
+
+
+def ignorant_find_batch_size(data):
+    """
+    Same as [`utils.operations.find_batch_size`] except will ignore if `ValueError` and `TypeErrors` are raised
+
+    Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
+
+    Returns:
+        `int`: The batch size.
+    """
+    try:
+        return find_batch_size(data)
+    except (ValueError, TypeError):
+        pass
+    return None
+
+
+def listify(data):
+    """
+    Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
+
+    Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
+
+    Returns:
+        The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
+    """
+
+    def _convert_to_list(tensor):
+        tensor = tensor.detach().cpu()
+        if tensor.dtype == torch.bfloat16:
+            # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
+            # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
+            # Until Numpy adds bfloat16, we must convert float32.
+            tensor = tensor.to(torch.float32)
+        return tensor.tolist()
+
+    return recursively_apply(_convert_to_list, data)
+
+
+def _tpu_gather(tensor):
+    def _tpu_gather_one(tensor):
+        if tensor.ndim == 0:
+            tensor = tensor.clone()[None]
+
+        # Can only gather contiguous tensors
+        if not tensor.is_contiguous():
+            tensor = tensor.contiguous()
+        return xm.all_gather(tensor)
+
+    res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
+    xm.mark_step()
+    return res
+
+
+def _gpu_gather(tensor):
+    state = PartialState()
+    gather_op = torch.distributed.all_gather_into_tensor
+
+    # FIXME: the below 2 lines are added to work-around a bug related to INT64 collectives in oneCCL. Remove them once pytorch-2.9 is released.
+    if state.device.type == "xpu":
+        torch.xpu.synchronize()
+
+    def _gpu_gather_one(tensor):
+        if tensor.ndim == 0:
+            tensor = tensor.clone()[None]
+
+        # Can only gather contiguous tensors
+        if not tensor.is_contiguous():
+            tensor = tensor.contiguous()
+
+        if state.backend is not None and state.backend != "gloo":
+            # We use `empty` as `all_gather_into_tensor` slightly
+            # differs from `all_gather` for better efficiency,
+            # and we rely on the number of items in the tensor
+            # rather than its direct shape
+            output_tensors = torch.empty(
+                state.num_processes * tensor.numel(),
+                dtype=tensor.dtype,
+                device=state.device,
+            )
+            gather_op(output_tensors, tensor)
+            return output_tensors.view(-1, *tensor.size()[1:])
+        else:
+            # a backend of `None` is always CPU
+            # also gloo does not support `all_gather_into_tensor`,
+            # which will result in a larger memory overhead for the op
+            output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
+            torch.distributed.all_gather(output_tensors, tensor)
+            return torch.cat(output_tensors, dim=0)
+
+    return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
+
+
+class DistributedOperationException(Exception):
+    """
+    An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
+    tensors.
+    """
+
+    pass
+
+
+def verify_operation(function):
+    """
+    Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`.
+    """
+
+    @wraps(function)
+    def wrapper(*args, **kwargs):
+        if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
+            return function(*args, **kwargs)
+        operation = f"{function.__module__}.{function.__name__}"
+        if "tensor" in kwargs:
+            tensor = kwargs["tensor"]
+        else:
+            tensor = args[0]
+        if PartialState().device.type != find_device(tensor).type:
+            raise DistributedOperationException(
+                f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. "
+                f"Please move it to the {PartialState().device.type} before calling {operation}."
+            )
+        shapes = get_shape(tensor)
+        output = gather_object([shapes])
+        if output[0] is not None:
+            are_same = output.count(output[0]) == len(output)
+            if not are_same:
+                process_shape_str = "\n  - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)])
+                raise DistributedOperationException(
+                    f"Cannot apply desired operation due to shape mismatches. "
+                    "All shapes across devices must be valid."
+                    f"\n\nOperation: `{operation}`\nInput shapes:\n  - {process_shape_str}"
+                )
+        return function(*args, **kwargs)
+
+    return wrapper
+
+
+def chained_operation(function):
+    """
+    Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
+    `DistributedOperationException`.
+    """
+
+    @wraps(function)
+    def wrapper(*args, **kwargs):
+        try:
+            return function(*args, **kwargs)
+        except DistributedOperationException as e:
+            operation = f"{function.__module__}.{function.__name__}"
+            raise DistributedOperationException(
+                f"Error found while calling `{operation}`. Please see the earlier error for more details."
+            ) from e
+
+    return wrapper
+
+
+@verify_operation
+def gather(tensor):
+    """
+    Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to gather.
+
+    Returns:
+        The same data structure as `tensor` with all tensors sent to the proper device.
+    """
+    if PartialState().distributed_type == DistributedType.XLA:
+        return _tpu_gather(tensor)
+    elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+        return _gpu_gather(tensor)
+    else:
+        return tensor
+
+
+def _gpu_gather_object(object: Any):
+    output_objects = [None for _ in range(PartialState().num_processes)]
+    torch.distributed.all_gather_object(output_objects, object)
+    # all_gather_object returns a list of lists, so we need to flatten it
+    return [x for y in output_objects for x in y]
+
+
+def gather_object(object: Any):
+    """
+    Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
+
+    Args:
+        object (nested list/tuple/dictionary of picklable object):
+            The data to gather.
+
+    Returns:
+        The same data structure as `object` with all the objects sent to every device.
+    """
+    if PartialState().distributed_type == DistributedType.XLA:
+        raise NotImplementedError("gather objects in TPU is not supported")
+    elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+        return _gpu_gather_object(object)
+    else:
+        return object
+
+
+def _gpu_broadcast(data, src=0):
+    def _gpu_broadcast_one(tensor, src=0):
+        torch.distributed.broadcast(tensor, src=src)
+        return tensor
+
+    return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
+
+
+def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
+    if isinstance(tensor, (list, tuple)):
+        # FIX: forward `src` on recursion; previously nested containers always broadcast from rank 0
+        return honor_type(tensor, (_tpu_broadcast(t, src=src, name=f"{name}_{i}") for i, t in enumerate(tensor)))
+    elif isinstance(tensor, Mapping):
+        return type(tensor)({k: _tpu_broadcast(v, src=src, name=f"{name}_{k}") for k, v in tensor.items()})
+    return xm.mesh_reduce(name, tensor, lambda x: x[src])
+
+
+TENSOR_TYPE_TO_INT = {
+    torch.float: 1,
+    torch.double: 2,
+    torch.half: 3,
+    torch.bfloat16: 4,
+    torch.uint8: 5,
+    torch.int8: 6,
+    torch.int16: 7,
+    torch.int32: 8,
+    torch.int64: 9,
+    torch.bool: 10,
+}
+
+TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()}
+
+
+def gather_tensor_shape(tensor):
+    """
+    Grabs the shape of `tensor` only available on one process and returns a tensor of its shape
+    """
+    # Allocate a 2**20-element int buffer (not 80 bytes) to hold the shape + coded dtype
+    max_tensor_dimension = 2**20
+    state = PartialState()
+    base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)
+
+    # Since PyTorch can't just send a tensor to another GPU without
+    # knowing its size, we store the size of the tensor with data
+    # in an allocation
+    if tensor is not None:
+        shape = tensor.shape
+        tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
+        base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)
+    # Perform a reduction to copy the size data onto all GPUs
+    base_tensor = reduce(base_tensor, reduction="sum")
+    base_tensor = base_tensor[base_tensor.nonzero()]
+    # The last non-zero data contains the coded dtype the source tensor is
+    dtype = int(base_tensor[-1:][0])
+    base_tensor = base_tensor[:-1]
+    return base_tensor, dtype
+
+
+def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
+    """
+    Copies a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast` as
+    each worker doesn't need to know its shape when used (and tensor can be `None`)
+
+    Args:
+        tensor (`torch.tensor`):
+            The tensor that should be sent to all devices. Must only have it be defined on a single device, the rest
+            should be `None`.
+    """
+    state = PartialState()
+    shape, dtype = gather_tensor_shape(tensor)
+    if tensor is None:
+        tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
+    return reduce(tensor, reduction="sum")
+
+
+@verify_operation
+def broadcast(tensor, from_process: int = 0):
+    """
+    Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to gather.
+        from_process (`int`, *optional*, defaults to 0):
+            The process from which to send the data
+
+    Returns:
+        The same data structure as `tensor` with all tensors broadcasted to the proper device.
+    """
+    if PartialState().distributed_type == DistributedType.XLA:
+        return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
+    elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+        return _gpu_broadcast(tensor, src=from_process)
+    else:
+        return tensor
+
+
+def broadcast_object_list(object_list, from_process: int = 0):
+    """
+    Broadcast a list of picklable objects form one process to the others.
+
+    Args:
+        object_list (list of picklable objects):
+            The list of objects to broadcast. This list will be modified inplace.
+        from_process (`int`, *optional*, defaults to 0):
+            The process from which to send the data.
+
+    Returns:
+        The same list containing the objects from process 0.
+    """
+    if PartialState().distributed_type == DistributedType.XLA:
+        for i, obj in enumerate(object_list):
+            object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
+    elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+        torch.distributed.broadcast_object_list(object_list, src=from_process)
+    return object_list
+
+
+def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
+    """
+    Recursively takes a slice in a nested list/tuple/dictionary of tensors.
+
+    Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to slice.
+        tensor_slice (`slice`):
+            The slice to take.
+
+    Returns:
+        The same data structure as `data` with all the tensors slices.
+    """
+
+    def _slice_tensor(tensor, tensor_slice):
+        return tensor[tensor_slice]
+
+    return recursively_apply(_slice_tensor, data, tensor_slice)
+
+
+def concatenate(data, dim=0):
+    """
+    Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
+
+    Args:
+        data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
+            The data to concatenate.
+        dim (`int`, *optional*, defaults to 0):
+            The dimension on which to concatenate.
+
+    Returns:
+        The same data structure as `data` with all the tensors concatenated.
+    """
+    if isinstance(data[0], (tuple, list)):
+        return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))
+    elif isinstance(data[0], Mapping):
+        return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})
+    elif not isinstance(data[0], torch.Tensor):
+        raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
+    return torch.cat(data, dim=dim)
+
+
+class CannotPadNestedTensorWarning(UserWarning):
+    pass
+
+
+@chained_operation
+def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
+    """
+    Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
+    can safely be gathered.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to gather.
+        dim (`int`, *optional*, defaults to 0):
+            The dimension on which to pad.
+        pad_index (`int`, *optional*, defaults to 0):
+            The value with which to pad.
+        pad_first (`bool`, *optional*, defaults to `False`):
+            Whether to pad at the beginning or the end.
+    """
+
+    def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
+        if getattr(tensor, "is_nested", False):
+            warnings.warn(
+                "Cannot pad nested tensors without more information. Leaving unprocessed.",
+                CannotPadNestedTensorWarning,
+            )
+            return tensor
+        if dim >= len(tensor.shape) or dim < -len(tensor.shape):
+            return tensor
+        # Convert negative dimensions to non-negative
+        if dim < 0:
+            dim += len(tensor.shape)
+
+        # Gather all sizes
+        size = torch.tensor(tensor.shape, device=tensor.device)[None]
+        sizes = gather(size).cpu()
+        # Then pad to the maximum size
+        max_size = max(s[dim] for s in sizes)
+        if max_size == tensor.shape[dim]:
+            return tensor
+
+        old_size = tensor.shape
+        new_size = list(old_size)
+        new_size[dim] = max_size
+        new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
+        if pad_first:
+            indices = tuple(
+                slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
+            )
+        else:
+            indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
+        new_tensor[indices] = tensor
+        return new_tensor
+
+    return recursively_apply(
+        _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
+    )
+
+
+def pad_input_tensors(tensor, batch_size, num_processes, dim=0):
+    """
+    Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions.
+
+    New tensors are just the last input repeated.
+
+    E.g.:
+        Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4])
+
+    """
+
+    def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
+        remainder = batch_size // num_processes
+        last_inputs = batch_size - (remainder * num_processes)
+        if batch_size // num_processes == 0:
+            to_pad = num_processes - batch_size
+        else:
+            to_pad = num_processes - (batch_size // num_processes)
+        # In the rare case that `to_pad` is negative,
+        # we need to pad the last inputs - the found `to_pad`
+        if last_inputs > to_pad and to_pad < 1:  # FIX: was `&`; equivalent only via chained-comparison accident
+            to_pad = last_inputs - to_pad
+        old_size = tensor.shape
+        new_size = list(old_size)
+        new_size[0] = batch_size + to_pad
+        new_tensor = tensor.new_zeros(tuple(new_size))
+        indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
+        new_tensor[indices] = tensor
+        return new_tensor
+
+    return recursively_apply(
+        _pad_input_tensors,
+        tensor,
+        error_on_other_type=True,
+        batch_size=batch_size,
+        num_processes=num_processes,
+        dim=dim,
+    )
+
+
+@verify_operation
+def reduce(tensor, reduction="mean", scale=1.0):
+    """
+    Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the
+    mean of a given operation.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to reduce.
+        reduction (`str`, *optional*, defaults to `"mean"`):
+            A reduction method. Can be of "mean", "sum", or "none"
+        scale (`float`, *optional*):
+            A default scaling value to be applied after the reduce, only valid on XLA.
+
+    Returns:
+        The same data structure as `data` with all the tensors reduced.
+    """
+
+    def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
+        state = PartialState()
+        cloned_tensor = tensor.clone()
+        if state.distributed_type == DistributedType.NO:
+            return cloned_tensor
+        if state.distributed_type == DistributedType.XLA:
+            # Some processes may have different HLO graphs than other
+            # processes, for example in the breakpoint API
+            # accelerator.set_trigger(). Use mark_step to make HLOs
+            # the same on all processes.
+            xm.mark_step()
+            xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale)
+            xm.mark_step()
+        elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
+            torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
+            if reduction == "mean":
+                cloned_tensor /= state.num_processes
+        return cloned_tensor
+
+    return recursively_apply(
+        _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
+    )
+
+
+def convert_to_fp32(tensor):
+    """
+    Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to convert from FP16/BF16 to FP32.
+
+    Returns:
+        The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
+    """
+
+    def _convert_to_fp32(tensor):
+        return tensor.float()
+
+    def _is_fp16_bf16_tensor(tensor):
+        return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in (
+            torch.float16,
+            torch.bfloat16,
+        )
+
+    return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
+
+
+class ConvertOutputsToFp32:
+    """
+    Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
+    precision will be convert back to FP32.
+
+    Args:
+        model_forward (`Callable`):
+            The function which outputs we want to treat.
+
+    Returns:
+        The same function as `model_forward` but with converted outputs.
+    """
+
+    def __init__(self, model_forward):
+        self.model_forward = model_forward
+        update_wrapper(self, model_forward)
+
+    def __call__(self, *args, **kwargs):
+        return convert_to_fp32(self.model_forward(*args, **kwargs))
+
+    def __getstate__(self):
+        raise pickle.PicklingError(
+            "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
+        )
+
+
+def convert_outputs_to_fp32(model_forward):
+    model_forward = ConvertOutputsToFp32(model_forward)
+
+    def forward(*args, **kwargs):
+        return model_forward(*args, **kwargs)
+
+    # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
+    forward.__wrapped__ = model_forward
+
+    return forward
+
+
+def find_device(data):
+    """
+    Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
+
+    Args:
+        (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
+    """
+    if isinstance(data, Mapping):
+        for obj in data.values():
+            device = find_device(obj)
+            if device is not None:
+                return device
+    elif isinstance(data, (tuple, list)):
+        for obj in data:
+            device = find_device(obj)
+            if device is not None:
+                return device
+    elif isinstance(data, torch.Tensor):
+        return data.device
+
+
+@contextmanager
+def GatheredParameters(params, modifier_rank=None, fwd_module=None, enabled=True):
+    """
+    Wrapper around `deepspeed.runtime.zero.GatheredParameters`, but if Zero-3 is not enabled, will be a no-op context
+    manager.
+    """
+    # We need to use the `AcceleratorState` here since it has access to the deepspeed plugin
+    if AcceleratorState().distributed_type != DistributedType.DEEPSPEED or (
+        AcceleratorState().deepspeed_plugin is not None
+        and not AcceleratorState().deepspeed_plugin.is_zero3_init_enabled()
+    ):
+        gather_param_context = nullcontext()
+    else:
+        import deepspeed
+
+        gather_param_context = deepspeed.zero.GatheredParameters(
+            params, modifier_rank=modifier_rank, fwd_module=fwd_module, enabled=enabled
+        )
+    with gather_param_context:
+        yield
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/other.py b/venv/lib/python3.10/site-packages/accelerate/utils/other.py
new file mode 100644
index 0000000000000000000000000000000000000000..adc0ed9cbfe66fa6e5fbcfd4da623d279c837a61
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/other.py
@@ -0,0 +1,560 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import collections +import platform +import re +import socket +from codecs import encode +from collections import OrderedDict +from functools import partial, reduce +from types import MethodType + +import numpy as np +import torch +from packaging.version import Version +from safetensors.torch import save_file as safe_save_file + +from ..commands.config.default import write_basic_config # noqa: F401 +from ..logging import get_logger +from ..state import PartialState +from .constants import FSDP_PYTORCH_VERSION +from .dataclasses import DistributedType +from .imports import ( + is_deepspeed_available, + is_numpy_available, + is_torch_distributed_available, + is_torch_xla_available, + is_weights_only_available, +) +from .modeling import id_tensor_storage +from .transformer_engine import convert_model +from .versions import is_torch_version + + +logger = get_logger(__name__) + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + +def is_compiled_module(module: torch.nn.Module) -> bool: + """ + Check whether the module was compiled with torch.compile() + """ + if not hasattr(torch, "_dynamo"): + return False + + return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) + + +def has_compiled_regions(module: torch.nn.Module) -> bool: + """ + Check whether the module has submodules that were compiled with `torch.compile()`. + """ + if not hasattr(torch, "_dynamo"): + return False + + if module._modules: + for submodule in module.modules(): + if isinstance(submodule, torch._dynamo.eval_frame.OptimizedModule): + return True + + return False + + +def is_repeated_blocks(module: torch.nn.Module) -> bool: + """ + Check whether the module is a repeated block, i.e. `torch.nn.ModuleList` with all children of the same class. This + is useful to determine whether we should apply regional compilation to the module. 
+ """ + + return isinstance(module, torch.nn.ModuleList) and all(isinstance(m, module[0].__class__) for m in module) + + +def has_repeated_blocks(module: torch.nn.Module) -> bool: + """ + Check whether the module has repeated blocks, i.e. `torch.nn.ModuleList` with all children of the same class, at + any level of the module hierarchy. This is useful to determine whether we should apply regional compilation to the + module. + """ + if module._modules: + for submodule in module.modules(): + if is_repeated_blocks(submodule): + return True + + return False + + +def compile_regions(module: torch.nn.Module, **compile_kwargs) -> torch.nn.Module: + """ + Performs regional compilation where we target repeated blocks of the same class and compile them sequentially to + hit the compiler's cache. For example, in `GPT2LMHeadModel`, the repeated block/class is `GPT2Block`, and can be + accessed as `model.transformer.h[0]`. The rest of the model (e.g. model.lm_head) is compiled separately. + + This allows us to speed up the compilation overhead / cold start of models like LLMs and Transformers in general. + See https://pytorch.org/tutorials/recipes/regional_compilation.html for more details. + + Args: + module (`torch.nn.Module`): + The model to compile. + **compile_kwargs: + Additional keyword arguments to pass to `torch.compile()`. + + Returns: + `torch.nn.Module`: A new instance of the model with some compiled regions. 
+ + Example: + ```python + >>> from accelerate.utils import compile_regions + >>> from transformers import AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + >>> compiled_model = compile_regions(model, mode="reduce-overhead") + >>> compiled_model.transformer.h[0] + OptimizedModule( + (_orig_mod): GPT2Block( + (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True) + (attn): GPT2Attention( + (c_attn): Conv1D(nf=2304, nx=768) + (c_proj): Conv1D(nf=768, nx=768) + (attn_dropout): Dropout(p=0.1, inplace=False) + (resid_dropout): Dropout(p=0.1, inplace=False) + ) + (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True) + (mlp): GPT2MLP( + (c_fc): Conv1D(nf=3072, nx=768) + (c_proj): Conv1D(nf=768, nx=3072) + (act): NewGELUActivation() + (dropout): Dropout(p=0.1, inplace=False) + ) + ) + ) + ``` + """ + + def _compile_regions(module: torch.nn.Module, **compile_kwargs) -> torch.nn.Module: + if is_repeated_blocks(module): + new_module = torch.nn.ModuleList() + for submodule in module: + new_module.append(torch.compile(submodule, **compile_kwargs)) + elif has_repeated_blocks(module): + new_module = module.__class__.__new__(module.__class__) + new_module.__dict__.update(module.__dict__) + new_module._modules = {} + for name, submodule in module.named_children(): + new_module.add_module(name, _compile_regions(submodule, **compile_kwargs)) + else: + new_module = torch.compile(module, **compile_kwargs) + + return new_module + + new_module = _compile_regions(module, **compile_kwargs) + + if "_orig_mod" not in new_module.__dict__: + # Keeps a reference to the original module to decompile/unwrap it later + new_module.__dict__["_orig_mod"] = module + + return new_module + + +def compile_regions_deepspeed(module: torch.nn.Module, **compile_kwargs): + """ + Performs regional compilation the same way as `compile_regions`, but specifically for `DeepSpeedEngine.module`. 
+ Since the model is wrapped in a `DeepSpeedEngine` and has many added hooks, offloaded parameters, etc that + `torch.compile(...)` interferes with, version of trgional compilation uses the inplace `module.compile()` method + instead. + + Args: + module (`torch.nn.Module`): + The model to compile. + **compile_kwargs: + Additional keyword arguments to pass to `module.compile()`. + """ + + if is_repeated_blocks(module): + for submodule in module: + submodule.compile(**compile_kwargs) + elif has_repeated_blocks(module): + for child in module.children(): + compile_regions_deepspeed(child, **compile_kwargs) + else: # leaf node + module.compile(**compile_kwargs) + + +def model_has_dtensor(model: torch.nn.Module) -> bool: + """ + Check if the model has DTensor parameters. + + Args: + model (`torch.nn.Module`): + The model to check. + + Returns: + `bool`: Whether the model has DTensor parameters. + """ + if is_torch_version(">=", "2.5.0"): + from torch.distributed.tensor import DTensor + else: + # from torch 2.0.0 (oldest supported accelerate torch version), DTensor is in torch.distributed._tensor + from torch.distributed._tensor import DTensor + + return any(isinstance(p, DTensor) for p in model.parameters()) + + +def extract_model_from_parallel( + model, keep_fp32_wrapper: bool = True, keep_torch_compile: bool = True, recursive: bool = False +): + """ + Extract a model from its distributed containers. + + Args: + model (`torch.nn.Module`): + The model to extract. + keep_fp32_wrapper (`bool`, *optional*): + Whether to remove mixed precision hooks from the model. + keep_torch_compile (`bool`, *optional*): + Whether to unwrap compiled model. + recursive (`bool`, *optional*, defaults to `False`): + Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers + recursively, not just the top-level distributed containers. + + Returns: + `torch.nn.Module`: The extracted model. 
+ """ + options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) + + is_compiled = is_compiled_module(model) + has_compiled = has_compiled_regions(model) + + if is_compiled: + compiled_model = model + model = model._orig_mod + elif has_compiled: + compiled_model = model + model = model.__dict__["_orig_mod"] + + if is_deepspeed_available(): + from deepspeed import DeepSpeedEngine + + options += (DeepSpeedEngine,) + + if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available(): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + options += (FSDP,) + + while isinstance(model, options): + model = model.module + + if recursive: + # This is needed in cases such as using FSDPv2 on XLA + def _recursive_unwrap(module): + # Wrapped modules are standardly wrapped as `module`, similar to the cases earlier + # with DDP, DataParallel, DeepSpeed, and FSDP + if hasattr(module, "module"): + unwrapped_module = _recursive_unwrap(module.module) + else: + unwrapped_module = module + # Next unwrap child sublayers recursively + for name, child in unwrapped_module.named_children(): + setattr(unwrapped_module, name, _recursive_unwrap(child)) + return unwrapped_module + + # Start with top-level + model = _recursive_unwrap(model) + + if not keep_fp32_wrapper: + forward = model.forward + original_forward = model.__dict__.pop("_original_forward", None) + if original_forward is not None: + while hasattr(forward, "__wrapped__"): + forward = forward.__wrapped__ + if forward == original_forward: + break + model.forward = MethodType(forward, model) + if getattr(model, "_converted_to_transformer_engine", False): + convert_model(model, to_transformer_engine=False) + + if keep_torch_compile: + if is_compiled: + compiled_model._orig_mod = model + model = compiled_model + elif has_compiled: + compiled_model.__dict__["_orig_mod"] = model + model = compiled_model + + return model + + +def wait_for_everyone(): + 
""" + Introduces a blocking point in the script, making sure all processes have reached this point before continuing. + + + + Make sure all processes will reach this instruction otherwise one of your processes will hang forever. + + + """ + PartialState().wait_for_everyone() + + +def clean_state_dict_for_safetensors(state_dict: dict): + """ + Cleans the state dictionary from a model and removes tensor aliasing if present. + + Args: + state_dict (`dict`): + The state dictionary from a model + """ + ptrs = collections.defaultdict(list) + # When bnb serialization is used, weights in state dict can be strings + for name, tensor in state_dict.items(): + if not isinstance(tensor, str): + ptrs[id_tensor_storage(tensor)].append(name) + + # These are all pointers of tensors with shared memory + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + warn_names = set() + for names in shared_ptrs.values(): + # When not all duplicates have been cleaned, we still remove those keys but put a clear warning. + # If the link between tensors was done at runtime then `from_pretrained` will not get + # the key back leading to random tensor. A proper warning will be shown + # during reload (if applicable), but since the file is not necessarily compatible with + # the config, better show a proper warning. + found_names = [name for name in names if name in state_dict] + warn_names.update(found_names[1:]) + for name in found_names[1:]: + del state_dict[name] + if len(warn_names) > 0: + logger.warning( + f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading", + ) + state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()} + return state_dict + + +def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False): + """ + Save the data to disk. Use in place of `torch.save()`. 
+ + Args: + obj: + The data to save + f: + The file (or file-like object) to use to save the data + save_on_each_node (`bool`, *optional*, defaults to `False`): + Whether to only save on the global main process + safe_serialization (`bool`, *optional*, defaults to `False`): + Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`). + """ + # When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving. + # Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical. + # If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only + # one XLA tensor remaining. + if PartialState().distributed_type == DistributedType.XLA: + obj = xm._maybe_convert_to_cpu(obj) + # Check if it's a model and remove duplicates + if safe_serialization: + save_func = partial(safe_save_file, metadata={"format": "pt"}) + if isinstance(obj, OrderedDict): + obj = clean_state_dict_for_safetensors(obj) + else: + save_func = torch.save + + if PartialState().is_main_process and not save_on_each_node: + save_func(obj, f) + elif PartialState().is_local_main_process and save_on_each_node: + save_func(obj, f) + + +# The following are considered "safe" globals to reconstruct various types of objects when using `weights_only=True` +# These should be added and then removed after loading in the file +np_core = np._core if is_numpy_available("2.0.0") else np.core +TORCH_SAFE_GLOBALS = [ + # numpy arrays are just numbers, not objects, so we can reconstruct them safely + np_core.multiarray._reconstruct, + np.ndarray, + # The following are needed for the RNG states + encode, + np.dtype, +] + +if is_numpy_available("1.25.0"): + TORCH_SAFE_GLOBALS.append(np.dtypes.UInt32DType) + + +def load(f, map_location=None, **kwargs): + """ + Compatible drop-in replacement of `torch.load()` which allows for `weights_only` to be used if `torch` version is + 2.4.0 or higher. Otherwise will ignore the kwarg. 
+ + Will also add (and then remove) an exception for numpy arrays + + Args: + f: + The file (or file-like object) to use to load the data + map_location: + a function, `torch.device`, string or a dict specifying how to remap storage locations + **kwargs: + Additional keyword arguments to pass to `torch.load()`. + """ + try: + if is_weights_only_available(): + old_safe_globals = torch.serialization.get_safe_globals() + if "weights_only" not in kwargs: + kwargs["weights_only"] = True + torch.serialization.add_safe_globals(TORCH_SAFE_GLOBALS) + else: + kwargs.pop("weights_only", None) + loaded_obj = torch.load(f, map_location=map_location, **kwargs) + finally: + if is_weights_only_available(): + torch.serialization.clear_safe_globals() + if old_safe_globals: + torch.serialization.add_safe_globals(old_safe_globals) + return loaded_obj + + +def get_pretty_name(obj): + """ + Gets a pretty name from `obj`. + """ + if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"): + obj = getattr(obj, "__class__", obj) + if hasattr(obj, "__qualname__"): + return obj.__qualname__ + if hasattr(obj, "__name__"): + return obj.__name__ + return str(obj) + + +def merge_dicts(source, destination): + """ + Recursively merges two dictionaries. + + Args: + source (`dict`): The dictionary to merge into `destination`. + destination (`dict`): The dictionary to merge `source` into. + """ + for key, value in source.items(): + if isinstance(value, dict): + node = destination.setdefault(key, {}) + merge_dicts(value, node) + else: + destination[key] = value + + return destination + + +def is_port_in_use(port: int = None) -> bool: + """ + Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been + run and need to see if the port is already in use. 
+ """ + if port is None: + port = 29500 + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex(("localhost", port)) == 0 + + +def get_free_port() -> int: + """ + Gets a free port on `localhost`. Useful for automatic port selection when port 0 is specified in distributed + training scenarios. + + Returns: + int: An available port number + """ + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) # bind to port 0 for OS to assign a free port + return s.getsockname()[1] + + +def convert_bytes(size): + "Converts `size` from bytes to the largest possible unit" + for x in ["bytes", "KB", "MB", "GB", "TB"]: + if size < 1024.0: + return f"{round(size, 2)} {x}" + size /= 1024.0 + + return f"{round(size, 2)} PB" + + +def check_os_kernel(): + """Warns if the kernel version is below the recommended minimum on Linux.""" + # see issue #1929 + info = platform.uname() + system = info.system + if system != "Linux": + return + + _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release) + min_version = "5.5.0" + if Version(version) < Version(min_version): + msg = ( + f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can " + "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher." + ) + logger.warning(msg, main_process_only=True) + + +def recursive_getattr(obj, attr: str): + """ + Recursive `getattr`. + + Args: + obj: + A class instance holding the attribute. + attr (`str`): + The attribute that is to be retrieved, e.g. 'attribute1.attribute2'. + """ + + def _getattr(obj, attr): + return getattr(obj, attr) + + return reduce(_getattr, [obj] + attr.split(".")) + + +def get_module_children_bottom_up(model: torch.nn.Module, return_fqns: bool = False) -> list[torch.nn.Module]: + """Traverse the model in bottom-up order and return the children modules in that order. 
+ + Args: + model (`torch.nn.Module`): the model to get the children of + + Returns: + `list[torch.nn.Module]`: a list of children modules of `model` in bottom-up order. The last element is the + `model` itself. + """ + top = model if not return_fqns else ("", model) + stack = [top] + ordered_modules = [] + while stack: + current_module = stack.pop() + if return_fqns: + current_module_name, current_module = current_module + for name, attr in current_module.named_children(): + if isinstance(attr, torch.nn.Module): + if return_fqns: + child_name = current_module_name + "." + name if current_module_name else name + stack.append((child_name, attr)) + else: + stack.append(attr) + if return_fqns: + ordered_modules.append((current_module_name, current_module)) + else: + ordered_modules.append(current_module) + return ordered_modules[::-1] diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/random.py b/venv/lib/python3.10/site-packages/accelerate/utils/random.py new file mode 100644 index 0000000000000000000000000000000000000000..9dceb598cacc1c1d17b198e0a7c19789ab3b9f39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/random.py @@ -0,0 +1,156 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

import random
from typing import Optional, Union

import numpy as np
import torch

from ..state import AcceleratorState
from .constants import CUDA_DISTRIBUTED_TYPES
from .dataclasses import DistributedType, RNGType
from .imports import (
    is_hpu_available,
    is_mlu_available,
    is_musa_available,
    is_npu_available,
    is_sdaa_available,
    is_torch_xla_available,
    is_xpu_available,
)


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm


def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
    """
    Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.

    Args:
        seed (`int`):
            The seed to set.
        device_specific (`bool`, *optional*, defaults to `False`):
            Whether to differ the seed on each device slightly with `self.process_index`.
        deterministic (`bool`, *optional*, defaults to `False`):
            Whether to use deterministic algorithms where available. Can slow down training.
    """
    if device_specific:
        seed += AcceleratorState().process_index
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed exactly one accelerator backend; `torch.cuda.manual_seed_all` is the
    # fallback because it is safe to call even when CUDA is not available.
    if is_xpu_available():
        torch.xpu.manual_seed_all(seed)
    elif is_npu_available():
        torch.npu.manual_seed_all(seed)
    elif is_mlu_available():
        torch.mlu.manual_seed_all(seed)
    elif is_sdaa_available():
        torch.sdaa.manual_seed_all(seed)
    elif is_musa_available():
        torch.musa.manual_seed_all(seed)
    elif is_hpu_available():
        torch.hpu.manual_seed_all(seed)
    else:
        torch.cuda.manual_seed_all(seed)
        # ^^ safe to call this function even if cuda is not available
    if is_torch_xla_available():
        xm.set_rng_state(seed)

    if deterministic:
        torch.use_deterministic_algorithms(True)


def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
    """
    Synchronizes one RNG state across processes: reads the state for `rng_type` on every
    process, broadcasts process 0's state to all others, then installs the broadcast state.

    Args:
        rng_type (`RNGType`, *optional*):
            Which RNG to synchronize (torch, cuda, xla, npu, mlu, sdaa, musa, xpu, hpu,
            or a passed-in generator).
        generator (`torch.Generator`, *optional*):
            Required when `rng_type` is `RNGType.GENERATOR`.
    """
    # NOTE(review): if `rng_type` is None (or an unhandled value), `rng_state` below is
    # referenced before assignment and raises NameError — confirm callers always pass a
    # valid type (see `synchronize_rng_states`).
    # Get the proper rng state
    if rng_type == RNGType.TORCH:
        rng_state = torch.get_rng_state()
    elif rng_type == RNGType.CUDA:
        rng_state = torch.cuda.get_rng_state()
    elif rng_type == RNGType.XLA:
        assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
        rng_state = torch.tensor(xm.get_rng_state())
    elif rng_type == RNGType.NPU:
        assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
        rng_state = torch.npu.get_rng_state()
    elif rng_type == RNGType.MLU:
        assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
        rng_state = torch.mlu.get_rng_state()
    elif rng_type == RNGType.SDAA:
        assert is_sdaa_available(), "Can't synchronize SDAA seeds on an environment without SDAAs."
        rng_state = torch.sdaa.get_rng_state()
    elif rng_type == RNGType.MUSA:
        assert is_musa_available(), "Can't synchronize MUSA seeds on an environment without MUSAs."
        rng_state = torch.musa.get_rng_state()
    elif rng_type == RNGType.XPU:
        assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
        rng_state = torch.xpu.get_rng_state()
    elif rng_type == RNGType.HPU:
        assert is_hpu_available(), "Can't synchronize HPU seeds on an environment without HPUs."
        rng_state = torch.hpu.get_rng_state()
    elif rng_type == RNGType.GENERATOR:
        assert generator is not None, "Need a generator to synchronize its seed."
        rng_state = generator.get_state()

    # Broadcast the rng state from device 0 to other devices
    state = AcceleratorState()
    if state.distributed_type == DistributedType.XLA:
        rng_state = rng_state.to(xm.xla_device())
        xm.collective_broadcast([rng_state])
        xm.mark_step()
        rng_state = rng_state.cpu()
    elif (
        state.distributed_type in CUDA_DISTRIBUTED_TYPES
        or state.distributed_type == DistributedType.MULTI_MLU
        or state.distributed_type == DistributedType.MULTI_SDAA
        or state.distributed_type == DistributedType.MULTI_MUSA
        or state.distributed_type == DistributedType.MULTI_NPU
        or state.distributed_type == DistributedType.MULTI_XPU
        or state.distributed_type == DistributedType.MULTI_HPU
    ):
        rng_state = rng_state.to(state.device)
        torch.distributed.broadcast(rng_state, 0)
        rng_state = rng_state.cpu()
    elif state.distributed_type == DistributedType.MULTI_CPU:
        torch.distributed.broadcast(rng_state, 0)

    # Set the broadcast rng state
    if rng_type == RNGType.TORCH:
        torch.set_rng_state(rng_state)
    elif rng_type == RNGType.CUDA:
        torch.cuda.set_rng_state(rng_state)
    elif rng_type == RNGType.NPU:
        torch.npu.set_rng_state(rng_state)
    elif rng_type == RNGType.MLU:
        torch.mlu.set_rng_state(rng_state)
    elif rng_type == RNGType.SDAA:
        torch.sdaa.set_rng_state(rng_state)
    elif rng_type == RNGType.MUSA:
        torch.musa.set_rng_state(rng_state)
    elif rng_type == RNGType.XPU:
        torch.xpu.set_rng_state(rng_state)
    elif rng_type == RNGType.HPU:
        # Fixed: was `rng_state == RNGType.HPU` (comparing the state *tensor* to the
        # enum), which never matched, so the HPU RNG state was broadcast but never set.
        torch.hpu.set_rng_state(rng_state)
    elif rng_type == RNGType.XLA:
        xm.set_rng_state(rng_state.item())
    elif rng_type == RNGType.GENERATOR:
        generator.set_state(rng_state)


def synchronize_rng_states(rng_types: list[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
    """Synchronizes every RNG listed in `rng_types` (see `synchronize_rng_state`)."""
    for rng_type in rng_types:
        synchronize_rng_state(RNGType(rng_type), generator=generator)
b/venv/lib/python3.10/site-packages/accelerate/utils/rich.py new file mode 100644 index 0000000000000000000000000000000000000000..2d48661b7fcef92ef1168b74cc275c6d3ccc67a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/rich.py @@ -0,0 +1,24 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .imports import is_rich_available + + +if is_rich_available(): + from rich.traceback import install + + install(show_locals=False) + +else: + raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`") diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py b/venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py new file mode 100644 index 0000000000000000000000000000000000000000..140133926c2f88d39c70f5a9f46a08f88bed36da --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py @@ -0,0 +1,51 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.metadata +import subprocess +import sys + + +def install_xla(upgrade: bool = False): + """ + Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory. + + Args: + upgrade (`bool`, *optional*, defaults to `False`): + Whether to upgrade `torch` and install the latest `torch_xla` wheels. + + Example: + + ```python + >>> from accelerate.utils import install_xla + + >>> install_xla(upgrade=True) + ``` + """ + in_colab = False + if "IPython" in sys.modules: + in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) + + if in_colab: + if upgrade: + torch_install_cmd = ["pip", "install", "-U", "torch"] + subprocess.run(torch_install_cmd, check=True) + # get the current version of torch + torch_version = importlib.metadata.version("torch") + torch_version_trunc = torch_version[: torch_version.rindex(".")] + xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl" + xla_install_cmd = ["pip", "install", xla_wheel] + subprocess.run(xla_install_cmd, check=True) + else: + raise RuntimeError("`install_xla` utility works only on google colab.") diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/tqdm.py b/venv/lib/python3.10/site-packages/accelerate/utils/tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..2d4873c1573eb2ee7392162f440a76d4f07cd8ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/tqdm.py @@ -0,0 +1,43 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .imports import is_tqdm_available + + +if is_tqdm_available(): + from tqdm.auto import tqdm as _tqdm + +from ..state import PartialState + + +def tqdm(*args, main_process_only: bool = True, **kwargs): + """ + Wrapper around `tqdm.tqdm` that optionally displays only on the main process. + + Args: + main_process_only (`bool`, *optional*): + Whether to display the progress bar only on the main process + """ + if not is_tqdm_available(): + raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.") + if len(args) > 0 and isinstance(args[0], bool): + raise ValueError( + "Passing `True` or `False` as the first argument to Accelerate's `tqdm` wrapper is unsupported. " + "Please use the `main_process_only` keyword argument instead." + ) + disable = kwargs.pop("disable", False) + if main_process_only and not disable: + disable = PartialState().local_process_index != 0 + return _tqdm(*args, **kwargs, disable=disable) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py b/venv/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..337213453b3d15f610f41ac71c5afe15e034b38c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py @@ -0,0 +1,164 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from types import MethodType

import torch.nn as nn

from .imports import is_hpu_available, is_transformer_engine_available
from .operations import GatheredParameters


# Do not import `transformer_engine` at package level to avoid potential issues


def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True):
    """
    Recursively converts the linear and layernorm layers of a model to their `transformers_engine` counterpart.

    Args:
        model: the `nn.Module` whose children are converted in place.
        to_transformer_engine (`bool`): convert `nn.Linear`/`nn.LayerNorm` to TE modules when
            `True`; convert TE modules back to plain `nn` modules when `False`.
        _convert_linear / _convert_ln (`bool`): internal toggles for each layer family.
    """
    if not is_transformer_engine_available():
        raise ImportError("Using `convert_model` requires transformer_engine to be installed.")

    if is_hpu_available():
        import intel_transformer_engine as te

        if not hasattr(te, "LayerNorm"):
            # HPU does not have a LayerNorm implementation in TE
            te.LayerNorm = nn.LayerNorm
    else:
        import transformer_engine.pytorch as te

    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear:
            has_bias = module.bias is not None
            params_to_gather = [module.weight]
            if has_bias:
                params_to_gather.append(module.bias)

            with GatheredParameters(params_to_gather, modifier_rank=0):
                # TE kernels require dimensions divisible by 16.
                # NOTE(review): this `return` aborts conversion of ALL remaining
                # modules, not just this layer — confirm a per-layer skip wasn't
                # intended. Behavior preserved here.
                if any(p % 16 != 0 for p in module.weight.shape):
                    return
                te_module = te.Linear(
                    module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
                )
                te_module.weight.copy_(module.weight)
                if has_bias:
                    te_module.bias.copy_(module.bias)

            setattr(model, name, te_module)
        # Note: @xrsrke (Phuc) found that te.LayerNorm doesn't have any real memory savings or speedups over nn.LayerNorm
        elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
            with GatheredParameters([module.weight, module.bias], modifier_rank=0):
                te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
                te_module.weight.copy_(module.weight)
                te_module.bias.copy_(module.bias)

            setattr(model, name, te_module)
        elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
            has_bias = module.bias is not None
            # Fixed: `nn.Linear` accepts `dtype=`, not `params_dtype=` (a
            # transformer_engine-only kwarg); the original raised TypeError here.
            new_module = nn.Linear(
                module.in_features, module.out_features, bias=has_bias, dtype=module.weight.dtype
            )
            new_module.weight.copy_(module.weight)
            if has_bias:
                new_module.bias.copy_(module.bias)

            setattr(model, name, new_module)
        elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
            # Fixed: `nn.LayerNorm` likewise accepts `dtype=`, not `params_dtype=`.
            new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, dtype=module.weight.dtype)
            new_module.weight.copy_(module.weight)
            new_module.bias.copy_(module.bias)

            setattr(model, name, new_module)
        else:
            convert_model(
                module,
                to_transformer_engine=to_transformer_engine,
                _convert_linear=_convert_linear,
                _convert_ln=_convert_ln,
            )


def has_transformer_engine_layers(model):
    """
    Returns whether a given model has some `transformer_engine` layer or not.
    """
    if not is_transformer_engine_available():
        raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.")

    if is_hpu_available():
        import intel_transformer_engine as te

        module_cls_to_check = te.Linear
    else:
        import transformer_engine.pytorch as te

        module_cls_to_check = (te.LayerNorm, te.Linear, te.TransformerLayer)

    for m in model.modules():
        if isinstance(m, module_cls_to_check):
            return True

    return False


def contextual_fp8_autocast(model_forward, fp8_recipe, use_during_eval=False):
    """
    Wrapper for a model's forward method to apply FP8 autocast. Is context aware, meaning that by default it will
    disable FP8 autocast during eval mode, which is generally better for more accurate metrics.
    """
    if not is_transformer_engine_available():
        raise ImportError("Using `contextual_fp8_autocast` requires transformer_engine to be installed.")

    if is_hpu_available():
        from intel_transformer_engine import fp8_autocast
    else:
        from transformer_engine.pytorch import fp8_autocast

    def forward(self, *args, **kwargs):
        # Only autocast in eval mode when explicitly requested.
        enabled = use_during_eval or self.training
        with fp8_autocast(enabled=enabled, fp8_recipe=fp8_recipe):
            return model_forward(*args, **kwargs)

    # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
    forward.__wrapped__ = model_forward

    return forward


def apply_fp8_autowrap(model, fp8_recipe_handler):
    """
    Applies FP8 context manager to the model's forward method
    """
    if not is_transformer_engine_available():
        raise ImportError("Using `apply_fp8_autowrap` requires transformer_engine to be installed.")

    if is_hpu_available():
        import intel_transformer_engine.recipe as te_recipe
    else:
        import transformer_engine.common.recipe as te_recipe

    kwargs = fp8_recipe_handler.to_kwargs() if fp8_recipe_handler is not None else {}
    if "fp8_format" in kwargs:
        # Map the string name (e.g. "E4M3") onto the TE Format enum member.
        kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
    use_during_eval = kwargs.pop("use_autocast_during_eval", False)
    fp8_recipe = te_recipe.DelayedScaling(**kwargs)
    new_forward = contextual_fp8_autocast(model.forward, fp8_recipe, use_during_eval)

    # Bind as a method only when wrapping a bound method; plain functions are assigned directly.
    if hasattr(model.forward, "__func__"):
        model.forward = MethodType(new_forward, model)
    else:
        model.forward = new_forward

    return model
b/venv/lib/python3.10/site-packages/accelerate/utils/versions.py @@ -0,0 +1,56 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.metadata +from typing import Union + +from packaging.version import Version, parse + +from .constants import STR_OPERATION_TO_FUNC + + +torch_version = parse(importlib.metadata.version("torch")) + + +def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): + """ + Compares a library version to some requirement using a given operation. + + Args: + library_or_version (`str` or `packaging.version.Version`): + A library name or a version to check. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="`. + requirement_version (`str`): + The version to compare the library version against + """ + if operation not in STR_OPERATION_TO_FUNC.keys(): + raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") + operation = STR_OPERATION_TO_FUNC[operation] + if isinstance(library_or_version, str): + library_or_version = parse(importlib.metadata.version(library_or_version)) + return operation(library_or_version, parse(requirement_version)) + + +def is_torch_version(operation: str, version: str): + """ + Compares the current PyTorch version to a given reference with an operation. 
+ + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(torch_version, operation, version) diff --git a/venv/lib/python3.10/site-packages/aiohttp/.hash/_cparser.pxd.hash b/venv/lib/python3.10/site-packages/aiohttp/.hash/_cparser.pxd.hash new file mode 100644 index 0000000000000000000000000000000000000000..3f5cd0e6f720a049ee19149790c808558b492726 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/.hash/_cparser.pxd.hash @@ -0,0 +1 @@ +5276d46021e0e0d7577e0c9155800cbf62932d60a50783fec42aefb63febedec /home/runner/work/aiohttp/aiohttp/aiohttp/_cparser.pxd diff --git a/venv/lib/python3.10/site-packages/aiohttp/.hash/_find_header.pxd.hash b/venv/lib/python3.10/site-packages/aiohttp/.hash/_find_header.pxd.hash new file mode 100644 index 0000000000000000000000000000000000000000..f006c2de5d24a1b5a9c26f83c858127b5e12b07c --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/.hash/_find_header.pxd.hash @@ -0,0 +1 @@ +d067f01423cddb3c442933b5fcc039b18ab651fcec1bc91c577693aafc25cf78 /home/runner/work/aiohttp/aiohttp/aiohttp/_find_header.pxd diff --git a/venv/lib/python3.10/site-packages/aiohttp/.hash/_http_parser.pyx.hash b/venv/lib/python3.10/site-packages/aiohttp/.hash/_http_parser.pyx.hash new file mode 100644 index 0000000000000000000000000000000000000000..d8c2036a4ec28ee081de8fcd49f107bc0a7222d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/.hash/_http_parser.pyx.hash @@ -0,0 +1 @@ +d4bd3b3cab898e00c642eaa59b2f7ae5ae5aa1374e698597f7d805a302f23e21 /home/runner/work/aiohttp/aiohttp/aiohttp/_http_parser.pyx diff --git a/venv/lib/python3.10/site-packages/aiohttp/.hash/_http_writer.pyx.hash b/venv/lib/python3.10/site-packages/aiohttp/.hash/_http_writer.pyx.hash new file mode 100644 index 0000000000000000000000000000000000000000..771a6b17aa5bc2ecd8909c2d3c4b10ce769e55d0 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/aiohttp/.hash/_http_writer.pyx.hash @@ -0,0 +1 @@ +f7ab1e2628277b82772d59c1dc3033c13495d769df67b1d1d49b1a474a75dd52 /home/runner/work/aiohttp/aiohttp/aiohttp/_http_writer.pyx diff --git a/venv/lib/python3.10/site-packages/aiohttp/.hash/hdrs.py.hash b/venv/lib/python3.10/site-packages/aiohttp/.hash/hdrs.py.hash new file mode 100644 index 0000000000000000000000000000000000000000..c8d55240e6a55c305c99244966dee5615565716b --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/.hash/hdrs.py.hash @@ -0,0 +1 @@ +dab8f933203eeb245d60f856e542a45b888d5a110094620e4811f90f816628d1 /home/runner/work/aiohttp/aiohttp/aiohttp/hdrs.py diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..085d1db0fa3708901e8a8bca812a6ca4b61ad62f Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/_cookie_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/_cookie_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54bf261207ce3ae9d554e3e5758ef254d7bf8549 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/_cookie_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/abc.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/abc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f587c3d0823a2cfe9825992dc66064a104e7e67 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/abc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/base_protocol.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/base_protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d86db7285b368df1778a07663adbebd43e0be059 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/base_protocol.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d49742ebcc42b218465d4c74138d7ef0121d6453 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45a1bb0034ab95291a7627641dc5aa9aec48272c Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_middleware_digest_auth.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_middleware_digest_auth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c99bd400314cce03f98b2bf13b8a57d7fe3ec95 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_middleware_digest_auth.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_middlewares.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_middlewares.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9591a8f487c446e0ad9b97d687e1952398aa072 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_middlewares.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_proto.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_proto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cecb956efc610f6c1ccfe96c643c7e9cf81b748 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_proto.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_reqrep.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_reqrep.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2580a492557b7b54de9006e05b4861b620be0d38 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_reqrep.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_ws.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_ws.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ed70b2961685360d64d40b2b858256f7bf92e37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/client_ws.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/compression_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/compression_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a13da3fd18b0f1cbd2a83cbc1d55c4d8c56d12fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/compression_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/connector.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/connector.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9f53e0f66f96cf81f19e5584c6f23eaf7aa9da3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/connector.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/cookiejar.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/cookiejar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8abf10e84f8fb2376125b03d5b03bc41deab941c Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/cookiejar.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/formdata.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/formdata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..392407cc6aeffd2b611195535e93b9fe9a550739 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/formdata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/hdrs.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/hdrs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30f6b25046ee529462221569049cbbba2cdbede5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/hdrs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46e6f29e0a92de83ea4979e69f72efca5ca06e37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8a774263e4493bb21ed8735c1cd1a688e45c9387 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccb5b0b405b7f9b5690c43ca4c8196ef27d312c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_parser.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86557b4e3866faf15c8518a5c023340d94e351d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_parser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_websocket.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_websocket.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac21afbe19f21d16c5ab684402e4a161c5c5ab7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_websocket.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_writer.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c016016d306965ca7e00610088cc83959377f7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/http_writer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/log.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49247e31e3061e770f525fefbe896a083786d67d Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/log.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/multipart.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/multipart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47367346a9fcea4bcca74a8742e590824bf362f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/multipart.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/payload.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/payload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f85c79bfea6a0c66b74ab57936d27d88da870fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/payload.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/payload_streamer.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/payload_streamer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93c9d3e1b5f46d1890852d93958512bd3db51c07 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/payload_streamer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/pytest_plugin.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/pytest_plugin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6159d7afcb4295228cfa26b016ad258a297d48e Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/pytest_plugin.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/resolver.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/resolver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06f9d91a1795a7b3be0bc5919764cc1fd5577ed0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/resolver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/streams.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e586f777619db8a079af6e8374d195db02e5a5d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/streams.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/tcp_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/tcp_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c91293022479a7f2cc550383b11b38aeecec289e Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/tcp_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/test_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53cf2579fba38da050a24dfaf12b74718369de8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/test_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/tracing.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/tracing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59f3571d55d519a5756819418bbf723a95ae8d38 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/tracing.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/typedefs.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/typedefs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b09924d344cf0fbbfe382eb0f90fa5d9bb74d7f Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/typedefs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5284d5796ce3e5af08b921555e6f5188bdba6c94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fa651f31e924ac021145b51eaf02de5229c3da5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_app.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e95a33e220a47964de0c75b7874875cf18cd5b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_fileresponse.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_fileresponse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9a35371251645e9155607576aa07338047fa76a Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_fileresponse.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_log.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edfc538316fbb1576b0bfa516bd19e3e9724c4c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_log.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_middlewares.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_middlewares.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4542f333b0bb77dc81056d9ea0c6916a92c4ba35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_middlewares.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_protocol.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3b5f7cb9b6646fce7e7d03bd58de83f3d928f41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_protocol.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_request.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_request.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..430efb63a3563d58fea97becc1dc62cd04a9df71 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_request.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_response.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_response.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea13057a6dcf463a7888b70fa289cd1dfd827a46 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_response.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_routedef.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_routedef.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1218092338da34e2a80191e83676f7f0b3ffaf8d Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_routedef.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_runner.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeed192a9296f96a2b05123d3641515a56f66021 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_runner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_server.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_server.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5b997d3bb6dffedec9a024371e5f1d2b4282c7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_server.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_urldispatcher.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_urldispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11dc2931f65a1e5b02049a962e28bdfe0c8c74b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_urldispatcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_ws.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_ws.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7c25129913b51c0212c9a4be835020923222f1d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/web_ws.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/__pycache__/worker.cpython-310.pyc b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/worker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f939d5550f42026940bc2078c5c114da1f204dd7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/aiohttp/__pycache__/worker.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/__init__.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..836257cc47aba3e74863c7de0e098d0835bcee1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/__init__.py @@ -0,0 +1 @@ +"""WebSocket protocol versions 13 and 8.""" diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/helpers.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb58df9228603fc1eb79a7a2cac8301217a36a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/helpers.py @@ -0,0 +1,147 @@ +"""Helpers for WebSocket protocol versions 13 and 8.""" + +import functools +import re +from struct import Struct +from typing import TYPE_CHECKING, Final, List, Optional, Pattern, Tuple + +from ..helpers import NO_EXTENSIONS +from .models import WSHandshakeError + +UNPACK_LEN3 = Struct("!Q").unpack_from +UNPACK_CLOSE_CODE = Struct("!H").unpack +PACK_LEN1 = Struct("!BB").pack +PACK_LEN2 = Struct("!BBH").pack +PACK_LEN3 = Struct("!BBQ").pack +PACK_CLOSE_CODE = Struct("!H").pack +PACK_RANDBITS = Struct("!L").pack +MSG_SIZE: Final[int] = 2**14 +MASK_LEN: Final[int] = 4 + +WS_KEY: Final[bytes] = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + +# Used by 
_websocket_mask_python +@functools.lru_cache +def _xor_table() -> List[bytes]: + return [bytes(a ^ b for a in range(256)) for b in range(256)] + + +def _websocket_mask_python(mask: bytes, data: bytearray) -> None: + """Websocket masking function. + + `mask` is a `bytes` object of length 4; `data` is a `bytearray` + object of any length. The contents of `data` are masked with `mask`, + as specified in section 5.3 of RFC 6455. + + Note that this function mutates the `data` argument. + + This pure-python implementation may be replaced by an optimized + version when available. + + """ + assert isinstance(data, bytearray), data + assert len(mask) == 4, mask + + if data: + _XOR_TABLE = _xor_table() + a, b, c, d = (_XOR_TABLE[n] for n in mask) + data[::4] = data[::4].translate(a) + data[1::4] = data[1::4].translate(b) + data[2::4] = data[2::4].translate(c) + data[3::4] = data[3::4].translate(d) + + +if TYPE_CHECKING or NO_EXTENSIONS: # pragma: no cover + websocket_mask = _websocket_mask_python +else: + try: + from .mask import _websocket_mask_cython # type: ignore[import-not-found] + + websocket_mask = _websocket_mask_cython + except ImportError: # pragma: no cover + websocket_mask = _websocket_mask_python + + +_WS_EXT_RE: Final[Pattern[str]] = re.compile( + r"^(?:;\s*(?:" + r"(server_no_context_takeover)|" + r"(client_no_context_takeover)|" + r"(server_max_window_bits(?:=(\d+))?)|" + r"(client_max_window_bits(?:=(\d+))?)))*$" +) + +_WS_EXT_RE_SPLIT: Final[Pattern[str]] = re.compile(r"permessage-deflate([^,]+)?") + + +def ws_ext_parse(extstr: Optional[str], isserver: bool = False) -> Tuple[int, bool]: + if not extstr: + return 0, False + + compress = 0 + notakeover = False + for ext in _WS_EXT_RE_SPLIT.finditer(extstr): + defext = ext.group(1) + # Return compress = 15 when get `permessage-deflate` + if not defext: + compress = 15 + break + match = _WS_EXT_RE.match(defext) + if match: + compress = 15 + if isserver: + # Server never fail to detect compress handshake. 
+ # Server does not need to send max wbit to client + if match.group(4): + compress = int(match.group(4)) + # Group3 must match if group4 matches + # Compress wbit 8 does not support in zlib + # If compress level not support, + # CONTINUE to next extension + if compress > 15 or compress < 9: + compress = 0 + continue + if match.group(1): + notakeover = True + # Ignore regex group 5 & 6 for client_max_window_bits + break + else: + if match.group(6): + compress = int(match.group(6)) + # Group5 must match if group6 matches + # Compress wbit 8 does not support in zlib + # If compress level not support, + # FAIL the parse progress + if compress > 15 or compress < 9: + raise WSHandshakeError("Invalid window size") + if match.group(2): + notakeover = True + # Ignore regex group 5 & 6 for client_max_window_bits + break + # Return Fail if client side and not match + elif not isserver: + raise WSHandshakeError("Extension for deflate not supported" + ext.group(1)) + + return compress, notakeover + + +def ws_ext_gen( + compress: int = 15, isserver: bool = False, server_notakeover: bool = False +) -> str: + # client_notakeover=False not used for server + # compress wbit 8 does not support in zlib + if compress < 9 or compress > 15: + raise ValueError( + "Compress wbits must between 9 and 15, zlib does not support wbits=8" + ) + enabledext = ["permessage-deflate"] + if not isserver: + enabledext.append("client_max_window_bits") + + if compress < 15: + enabledext.append("server_max_window_bits=" + str(compress)) + if server_notakeover: + enabledext.append("server_no_context_takeover") + # if client_notakeover: + # enabledext.append('client_no_context_takeover') + return "; ".join(enabledext) diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.pxd b/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.pxd new file mode 100644 index 0000000000000000000000000000000000000000..90983de9ac7e59dfceb639c1e7b656abd5fbb305 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.pxd @@ -0,0 +1,3 @@ +"""Cython declarations for websocket masking.""" + +cpdef void _websocket_mask_cython(bytes mask, bytearray data) diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.pyx b/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.pyx new file mode 100644 index 0000000000000000000000000000000000000000..2d956c8899644d4c6bce042b928be1f23e51293a --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.pyx @@ -0,0 +1,48 @@ +from cpython cimport PyBytes_AsString + + +#from cpython cimport PyByteArray_AsString # cython still not exports that +cdef extern from "Python.h": + char* PyByteArray_AsString(bytearray ba) except NULL + +from libc.stdint cimport uint32_t, uint64_t, uintmax_t + + +cpdef void _websocket_mask_cython(bytes mask, bytearray data): + """Note, this function mutates its `data` argument + """ + cdef: + Py_ssize_t data_len, i + # bit operations on signed integers are implementation-specific + unsigned char * in_buf + const unsigned char * mask_buf + uint32_t uint32_msk + uint64_t uint64_msk + + assert len(mask) == 4 + + data_len = len(data) + in_buf = PyByteArray_AsString(data) + mask_buf = PyBytes_AsString(mask) + uint32_msk = (mask_buf)[0] + + # TODO: align in_data ptr to achieve even faster speeds + # does it need in python ?! 
malloc() always aligns to sizeof(long) bytes + + if sizeof(size_t) >= 8: + uint64_msk = uint32_msk + uint64_msk = (uint64_msk << 32) | uint32_msk + + while data_len >= 8: + (in_buf)[0] ^= uint64_msk + in_buf += 8 + data_len -= 8 + + + while data_len >= 4: + (in_buf)[0] ^= uint32_msk + in_buf += 4 + data_len -= 4 + + for i in range(0, data_len): + in_buf[i] ^= mask_buf[i] diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/models.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/models.py new file mode 100644 index 0000000000000000000000000000000000000000..7e89b9652957e8f4e73916e18048368e8d75911e --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/models.py @@ -0,0 +1,84 @@ +"""Models for WebSocket protocol versions 13 and 8.""" + +import json +from enum import IntEnum +from typing import Any, Callable, Final, NamedTuple, Optional, cast + +WS_DEFLATE_TRAILING: Final[bytes] = bytes([0x00, 0x00, 0xFF, 0xFF]) + + +class WSCloseCode(IntEnum): + OK = 1000 + GOING_AWAY = 1001 + PROTOCOL_ERROR = 1002 + UNSUPPORTED_DATA = 1003 + ABNORMAL_CLOSURE = 1006 + INVALID_TEXT = 1007 + POLICY_VIOLATION = 1008 + MESSAGE_TOO_BIG = 1009 + MANDATORY_EXTENSION = 1010 + INTERNAL_ERROR = 1011 + SERVICE_RESTART = 1012 + TRY_AGAIN_LATER = 1013 + BAD_GATEWAY = 1014 + + +class WSMsgType(IntEnum): + # websocket spec types + CONTINUATION = 0x0 + TEXT = 0x1 + BINARY = 0x2 + PING = 0x9 + PONG = 0xA + CLOSE = 0x8 + + # aiohttp specific types + CLOSING = 0x100 + CLOSED = 0x101 + ERROR = 0x102 + + text = TEXT + binary = BINARY + ping = PING + pong = PONG + close = CLOSE + closing = CLOSING + closed = CLOSED + error = ERROR + + +class WSMessage(NamedTuple): + type: WSMsgType + # To type correctly, this would need some kind of tagged union for each type. + data: Any + extra: Optional[str] + + def json(self, *, loads: Callable[[Any], Any] = json.loads) -> Any: + """Return parsed JSON data. + + .. 
versionadded:: 0.22 + """ + return loads(self.data) + + +# Constructing the tuple directly to avoid the overhead of +# the lambda and arg processing since NamedTuples are constructed +# with a run time built lambda +# https://github.com/python/cpython/blob/d83fcf8371f2f33c7797bc8f5423a8bca8c46e5c/Lib/collections/__init__.py#L441 +WS_CLOSED_MESSAGE = tuple.__new__(WSMessage, (WSMsgType.CLOSED, None, None)) +WS_CLOSING_MESSAGE = tuple.__new__(WSMessage, (WSMsgType.CLOSING, None, None)) + + +class WebSocketError(Exception): + """WebSocket protocol parser error.""" + + def __init__(self, code: int, message: str) -> None: + self.code = code + super().__init__(code, message) + + def __str__(self) -> str: + return cast(str, self.args[1]) + + +class WSHandshakeError(Exception): + """WebSocket protocol handshake error.""" diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader.py new file mode 100644 index 0000000000000000000000000000000000000000..23f32265cfccbdc8c8fe2f1600accbfb6f816efa --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader.py @@ -0,0 +1,31 @@ +"""Reader for WebSocket protocol versions 13 and 8.""" + +from typing import TYPE_CHECKING + +from ..helpers import NO_EXTENSIONS + +if TYPE_CHECKING or NO_EXTENSIONS: # pragma: no cover + from .reader_py import ( + WebSocketDataQueue as WebSocketDataQueuePython, + WebSocketReader as WebSocketReaderPython, + ) + + WebSocketReader = WebSocketReaderPython + WebSocketDataQueue = WebSocketDataQueuePython +else: + try: + from .reader_c import ( # type: ignore[import-not-found] + WebSocketDataQueue as WebSocketDataQueueCython, + WebSocketReader as WebSocketReaderCython, + ) + + WebSocketReader = WebSocketReaderCython + WebSocketDataQueue = WebSocketDataQueueCython + except ImportError: # pragma: no cover + from .reader_py import ( + WebSocketDataQueue as WebSocketDataQueuePython, + WebSocketReader as 
WebSocketReaderPython, + ) + + WebSocketReader = WebSocketReaderPython + WebSocketDataQueue = WebSocketDataQueuePython diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.pxd b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a7620d8e87fddd189ed9dbb062e6ec34aa8d673e --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.pxd @@ -0,0 +1,110 @@ +import cython + +from .mask cimport _websocket_mask_cython as websocket_mask + + +cdef unsigned int READ_HEADER +cdef unsigned int READ_PAYLOAD_LENGTH +cdef unsigned int READ_PAYLOAD_MASK +cdef unsigned int READ_PAYLOAD + +cdef int OP_CODE_NOT_SET +cdef int OP_CODE_CONTINUATION +cdef int OP_CODE_TEXT +cdef int OP_CODE_BINARY +cdef int OP_CODE_CLOSE +cdef int OP_CODE_PING +cdef int OP_CODE_PONG + +cdef int COMPRESSED_NOT_SET +cdef int COMPRESSED_FALSE +cdef int COMPRESSED_TRUE + +cdef object UNPACK_LEN3 +cdef object UNPACK_CLOSE_CODE +cdef object TUPLE_NEW + +cdef object WSMsgType +cdef object WSMessage + +cdef object WS_MSG_TYPE_TEXT +cdef object WS_MSG_TYPE_BINARY + +cdef set ALLOWED_CLOSE_CODES +cdef set MESSAGE_TYPES_WITH_CONTENT + +cdef tuple EMPTY_FRAME +cdef tuple EMPTY_FRAME_ERROR + +cdef class WebSocketDataQueue: + + cdef unsigned int _size + cdef public object _protocol + cdef unsigned int _limit + cdef object _loop + cdef bint _eof + cdef object _waiter + cdef object _exception + cdef public object _buffer + cdef object _get_buffer + cdef object _put_buffer + + cdef void _release_waiter(self) + + cpdef void feed_data(self, object data, unsigned int size) + + @cython.locals(size="unsigned int") + cdef _read_from_buffer(self) + +cdef class WebSocketReader: + + cdef WebSocketDataQueue queue + cdef unsigned int _max_msg_size + + cdef Exception _exc + cdef bytearray _partial + cdef unsigned int _state + + cdef int _opcode + cdef bint _frame_fin + cdef int _frame_opcode + cdef list 
_payload_fragments + cdef Py_ssize_t _frame_payload_len + + cdef bytes _tail + cdef bint _has_mask + cdef bytes _frame_mask + cdef Py_ssize_t _payload_bytes_to_read + cdef unsigned int _payload_len_flag + cdef int _compressed + cdef object _decompressobj + cdef bint _compress + + cpdef tuple feed_data(self, object data) + + @cython.locals( + is_continuation=bint, + fin=bint, + has_partial=bint, + payload_merged=bytes, + ) + cpdef void _handle_frame(self, bint fin, int opcode, object payload, int compressed) except * + + @cython.locals( + start_pos=Py_ssize_t, + data_len=Py_ssize_t, + length=Py_ssize_t, + chunk_size=Py_ssize_t, + chunk_len=Py_ssize_t, + data_len=Py_ssize_t, + data_cstr="const unsigned char *", + first_byte="unsigned char", + second_byte="unsigned char", + f_start_pos=Py_ssize_t, + f_end_pos=Py_ssize_t, + has_mask=bint, + fin=bint, + had_fragments=Py_ssize_t, + payload_bytearray=bytearray, + ) + cpdef void _feed_data(self, bytes data) except * diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.py new file mode 100644 index 0000000000000000000000000000000000000000..f966a1593c5bc534442d1bd1a1067b0998969b28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.py @@ -0,0 +1,476 @@ +"""Reader for WebSocket protocol versions 13 and 8.""" + +import asyncio +import builtins +from collections import deque +from typing import Deque, Final, Optional, Set, Tuple, Union + +from ..base_protocol import BaseProtocol +from ..compression_utils import ZLibDecompressor +from ..helpers import _EXC_SENTINEL, set_exception +from ..streams import EofStream +from .helpers import UNPACK_CLOSE_CODE, UNPACK_LEN3, websocket_mask +from .models import ( + WS_DEFLATE_TRAILING, + WebSocketError, + WSCloseCode, + WSMessage, + WSMsgType, +) + +ALLOWED_CLOSE_CODES: Final[Set[int]] = {int(i) for i in WSCloseCode} + +# States for the reader, used to parse the WebSocket 
frame +# integer values are used so they can be cythonized +READ_HEADER = 1 +READ_PAYLOAD_LENGTH = 2 +READ_PAYLOAD_MASK = 3 +READ_PAYLOAD = 4 + +WS_MSG_TYPE_BINARY = WSMsgType.BINARY +WS_MSG_TYPE_TEXT = WSMsgType.TEXT + +# WSMsgType values unpacked so they can by cythonized to ints +OP_CODE_NOT_SET = -1 +OP_CODE_CONTINUATION = WSMsgType.CONTINUATION.value +OP_CODE_TEXT = WSMsgType.TEXT.value +OP_CODE_BINARY = WSMsgType.BINARY.value +OP_CODE_CLOSE = WSMsgType.CLOSE.value +OP_CODE_PING = WSMsgType.PING.value +OP_CODE_PONG = WSMsgType.PONG.value + +EMPTY_FRAME_ERROR = (True, b"") +EMPTY_FRAME = (False, b"") + +COMPRESSED_NOT_SET = -1 +COMPRESSED_FALSE = 0 +COMPRESSED_TRUE = 1 + +TUPLE_NEW = tuple.__new__ + +cython_int = int # Typed to int in Python, but cython with use a signed int in the pxd + + +class WebSocketDataQueue: + """WebSocketDataQueue resumes and pauses an underlying stream. + + It is a destination for WebSocket data. + """ + + def __init__( + self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop + ) -> None: + self._size = 0 + self._protocol = protocol + self._limit = limit * 2 + self._loop = loop + self._eof = False + self._waiter: Optional[asyncio.Future[None]] = None + self._exception: Union[BaseException, None] = None + self._buffer: Deque[Tuple[WSMessage, int]] = deque() + self._get_buffer = self._buffer.popleft + self._put_buffer = self._buffer.append + + def is_eof(self) -> bool: + return self._eof + + def exception(self) -> Optional[BaseException]: + return self._exception + + def set_exception( + self, + exc: BaseException, + exc_cause: builtins.BaseException = _EXC_SENTINEL, + ) -> None: + self._eof = True + self._exception = exc + if (waiter := self._waiter) is not None: + self._waiter = None + set_exception(waiter, exc, exc_cause) + + def _release_waiter(self) -> None: + if (waiter := self._waiter) is None: + return + self._waiter = None + if not waiter.done(): + waiter.set_result(None) + + def feed_eof(self) -> None: + 
self._eof = True + self._release_waiter() + self._exception = None # Break cyclic references + + def feed_data(self, data: "WSMessage", size: "cython_int") -> None: + self._size += size + self._put_buffer((data, size)) + self._release_waiter() + if self._size > self._limit and not self._protocol._reading_paused: + self._protocol.pause_reading() + + async def read(self) -> WSMessage: + if not self._buffer and not self._eof: + assert not self._waiter + self._waiter = self._loop.create_future() + try: + await self._waiter + except (asyncio.CancelledError, asyncio.TimeoutError): + self._waiter = None + raise + return self._read_from_buffer() + + def _read_from_buffer(self) -> WSMessage: + if self._buffer: + data, size = self._get_buffer() + self._size -= size + if self._size < self._limit and self._protocol._reading_paused: + self._protocol.resume_reading() + return data + if self._exception is not None: + raise self._exception + raise EofStream + + +class WebSocketReader: + def __init__( + self, queue: WebSocketDataQueue, max_msg_size: int, compress: bool = True + ) -> None: + self.queue = queue + self._max_msg_size = max_msg_size + + self._exc: Optional[Exception] = None + self._partial = bytearray() + self._state = READ_HEADER + + self._opcode: int = OP_CODE_NOT_SET + self._frame_fin = False + self._frame_opcode: int = OP_CODE_NOT_SET + self._payload_fragments: list[bytes] = [] + self._frame_payload_len = 0 + + self._tail: bytes = b"" + self._has_mask = False + self._frame_mask: Optional[bytes] = None + self._payload_bytes_to_read = 0 + self._payload_len_flag = 0 + self._compressed: int = COMPRESSED_NOT_SET + self._decompressobj: Optional[ZLibDecompressor] = None + self._compress = compress + + def feed_eof(self) -> None: + self.queue.feed_eof() + + # data can be bytearray on Windows because proactor event loop uses bytearray + # and asyncio types this to Union[bytes, bytearray, memoryview] so we need + # coerce data to bytes if it is not + def feed_data( + self, 
data: Union[bytes, bytearray, memoryview] + ) -> Tuple[bool, bytes]: + if type(data) is not bytes: + data = bytes(data) + + if self._exc is not None: + return True, data + + try: + self._feed_data(data) + except Exception as exc: + self._exc = exc + set_exception(self.queue, exc) + return EMPTY_FRAME_ERROR + + return EMPTY_FRAME + + def _handle_frame( + self, + fin: bool, + opcode: Union[int, cython_int], # Union intended: Cython pxd uses C int + payload: Union[bytes, bytearray], + compressed: Union[int, cython_int], # Union intended: Cython pxd uses C int + ) -> None: + msg: WSMessage + if opcode in {OP_CODE_TEXT, OP_CODE_BINARY, OP_CODE_CONTINUATION}: + # load text/binary + if not fin: + # got partial frame payload + if opcode != OP_CODE_CONTINUATION: + self._opcode = opcode + self._partial += payload + if self._max_msg_size and len(self._partial) >= self._max_msg_size: + raise WebSocketError( + WSCloseCode.MESSAGE_TOO_BIG, + f"Message size {len(self._partial)} " + f"exceeds limit {self._max_msg_size}", + ) + return + + has_partial = bool(self._partial) + if opcode == OP_CODE_CONTINUATION: + if self._opcode == OP_CODE_NOT_SET: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Continuation frame for non started message", + ) + opcode = self._opcode + self._opcode = OP_CODE_NOT_SET + # previous frame was non finished + # we should get continuation opcode + elif has_partial: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "The opcode in non-fin frame is expected " + f"to be zero, got {opcode!r}", + ) + + assembled_payload: Union[bytes, bytearray] + if has_partial: + assembled_payload = self._partial + payload + self._partial.clear() + else: + assembled_payload = payload + + if self._max_msg_size and len(assembled_payload) >= self._max_msg_size: + raise WebSocketError( + WSCloseCode.MESSAGE_TOO_BIG, + f"Message size {len(assembled_payload)} " + f"exceeds limit {self._max_msg_size}", + ) + + # Decompress process must to be done after all packets + # 
received. + if compressed: + if not self._decompressobj: + self._decompressobj = ZLibDecompressor(suppress_deflate_header=True) + # XXX: It's possible that the zlib backend (isal is known to + # do this, maybe others too?) will return max_length bytes, + # but internally buffer more data such that the payload is + # >max_length, so we return one extra byte and if we're able + # to do that, then the message is too big. + payload_merged = self._decompressobj.decompress_sync( + assembled_payload + WS_DEFLATE_TRAILING, + ( + self._max_msg_size + 1 + if self._max_msg_size + else self._max_msg_size + ), + ) + if self._max_msg_size and len(payload_merged) > self._max_msg_size: + raise WebSocketError( + WSCloseCode.MESSAGE_TOO_BIG, + f"Decompressed message exceeds size limit {self._max_msg_size}", + ) + elif type(assembled_payload) is bytes: + payload_merged = assembled_payload + else: + payload_merged = bytes(assembled_payload) + + if opcode == OP_CODE_TEXT: + try: + text = payload_merged.decode("utf-8") + except UnicodeDecodeError as exc: + raise WebSocketError( + WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message" + ) from exc + + # XXX: The Text and Binary messages here can be a performance + # bottleneck, so we use tuple.__new__ to improve performance. + # This is not type safe, but many tests should fail in + # test_client_ws_functional.py if this is wrong. 
+ self.queue.feed_data( + TUPLE_NEW(WSMessage, (WS_MSG_TYPE_TEXT, text, "")), + len(payload_merged), + ) + else: + self.queue.feed_data( + TUPLE_NEW(WSMessage, (WS_MSG_TYPE_BINARY, payload_merged, "")), + len(payload_merged), + ) + elif opcode == OP_CODE_CLOSE: + if len(payload) >= 2: + close_code = UNPACK_CLOSE_CODE(payload[:2])[0] + if close_code < 3000 and close_code not in ALLOWED_CLOSE_CODES: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + f"Invalid close code: {close_code}", + ) + try: + close_message = payload[2:].decode("utf-8") + except UnicodeDecodeError as exc: + raise WebSocketError( + WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message" + ) from exc + msg = TUPLE_NEW(WSMessage, (WSMsgType.CLOSE, close_code, close_message)) + elif payload: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + f"Invalid close frame: {fin} {opcode} {payload!r}", + ) + else: + msg = TUPLE_NEW(WSMessage, (WSMsgType.CLOSE, 0, "")) + + self.queue.feed_data(msg, 0) + elif opcode == OP_CODE_PING: + msg = TUPLE_NEW(WSMessage, (WSMsgType.PING, payload, "")) + self.queue.feed_data(msg, len(payload)) + elif opcode == OP_CODE_PONG: + msg = TUPLE_NEW(WSMessage, (WSMsgType.PONG, payload, "")) + self.queue.feed_data(msg, len(payload)) + else: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, f"Unexpected opcode={opcode!r}" + ) + + def _feed_data(self, data: bytes) -> None: + """Return the next frame from the socket.""" + if self._tail: + data, self._tail = self._tail + data, b"" + + start_pos: int = 0 + data_len = len(data) + data_cstr = data + + while True: + # read header + if self._state == READ_HEADER: + if data_len - start_pos < 2: + break + first_byte = data_cstr[start_pos] + second_byte = data_cstr[start_pos + 1] + start_pos += 2 + + fin = (first_byte >> 7) & 1 + rsv1 = (first_byte >> 6) & 1 + rsv2 = (first_byte >> 5) & 1 + rsv3 = (first_byte >> 4) & 1 + opcode = first_byte & 0xF + + # frame-fin = %x0 ; more frames of this message follow + # / %x1 ; final frame 
of this message + # frame-rsv1 = %x0 ; + # 1 bit, MUST be 0 unless negotiated otherwise + # frame-rsv2 = %x0 ; + # 1 bit, MUST be 0 unless negotiated otherwise + # frame-rsv3 = %x0 ; + # 1 bit, MUST be 0 unless negotiated otherwise + # + # Remove rsv1 from this test for deflate development + if rsv2 or rsv3 or (rsv1 and not self._compress): + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Received frame with non-zero reserved bits", + ) + + if opcode > 0x7 and fin == 0: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Received fragmented control frame", + ) + + has_mask = (second_byte >> 7) & 1 + length = second_byte & 0x7F + + # Control frames MUST have a payload + # length of 125 bytes or less + if opcode > 0x7 and length > 125: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Control frame payload cannot be larger than 125 bytes", + ) + + # Set compress status if last package is FIN + # OR set compress status if this is first fragment + # Raise error if not first fragment with rsv1 = 0x1 + if self._frame_fin or self._compressed == COMPRESSED_NOT_SET: + self._compressed = COMPRESSED_TRUE if rsv1 else COMPRESSED_FALSE + elif rsv1: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Received frame with non-zero reserved bits", + ) + + self._frame_fin = bool(fin) + self._frame_opcode = opcode + self._has_mask = bool(has_mask) + self._payload_len_flag = length + self._state = READ_PAYLOAD_LENGTH + + # read payload length + if self._state == READ_PAYLOAD_LENGTH: + len_flag = self._payload_len_flag + if len_flag == 126: + if data_len - start_pos < 2: + break + first_byte = data_cstr[start_pos] + second_byte = data_cstr[start_pos + 1] + start_pos += 2 + self._payload_bytes_to_read = first_byte << 8 | second_byte + elif len_flag > 126: + if data_len - start_pos < 8: + break + self._payload_bytes_to_read = UNPACK_LEN3(data, start_pos)[0] + start_pos += 8 + else: + self._payload_bytes_to_read = len_flag + + self._state = READ_PAYLOAD_MASK if 
self._has_mask else READ_PAYLOAD + + # read payload mask + if self._state == READ_PAYLOAD_MASK: + if data_len - start_pos < 4: + break + self._frame_mask = data_cstr[start_pos : start_pos + 4] + start_pos += 4 + self._state = READ_PAYLOAD + + if self._state == READ_PAYLOAD: + chunk_len = data_len - start_pos + if self._payload_bytes_to_read >= chunk_len: + f_end_pos = data_len + self._payload_bytes_to_read -= chunk_len + else: + f_end_pos = start_pos + self._payload_bytes_to_read + self._payload_bytes_to_read = 0 + + had_fragments = self._frame_payload_len + self._frame_payload_len += f_end_pos - start_pos + f_start_pos = start_pos + start_pos = f_end_pos + + if self._payload_bytes_to_read != 0: + # If we don't have a complete frame, we need to save the + # data for the next call to feed_data. + self._payload_fragments.append(data_cstr[f_start_pos:f_end_pos]) + break + + payload: Union[bytes, bytearray] + if had_fragments: + # We have to join the payload fragments get the payload + self._payload_fragments.append(data_cstr[f_start_pos:f_end_pos]) + if self._has_mask: + assert self._frame_mask is not None + payload_bytearray = bytearray(b"".join(self._payload_fragments)) + websocket_mask(self._frame_mask, payload_bytearray) + payload = payload_bytearray + else: + payload = b"".join(self._payload_fragments) + self._payload_fragments.clear() + elif self._has_mask: + assert self._frame_mask is not None + payload_bytearray = data_cstr[f_start_pos:f_end_pos] # type: ignore[assignment] + if type(payload_bytearray) is not bytearray: # pragma: no branch + # Cython will do the conversion for us + # but we need to do it for Python and we + # will always get here in Python + payload_bytearray = bytearray(payload_bytearray) + websocket_mask(self._frame_mask, payload_bytearray) + payload = payload_bytearray + else: + payload = data_cstr[f_start_pos:f_end_pos] + + self._handle_frame( + self._frame_fin, self._frame_opcode, payload, self._compressed + ) + self._frame_payload_len = 0 
+ self._state = READ_HEADER + + # XXX: Cython needs slices to be bounded, so we can't omit the slice end here. + self._tail = data_cstr[start_pos:data_len] if start_pos < data_len else b"" diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_py.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_py.py new file mode 100644 index 0000000000000000000000000000000000000000..f966a1593c5bc534442d1bd1a1067b0998969b28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_py.py @@ -0,0 +1,476 @@ +"""Reader for WebSocket protocol versions 13 and 8.""" + +import asyncio +import builtins +from collections import deque +from typing import Deque, Final, Optional, Set, Tuple, Union + +from ..base_protocol import BaseProtocol +from ..compression_utils import ZLibDecompressor +from ..helpers import _EXC_SENTINEL, set_exception +from ..streams import EofStream +from .helpers import UNPACK_CLOSE_CODE, UNPACK_LEN3, websocket_mask +from .models import ( + WS_DEFLATE_TRAILING, + WebSocketError, + WSCloseCode, + WSMessage, + WSMsgType, +) + +ALLOWED_CLOSE_CODES: Final[Set[int]] = {int(i) for i in WSCloseCode} + +# States for the reader, used to parse the WebSocket frame +# integer values are used so they can be cythonized +READ_HEADER = 1 +READ_PAYLOAD_LENGTH = 2 +READ_PAYLOAD_MASK = 3 +READ_PAYLOAD = 4 + +WS_MSG_TYPE_BINARY = WSMsgType.BINARY +WS_MSG_TYPE_TEXT = WSMsgType.TEXT + +# WSMsgType values unpacked so they can by cythonized to ints +OP_CODE_NOT_SET = -1 +OP_CODE_CONTINUATION = WSMsgType.CONTINUATION.value +OP_CODE_TEXT = WSMsgType.TEXT.value +OP_CODE_BINARY = WSMsgType.BINARY.value +OP_CODE_CLOSE = WSMsgType.CLOSE.value +OP_CODE_PING = WSMsgType.PING.value +OP_CODE_PONG = WSMsgType.PONG.value + +EMPTY_FRAME_ERROR = (True, b"") +EMPTY_FRAME = (False, b"") + +COMPRESSED_NOT_SET = -1 +COMPRESSED_FALSE = 0 +COMPRESSED_TRUE = 1 + +TUPLE_NEW = tuple.__new__ + +cython_int = int # Typed to int in Python, but cython with use a 
signed int in the pxd + + +class WebSocketDataQueue: + """WebSocketDataQueue resumes and pauses an underlying stream. + + It is a destination for WebSocket data. + """ + + def __init__( + self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop + ) -> None: + self._size = 0 + self._protocol = protocol + self._limit = limit * 2 + self._loop = loop + self._eof = False + self._waiter: Optional[asyncio.Future[None]] = None + self._exception: Union[BaseException, None] = None + self._buffer: Deque[Tuple[WSMessage, int]] = deque() + self._get_buffer = self._buffer.popleft + self._put_buffer = self._buffer.append + + def is_eof(self) -> bool: + return self._eof + + def exception(self) -> Optional[BaseException]: + return self._exception + + def set_exception( + self, + exc: BaseException, + exc_cause: builtins.BaseException = _EXC_SENTINEL, + ) -> None: + self._eof = True + self._exception = exc + if (waiter := self._waiter) is not None: + self._waiter = None + set_exception(waiter, exc, exc_cause) + + def _release_waiter(self) -> None: + if (waiter := self._waiter) is None: + return + self._waiter = None + if not waiter.done(): + waiter.set_result(None) + + def feed_eof(self) -> None: + self._eof = True + self._release_waiter() + self._exception = None # Break cyclic references + + def feed_data(self, data: "WSMessage", size: "cython_int") -> None: + self._size += size + self._put_buffer((data, size)) + self._release_waiter() + if self._size > self._limit and not self._protocol._reading_paused: + self._protocol.pause_reading() + + async def read(self) -> WSMessage: + if not self._buffer and not self._eof: + assert not self._waiter + self._waiter = self._loop.create_future() + try: + await self._waiter + except (asyncio.CancelledError, asyncio.TimeoutError): + self._waiter = None + raise + return self._read_from_buffer() + + def _read_from_buffer(self) -> WSMessage: + if self._buffer: + data, size = self._get_buffer() + self._size -= size + if 
self._size < self._limit and self._protocol._reading_paused: + self._protocol.resume_reading() + return data + if self._exception is not None: + raise self._exception + raise EofStream + + +class WebSocketReader: + def __init__( + self, queue: WebSocketDataQueue, max_msg_size: int, compress: bool = True + ) -> None: + self.queue = queue + self._max_msg_size = max_msg_size + + self._exc: Optional[Exception] = None + self._partial = bytearray() + self._state = READ_HEADER + + self._opcode: int = OP_CODE_NOT_SET + self._frame_fin = False + self._frame_opcode: int = OP_CODE_NOT_SET + self._payload_fragments: list[bytes] = [] + self._frame_payload_len = 0 + + self._tail: bytes = b"" + self._has_mask = False + self._frame_mask: Optional[bytes] = None + self._payload_bytes_to_read = 0 + self._payload_len_flag = 0 + self._compressed: int = COMPRESSED_NOT_SET + self._decompressobj: Optional[ZLibDecompressor] = None + self._compress = compress + + def feed_eof(self) -> None: + self.queue.feed_eof() + + # data can be bytearray on Windows because proactor event loop uses bytearray + # and asyncio types this to Union[bytes, bytearray, memoryview] so we need + # coerce data to bytes if it is not + def feed_data( + self, data: Union[bytes, bytearray, memoryview] + ) -> Tuple[bool, bytes]: + if type(data) is not bytes: + data = bytes(data) + + if self._exc is not None: + return True, data + + try: + self._feed_data(data) + except Exception as exc: + self._exc = exc + set_exception(self.queue, exc) + return EMPTY_FRAME_ERROR + + return EMPTY_FRAME + + def _handle_frame( + self, + fin: bool, + opcode: Union[int, cython_int], # Union intended: Cython pxd uses C int + payload: Union[bytes, bytearray], + compressed: Union[int, cython_int], # Union intended: Cython pxd uses C int + ) -> None: + msg: WSMessage + if opcode in {OP_CODE_TEXT, OP_CODE_BINARY, OP_CODE_CONTINUATION}: + # load text/binary + if not fin: + # got partial frame payload + if opcode != OP_CODE_CONTINUATION: + 
self._opcode = opcode + self._partial += payload + if self._max_msg_size and len(self._partial) >= self._max_msg_size: + raise WebSocketError( + WSCloseCode.MESSAGE_TOO_BIG, + f"Message size {len(self._partial)} " + f"exceeds limit {self._max_msg_size}", + ) + return + + has_partial = bool(self._partial) + if opcode == OP_CODE_CONTINUATION: + if self._opcode == OP_CODE_NOT_SET: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Continuation frame for non started message", + ) + opcode = self._opcode + self._opcode = OP_CODE_NOT_SET + # previous frame was non finished + # we should get continuation opcode + elif has_partial: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "The opcode in non-fin frame is expected " + f"to be zero, got {opcode!r}", + ) + + assembled_payload: Union[bytes, bytearray] + if has_partial: + assembled_payload = self._partial + payload + self._partial.clear() + else: + assembled_payload = payload + + if self._max_msg_size and len(assembled_payload) >= self._max_msg_size: + raise WebSocketError( + WSCloseCode.MESSAGE_TOO_BIG, + f"Message size {len(assembled_payload)} " + f"exceeds limit {self._max_msg_size}", + ) + + # Decompress process must to be done after all packets + # received. + if compressed: + if not self._decompressobj: + self._decompressobj = ZLibDecompressor(suppress_deflate_header=True) + # XXX: It's possible that the zlib backend (isal is known to + # do this, maybe others too?) will return max_length bytes, + # but internally buffer more data such that the payload is + # >max_length, so we return one extra byte and if we're able + # to do that, then the message is too big. 
+ payload_merged = self._decompressobj.decompress_sync( + assembled_payload + WS_DEFLATE_TRAILING, + ( + self._max_msg_size + 1 + if self._max_msg_size + else self._max_msg_size + ), + ) + if self._max_msg_size and len(payload_merged) > self._max_msg_size: + raise WebSocketError( + WSCloseCode.MESSAGE_TOO_BIG, + f"Decompressed message exceeds size limit {self._max_msg_size}", + ) + elif type(assembled_payload) is bytes: + payload_merged = assembled_payload + else: + payload_merged = bytes(assembled_payload) + + if opcode == OP_CODE_TEXT: + try: + text = payload_merged.decode("utf-8") + except UnicodeDecodeError as exc: + raise WebSocketError( + WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message" + ) from exc + + # XXX: The Text and Binary messages here can be a performance + # bottleneck, so we use tuple.__new__ to improve performance. + # This is not type safe, but many tests should fail in + # test_client_ws_functional.py if this is wrong. + self.queue.feed_data( + TUPLE_NEW(WSMessage, (WS_MSG_TYPE_TEXT, text, "")), + len(payload_merged), + ) + else: + self.queue.feed_data( + TUPLE_NEW(WSMessage, (WS_MSG_TYPE_BINARY, payload_merged, "")), + len(payload_merged), + ) + elif opcode == OP_CODE_CLOSE: + if len(payload) >= 2: + close_code = UNPACK_CLOSE_CODE(payload[:2])[0] + if close_code < 3000 and close_code not in ALLOWED_CLOSE_CODES: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + f"Invalid close code: {close_code}", + ) + try: + close_message = payload[2:].decode("utf-8") + except UnicodeDecodeError as exc: + raise WebSocketError( + WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message" + ) from exc + msg = TUPLE_NEW(WSMessage, (WSMsgType.CLOSE, close_code, close_message)) + elif payload: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + f"Invalid close frame: {fin} {opcode} {payload!r}", + ) + else: + msg = TUPLE_NEW(WSMessage, (WSMsgType.CLOSE, 0, "")) + + self.queue.feed_data(msg, 0) + elif opcode == OP_CODE_PING: + msg = TUPLE_NEW(WSMessage, 
(WSMsgType.PING, payload, "")) + self.queue.feed_data(msg, len(payload)) + elif opcode == OP_CODE_PONG: + msg = TUPLE_NEW(WSMessage, (WSMsgType.PONG, payload, "")) + self.queue.feed_data(msg, len(payload)) + else: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, f"Unexpected opcode={opcode!r}" + ) + + def _feed_data(self, data: bytes) -> None: + """Return the next frame from the socket.""" + if self._tail: + data, self._tail = self._tail + data, b"" + + start_pos: int = 0 + data_len = len(data) + data_cstr = data + + while True: + # read header + if self._state == READ_HEADER: + if data_len - start_pos < 2: + break + first_byte = data_cstr[start_pos] + second_byte = data_cstr[start_pos + 1] + start_pos += 2 + + fin = (first_byte >> 7) & 1 + rsv1 = (first_byte >> 6) & 1 + rsv2 = (first_byte >> 5) & 1 + rsv3 = (first_byte >> 4) & 1 + opcode = first_byte & 0xF + + # frame-fin = %x0 ; more frames of this message follow + # / %x1 ; final frame of this message + # frame-rsv1 = %x0 ; + # 1 bit, MUST be 0 unless negotiated otherwise + # frame-rsv2 = %x0 ; + # 1 bit, MUST be 0 unless negotiated otherwise + # frame-rsv3 = %x0 ; + # 1 bit, MUST be 0 unless negotiated otherwise + # + # Remove rsv1 from this test for deflate development + if rsv2 or rsv3 or (rsv1 and not self._compress): + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Received frame with non-zero reserved bits", + ) + + if opcode > 0x7 and fin == 0: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Received fragmented control frame", + ) + + has_mask = (second_byte >> 7) & 1 + length = second_byte & 0x7F + + # Control frames MUST have a payload + # length of 125 bytes or less + if opcode > 0x7 and length > 125: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Control frame payload cannot be larger than 125 bytes", + ) + + # Set compress status if last package is FIN + # OR set compress status if this is first fragment + # Raise error if not first fragment with rsv1 = 0x1 + if 
self._frame_fin or self._compressed == COMPRESSED_NOT_SET: + self._compressed = COMPRESSED_TRUE if rsv1 else COMPRESSED_FALSE + elif rsv1: + raise WebSocketError( + WSCloseCode.PROTOCOL_ERROR, + "Received frame with non-zero reserved bits", + ) + + self._frame_fin = bool(fin) + self._frame_opcode = opcode + self._has_mask = bool(has_mask) + self._payload_len_flag = length + self._state = READ_PAYLOAD_LENGTH + + # read payload length + if self._state == READ_PAYLOAD_LENGTH: + len_flag = self._payload_len_flag + if len_flag == 126: + if data_len - start_pos < 2: + break + first_byte = data_cstr[start_pos] + second_byte = data_cstr[start_pos + 1] + start_pos += 2 + self._payload_bytes_to_read = first_byte << 8 | second_byte + elif len_flag > 126: + if data_len - start_pos < 8: + break + self._payload_bytes_to_read = UNPACK_LEN3(data, start_pos)[0] + start_pos += 8 + else: + self._payload_bytes_to_read = len_flag + + self._state = READ_PAYLOAD_MASK if self._has_mask else READ_PAYLOAD + + # read payload mask + if self._state == READ_PAYLOAD_MASK: + if data_len - start_pos < 4: + break + self._frame_mask = data_cstr[start_pos : start_pos + 4] + start_pos += 4 + self._state = READ_PAYLOAD + + if self._state == READ_PAYLOAD: + chunk_len = data_len - start_pos + if self._payload_bytes_to_read >= chunk_len: + f_end_pos = data_len + self._payload_bytes_to_read -= chunk_len + else: + f_end_pos = start_pos + self._payload_bytes_to_read + self._payload_bytes_to_read = 0 + + had_fragments = self._frame_payload_len + self._frame_payload_len += f_end_pos - start_pos + f_start_pos = start_pos + start_pos = f_end_pos + + if self._payload_bytes_to_read != 0: + # If we don't have a complete frame, we need to save the + # data for the next call to feed_data. 
+ self._payload_fragments.append(data_cstr[f_start_pos:f_end_pos]) + break + + payload: Union[bytes, bytearray] + if had_fragments: + # We have to join the payload fragments get the payload + self._payload_fragments.append(data_cstr[f_start_pos:f_end_pos]) + if self._has_mask: + assert self._frame_mask is not None + payload_bytearray = bytearray(b"".join(self._payload_fragments)) + websocket_mask(self._frame_mask, payload_bytearray) + payload = payload_bytearray + else: + payload = b"".join(self._payload_fragments) + self._payload_fragments.clear() + elif self._has_mask: + assert self._frame_mask is not None + payload_bytearray = data_cstr[f_start_pos:f_end_pos] # type: ignore[assignment] + if type(payload_bytearray) is not bytearray: # pragma: no branch + # Cython will do the conversion for us + # but we need to do it for Python and we + # will always get here in Python + payload_bytearray = bytearray(payload_bytearray) + websocket_mask(self._frame_mask, payload_bytearray) + payload = payload_bytearray + else: + payload = data_cstr[f_start_pos:f_end_pos] + + self._handle_frame( + self._frame_fin, self._frame_opcode, payload, self._compressed + ) + self._frame_payload_len = 0 + self._state = READ_HEADER + + # XXX: Cython needs slices to be bounded, so we can't omit the slice end here. 
+ self._tail = data_cstr[start_pos:data_len] if start_pos < data_len else b"" diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/writer.py b/venv/lib/python3.10/site-packages/aiohttp/_websocket/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..19163f9afdf7b689e9e3f7959c0fc1daee096a4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/writer.py @@ -0,0 +1,178 @@ +"""WebSocket protocol versions 13 and 8.""" + +import asyncio +import random +from functools import partial +from typing import Any, Final, Optional, Union + +from ..base_protocol import BaseProtocol +from ..client_exceptions import ClientConnectionResetError +from ..compression_utils import ZLibBackend, ZLibCompressor +from .helpers import ( + MASK_LEN, + MSG_SIZE, + PACK_CLOSE_CODE, + PACK_LEN1, + PACK_LEN2, + PACK_LEN3, + PACK_RANDBITS, + websocket_mask, +) +from .models import WS_DEFLATE_TRAILING, WSMsgType + +DEFAULT_LIMIT: Final[int] = 2**16 + +# For websockets, keeping latency low is extremely important as implementations +# generally expect to be able to send and receive messages quickly. We use a +# larger chunk size than the default to reduce the number of executor calls +# since the executor is a significant source of latency and overhead when +# the chunks are small. A size of 5KiB was chosen because it is also the +# same value python-zlib-ng choose to use as the threshold to release the GIL. + +WEBSOCKET_MAX_SYNC_CHUNK_SIZE = 5 * 1024 + + +class WebSocketWriter: + """WebSocket writer. + + The writer is responsible for sending messages to the client. It is + created by the protocol when a connection is established. The writer + should avoid implementing any application logic and should only be + concerned with the low-level details of the WebSocket protocol. 
+ """ + + def __init__( + self, + protocol: BaseProtocol, + transport: asyncio.Transport, + *, + use_mask: bool = False, + limit: int = DEFAULT_LIMIT, + random: random.Random = random.Random(), + compress: int = 0, + notakeover: bool = False, + ) -> None: + """Initialize a WebSocket writer.""" + self.protocol = protocol + self.transport = transport + self.use_mask = use_mask + self.get_random_bits = partial(random.getrandbits, 32) + self.compress = compress + self.notakeover = notakeover + self._closing = False + self._limit = limit + self._output_size = 0 + self._compressobj: Any = None # actually compressobj + + async def send_frame( + self, message: bytes, opcode: int, compress: Optional[int] = None + ) -> None: + """Send a frame over the websocket with message as its payload.""" + if self._closing and not (opcode & WSMsgType.CLOSE): + raise ClientConnectionResetError("Cannot write to closing transport") + + # RSV are the reserved bits in the frame header. They are used to + # indicate that the frame is using an extension. + # https://datatracker.ietf.org/doc/html/rfc6455#section-5.2 + rsv = 0 + # Only compress larger packets (disabled) + # Does small packet needs to be compressed? 
+ # if self.compress and opcode < 8 and len(message) > 124: + if (compress or self.compress) and opcode < 8: + # RSV1 (rsv = 0x40) is set for compressed frames + # https://datatracker.ietf.org/doc/html/rfc7692#section-7.2.3.1 + rsv = 0x40 + + if compress: + # Do not set self._compress if compressing is for this frame + compressobj = self._make_compress_obj(compress) + else: # self.compress + if not self._compressobj: + self._compressobj = self._make_compress_obj(self.compress) + compressobj = self._compressobj + + message = ( + await compressobj.compress(message) + + compressobj.flush( + ZLibBackend.Z_FULL_FLUSH + if self.notakeover + else ZLibBackend.Z_SYNC_FLUSH + ) + ).removesuffix(WS_DEFLATE_TRAILING) + # Its critical that we do not return control to the event + # loop until we have finished sending all the compressed + # data. Otherwise we could end up mixing compressed frames + # if there are multiple coroutines compressing data. + + msg_length = len(message) + + use_mask = self.use_mask + mask_bit = 0x80 if use_mask else 0 + + # Depending on the message length, the header is assembled differently. + # The first byte is reserved for the opcode and the RSV bits. + first_byte = 0x80 | rsv | opcode + if msg_length < 126: + header = PACK_LEN1(first_byte, msg_length | mask_bit) + header_len = 2 + elif msg_length < 65536: + header = PACK_LEN2(first_byte, 126 | mask_bit, msg_length) + header_len = 4 + else: + header = PACK_LEN3(first_byte, 127 | mask_bit, msg_length) + header_len = 10 + + if self.transport.is_closing(): + raise ClientConnectionResetError("Cannot write to closing transport") + + # https://datatracker.ietf.org/doc/html/rfc6455#section-5.3 + # If we are using a mask, we need to generate it randomly + # and apply it to the message before sending it. A mask is + # a 32-bit value that is applied to the message using a + # bitwise XOR operation. It is used to prevent certain types + # of attacks on the websocket protocol. 
The mask is only used + # when aiohttp is acting as a client. Servers do not use a mask. + if use_mask: + mask = PACK_RANDBITS(self.get_random_bits()) + message = bytearray(message) + websocket_mask(mask, message) + self.transport.write(header + mask + message) + self._output_size += MASK_LEN + elif msg_length > MSG_SIZE: + self.transport.write(header) + self.transport.write(message) + else: + self.transport.write(header + message) + + self._output_size += header_len + msg_length + + # It is safe to return control to the event loop when using compression + # after this point as we have already sent or buffered all the data. + + # Once we have written output_size up to the limit, we call the + # drain helper which waits for the transport to be ready to accept + # more data. This is a flow control mechanism to prevent the buffer + # from growing too large. The drain helper will return right away + # if the writer is not paused. + if self._output_size > self._limit: + self._output_size = 0 + if self.protocol._paused: + await self.protocol._drain_helper() + + def _make_compress_obj(self, compress: int) -> ZLibCompressor: + return ZLibCompressor( + level=ZLibBackend.Z_BEST_SPEED, + wbits=-compress, + max_sync_chunk_size=WEBSOCKET_MAX_SYNC_CHUNK_SIZE, + ) + + async def close(self, code: int = 1000, message: Union[bytes, str] = b"") -> None: + """Close the websocket, sending the specified code and message.""" + if isinstance(message, str): + message = message.encode("utf-8") + try: + await self.send_frame( + PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE + ) + finally: + self._closing = True diff --git a/venv/lib/python3.10/site-packages/aiohttp/web_log.py b/venv/lib/python3.10/site-packages/aiohttp/web_log.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ea2beeb152974ce5dd9f3e7990133ce04f7980 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/web_log.py @@ -0,0 +1,216 @@ +import datetime +import functools +import logging 
KeyMethod = namedtuple("KeyMethod", "key method")


class AccessLogger(AbstractAccessLogger):
    """Helper object to log access.

    Usage:
        log = logging.getLogger("spam")
        log_format = "%a %{User-Agent}i"
        access_logger = AccessLogger(log, log_format)
        access_logger.log(request, response, time)

    Format:
        %%  The percent sign
        %a  Remote IP-address (IP-address of proxy if using reverse proxy)
        %t  Time when the request was started to process
        %P  The process ID of the child that serviced the request
        %r  First line of request
        %s  Response status code
        %b  Size of response in bytes, including HTTP headers
        %T  Time taken to serve the request, in seconds
        %Tf Time taken to serve the request, in seconds with floating fraction
            in .06f format
        %D  Time taken to serve the request, in microseconds
        %{FOO}i  request.headers['FOO']
        %{FOO}o  response.headers['FOO']
        %{FOO}e  os.environ['FOO']

    """

    # Maps a format atom (e.g. "a") to the attribute name used as the
    # `extra` key when emitting the log record.
    LOG_FORMAT_MAP = {
        "a": "remote_address",
        "t": "request_start_time",
        "P": "process_id",
        "r": "first_request_line",
        "s": "response_status",
        "b": "response_size",
        "T": "request_time",
        "Tf": "request_time_frac",
        "D": "request_time_micro",
        "i": "request_header",
        "o": "response_header",
    }

    LOG_FORMAT = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"'
    FORMAT_RE = re.compile(r"%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)")
    CLEANUP_RE = re.compile(r"(%[^s])")
    # Class-level cache: compiling a format string is done once per
    # distinct log_format across all AccessLogger instances.
    _FORMAT_CACHE: Dict[str, Tuple[str, List[KeyMethod]]] = {}

    def __init__(self, logger: logging.Logger, log_format: str = LOG_FORMAT) -> None:
        """Initialise the logger.

        logger is a logger object to be used for logging.
        log_format is a string with apache compatible log format description.

        """
        super().__init__(logger, log_format=log_format)

        _compiled_format = AccessLogger._FORMAT_CACHE.get(log_format)
        if not _compiled_format:
            _compiled_format = self.compile_format(log_format)
            AccessLogger._FORMAT_CACHE[log_format] = _compiled_format

        self._log_format, self._methods = _compiled_format

    def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]:
        """Translate log_format into form usable by modulo formatting

        All known atoms will be replaced with %s
        Also methods for formatting of those atoms will be added to
        _methods in appropriate order

        For example we have log_format = "%a %t"
        This format will be translated to "%s %s"
        Also contents of _methods will be
        [self._format_a, self._format_t]
        These method will be called and results will be passed
        to translated string format.

        Each _format_* method receive 'args' which is list of arguments
        given to self.log

        Exceptions are _format_e, _format_i and _format_o methods which
        also receive key name (by functools.partial)

        """
        # list of (key, method) tuples, we don't use an OrderedDict as users
        # can repeat the same key more than once
        methods = list()

        for atom in self.FORMAT_RE.findall(log_format):
            if atom[1] == "":
                # Simple atom such as %a: look up _format_a directly.
                format_key1 = self.LOG_FORMAT_MAP[atom[0]]
                m = getattr(AccessLogger, "_format_%s" % atom[0])
                key_method = KeyMethod(format_key1, m)
            else:
                # Keyed atom such as %{User-Agent}i: bind the header/env
                # name into the formatter via functools.partial.
                format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1])
                m = getattr(AccessLogger, "_format_%s" % atom[2])
                key_method = KeyMethod(format_key2, functools.partial(m, atom[1]))

            methods.append(key_method)

        log_format = self.FORMAT_RE.sub(r"%s", log_format)
        # Escape any remaining bare % so modulo formatting stays valid.
        log_format = self.CLEANUP_RE.sub(r"%\1", log_format)
        return log_format, methods

    @staticmethod
    def _format_i(
        key: str, request: BaseRequest, response: StreamResponse, time: float
    ) -> str:
        if request is None:
            return "(no headers)"

        # suboptimal, make istr(key) once
        return request.headers.get(key, "-")

    @staticmethod
    def _format_o(
        key: str, request: BaseRequest, response: StreamResponse, time: float
    ) -> str:
        # suboptimal, make istr(key) once
        return response.headers.get(key, "-")

    @staticmethod
    def _format_a(request: BaseRequest, response: StreamResponse, time: float) -> str:
        if request is None:
            return "-"
        ip = request.remote
        return ip if ip is not None else "-"

    @staticmethod
    def _format_t(request: BaseRequest, response: StreamResponse, time: float) -> str:
        # NOTE(review): uses time_mod.timezone (local UTC offset ignoring
        # DST) to build the tz — presumably adequate for log stamps; the
        # start time is derived by subtracting the elapsed `time`.
        tz = datetime.timezone(datetime.timedelta(seconds=-time_mod.timezone))
        now = datetime.datetime.now(tz)
        start_time = now - datetime.timedelta(seconds=time)
        return start_time.strftime("[%d/%b/%Y:%H:%M:%S %z]")

    @staticmethod
    def _format_P(request: BaseRequest, response: StreamResponse, time: float) -> str:
        return "<%s>" % os.getpid()

    @staticmethod
    def _format_r(request: BaseRequest, response: StreamResponse, time: float) -> str:
        if request is None:
            return "-"
        return "{} {} HTTP/{}.{}".format(
            request.method,
            request.path_qs,
            request.version.major,
            request.version.minor,
        )

    @staticmethod
    def _format_s(request: BaseRequest, response: StreamResponse, time: float) -> int:
        return response.status

    @staticmethod
    def _format_b(request: BaseRequest, response: StreamResponse, time: float) -> int:
        return response.body_length

    @staticmethod
    def _format_T(request: BaseRequest, response: StreamResponse, time: float) -> str:
        return str(round(time))

    @staticmethod
    def _format_Tf(request: BaseRequest, response: StreamResponse, time: float) -> str:
        return "%06f" % time

    @staticmethod
    def _format_D(request: BaseRequest, response: StreamResponse, time: float) -> str:
        return str(round(time * 1000000))

    def _format_line(
        self, request: BaseRequest, response: StreamResponse, time: float
    ) -> Iterable[Tuple[str, Callable[[BaseRequest, StreamResponse, float], str]]]:
        # Evaluate every compiled formatter in order; result pairs each
        # atom's key with its already-formatted value.
        return [(key, method(request, response, time)) for key, method in self._methods]

    @property
    def enabled(self) -> bool:
        """Check if logger is enabled."""
        # Avoid formatting the log line if it will not be emitted.
        return self.logger.isEnabledFor(logging.INFO)

    def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None:
        try:
            fmt_info = self._format_line(request, response, time)

            values = list()
            extra = dict()
            for key, value in fmt_info:
                values.append(value)

                if key.__class__ is str:
                    extra[key] = value
                else:
                    # Keyed atom: key is a (map_name, sub_key) tuple; nest
                    # the value one level deep in `extra`.
                    k1, k2 = key  # type: ignore[misc]
                    dct = extra.get(k1, {})  # type: ignore[var-annotated,has-type]
                    dct[k2] = value  # type: ignore[index,has-type]
                    extra[k1] = dct  # type: ignore[has-type,assignment]

            self.logger.info(self._log_format % tuple(values), extra=extra)
        except Exception:
            # Never let access logging take down the connection handler.
            self.logger.exception("Error in logging")
def normalize_path_middleware(
    *,
    append_slash: bool = True,
    remove_slash: bool = False,
    merge_slashes: bool = True,
    redirect_class: Type[HTTPMove] = HTTPPermanentRedirect,
) -> Middleware:
    """Factory for producing a middleware that normalizes the path of a request.

    Normalizing means:
        - Add or remove a trailing slash to the path.
        - Double slashes are replaced by one.

    The middleware returns as soon as it finds a path that resolves
    correctly. The order if both merge and append/remove are enabled is
        1) merge slashes
        2) append/remove slash
        3) both merge slashes and append/remove slash.
    If the path resolves with at least one of those conditions, it will
    redirect to the new path.

    Only one of `append_slash` and `remove_slash` can be enabled. If both
    are `True` the factory will raise an assertion error

    If `append_slash` is `True` the middleware will append a slash when
    needed. If a resource is defined with trailing slash and the request
    comes without it, it will append it automatically.

    If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
    the middleware will remove trailing slashes and redirect if the resource
    is defined

    If merge_slashes is True, merge multiple consecutive slashes in the
    path into one.
    """
    correct_configuration = not (append_slash and remove_slash)
    assert correct_configuration, "Cannot both remove and append slash"

    @middleware
    async def impl(request: Request, handler: Handler) -> StreamResponse:
        # Only attempt normalization when the router failed to match
        # (SystemRoute covers 404/405 system responses).
        if isinstance(request.match_info.route, SystemRoute):
            paths_to_check = []
            # Preserve the query string; only the path part is rewritten.
            if "?" in request.raw_path:
                path, query = request.raw_path.split("?", 1)
                query = "?" + query
            else:
                query = ""
                path = request.raw_path

            # Candidates are tried in order; first one that resolves wins.
            if merge_slashes:
                paths_to_check.append(re.sub("//+", "/", path))
            if append_slash and not request.path.endswith("/"):
                paths_to_check.append(path + "/")
            if remove_slash and request.path.endswith("/"):
                paths_to_check.append(path[:-1])
            if merge_slashes and append_slash:
                paths_to_check.append(re.sub("//+", "/", path + "/"))
            if merge_slashes and remove_slash:
                merged_slashes = re.sub("//+", "/", path)
                paths_to_check.append(merged_slashes[:-1])

            for path in paths_to_check:
                path = re.sub("^//+", "/", path)  # SECURITY: GHSA-v6wp-4m6f-gcjg
                resolves, request = await _check_request_resolves(request, path)
                if resolves:
                    raise redirect_class(request.raw_path + query)

        return await handler(request)

    return impl


def _fix_request_current_app(app: "Application") -> Middleware:
    # Middleware that pins match_info.current_app to `app` for the
    # duration of the handler, restoring the previous value afterwards
    # (used when an app is nested as a sub-application).
    @middleware
    async def impl(request: Request, handler: Handler) -> StreamResponse:
        match_info = request.match_info
        prev = match_info.current_app
        match_info.current_app = app
        try:
            return await handler(request)
        finally:
            match_info.current_app = prev

    return impl
__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError")

if TYPE_CHECKING:
    import ssl

    from .web_server import Server


# Factory signature used by Server to build a BaseRequest for each message.
_RequestFactory = Callable[
    [
        RawRequestMessage,
        StreamReader,
        "RequestHandler",
        AbstractStreamWriter,
        "asyncio.Task[None]",
    ],
    BaseRequest,
]

_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]

# Placeholder message used when parsing failed; the real error is carried
# separately in an _ErrInfo.
ERROR = RawRequestMessage(
    "UNKNOWN",
    "/",
    HttpVersion10,
    {},  # type: ignore[arg-type]
    {},  # type: ignore[arg-type]
    True,
    None,
    False,
    False,
    yarl.URL("/"),
)


class RequestPayloadError(Exception):
    """Payload parsing error."""


class PayloadAccessError(Exception):
    """Payload was accessed after response was sent."""


# Shared singleton set on payloads once the response is finished.
_PAYLOAD_ACCESS_ERROR = PayloadAccessError()


@attr.s(auto_attribs=True, frozen=True, slots=True)
class _ErrInfo:
    # Parse-error details queued in place of a parsed message.
    status: int
    exc: BaseException
    message: str


_MsgType = Tuple[Union[RawRequestMessage, _ErrInfo], StreamReader]


class RequestHandler(BaseProtocol):
    """HTTP protocol implementation.

    RequestHandler handles incoming HTTP request. It reads request line,
    request headers and request payload and calls handle_request() method.
    By default it always returns with 404 response.

    RequestHandler handles errors in incoming request, like bad
    status line, bad headers or incomplete payload. If any error occurs,
    connection gets closed.

    keepalive_timeout -- number of seconds before closing
                         keep-alive connection

    tcp_keepalive -- TCP keep-alive is on, default is on

    debug -- enable debug mode

    logger -- custom logger object

    access_log_class -- custom class for access_logger

    access_log -- custom logging object

    access_log_format -- access log format string

    loop -- Optional event loop

    max_line_size -- Optional maximum header line size

    max_field_size -- Optional maximum header field size

    max_headers -- Optional maximum header size

    timeout_ceil_threshold -- Optional value to specify
                              threshold to ceil() timeout
                              values

    """

    __slots__ = (
        "_request_count",
        "_keepalive",
        "_manager",
        "_request_handler",
        "_request_factory",
        "_tcp_keepalive",
        "_next_keepalive_close_time",
        "_keepalive_handle",
        "_keepalive_timeout",
        "_lingering_time",
        "_messages",
        "_message_tail",
        "_handler_waiter",
        "_waiter",
        "_task_handler",
        "_upgrade",
        "_payload_parser",
        "_request_parser",
        "_reading_paused",
        "logger",
        "debug",
        "access_log",
        "access_logger",
        "_close",
        "_force_close",
        "_current_request",
        "_timeout_ceil_threshold",
        "_request_in_progress",
        "_logging_enabled",
        "_cache",
    )

    def __init__(
        self,
        manager: "Server",
        *,
        loop: asyncio.AbstractEventLoop,
        # Default should be high enough that it's likely longer than a reverse proxy.
        keepalive_timeout: float = 3630,
        tcp_keepalive: bool = True,
        logger: Logger = server_logger,
        access_log_class: Type[AbstractAccessLogger] = AccessLogger,
        access_log: Logger = access_logger,
        access_log_format: str = AccessLogger.LOG_FORMAT,
        debug: bool = False,
        max_line_size: int = 8190,
        max_headers: int = 32768,
        max_field_size: int = 8190,
        lingering_time: float = 10.0,
        read_bufsize: int = 2**16,
        auto_decompress: bool = True,
        timeout_ceil_threshold: float = 5,
    ):
        super().__init__(loop)

        # _request_count is the number of requests processed with the same connection.
        self._request_count = 0
        self._keepalive = False
        self._current_request: Optional[BaseRequest] = None
        self._manager: Optional[Server] = manager
        self._request_handler: Optional[_RequestHandler] = manager.request_handler
        self._request_factory: Optional[_RequestFactory] = manager.request_factory

        self._tcp_keepalive = tcp_keepalive
        # placeholder to be replaced on keepalive timeout setup
        self._next_keepalive_close_time = 0.0
        self._keepalive_handle: Optional[asyncio.Handle] = None
        self._keepalive_timeout = keepalive_timeout
        self._lingering_time = float(lingering_time)

        # Pipelined messages queued by data_received, drained by start().
        self._messages: Deque[_MsgType] = deque()
        self._message_tail = b""

        self._waiter: Optional[asyncio.Future[None]] = None
        self._handler_waiter: Optional[asyncio.Future[None]] = None
        self._task_handler: Optional[asyncio.Task[None]] = None

        self._upgrade = False
        self._payload_parser: Any = None
        self._request_parser: Optional[HttpRequestParser] = HttpRequestParser(
            self,
            loop,
            read_bufsize,
            max_line_size=max_line_size,
            max_field_size=max_field_size,
            max_headers=max_headers,
            payload_exception=RequestPayloadError,
            auto_decompress=auto_decompress,
        )

        # Fall back to 5 if the provided threshold is not a number.
        self._timeout_ceil_threshold: float = 5
        try:
            self._timeout_ceil_threshold = float(timeout_ceil_threshold)
        except (TypeError, ValueError):
            pass

        self.logger = logger
        self.debug = debug
        self.access_log = access_log
        if access_log:
            self.access_logger: Optional[AbstractAccessLogger] = access_log_class(
                access_log, access_log_format
            )
            self._logging_enabled = self.access_logger.enabled
        else:
            self.access_logger = None
            self._logging_enabled = False

        self._close = False
        self._force_close = False
        self._request_in_progress = False
        self._cache: dict[str, Any] = {}

    def __repr__(self) -> str:
        return "<{} {}>".format(
            self.__class__.__name__,
            "connected" if self.transport is not None else "disconnected",
        )

    @under_cached_property
    def ssl_context(self) -> Optional["ssl.SSLContext"]:
        """Return SSLContext if available."""
        return (
            None
            if self.transport is None
            else self.transport.get_extra_info("sslcontext")
        )

    @under_cached_property
    def peername(
        self,
    ) -> Optional[Union[str, Tuple[str, int, int, int], Tuple[str, int]]]:
        """Return peername if available."""
        return (
            None
            if self.transport is None
            else self.transport.get_extra_info("peername")
        )

    @property
    def keepalive_timeout(self) -> float:
        return self._keepalive_timeout

    async def shutdown(self, timeout: Optional[float] = 15.0) -> None:
        """Do worker process exit preparations.

        We need to clean up everything and stop accepting requests.
        It is especially important for keep-alive connections.
        """
        self._force_close = True

        if self._keepalive_handle is not None:
            self._keepalive_handle.cancel()

        # Wait for graceful handler completion
        if self._request_in_progress:
            # The future is only created when we are shutting
            # down while the handler is still processing a request
            # to avoid creating a future for every request.
            self._handler_waiter = self._loop.create_future()
            try:
                async with ceil_timeout(timeout):
                    await self._handler_waiter
            except (asyncio.CancelledError, asyncio.TimeoutError):
                self._handler_waiter = None
                # Re-raise only when this task itself is being cancelled
                # (3.11+ uncancel semantics).
                if (
                    sys.version_info >= (3, 11)
                    and (task := asyncio.current_task())
                    and task.cancelling()
                ):
                    raise
        # Then cancel handler and wait
        try:
            async with ceil_timeout(timeout):
                if self._current_request is not None:
                    self._current_request._cancel(asyncio.CancelledError())

                if self._task_handler is not None and not self._task_handler.done():
                    await asyncio.shield(self._task_handler)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            if (
                sys.version_info >= (3, 11)
                and (task := asyncio.current_task())
                and task.cancelling()
            ):
                raise

        # force-close non-idle handler
        if self._task_handler is not None:
            self._task_handler.cancel()

        self.force_close()

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        """Start the per-connection handler task when the transport connects."""
        super().connection_made(transport)

        real_transport = cast(asyncio.Transport, transport)
        if self._tcp_keepalive:
            tcp_keepalive(real_transport)

        assert self._manager is not None
        self._manager.connection_made(self, real_transport)

        loop = self._loop
        if sys.version_info >= (3, 12):
            # eager_start avoids an extra event-loop hop on 3.12+.
            task = asyncio.Task(self.start(), loop=loop, eager_start=True)
        else:
            task = loop.create_task(self.start())
        self._task_handler = task

    def connection_lost(self, exc: Optional[BaseException]) -> None:
        """Tear down state and cancel in-flight work when the peer disconnects."""
        if self._manager is None:
            return
        self._manager.connection_lost(self, exc)

        # Grab value before setting _manager to None.
        handler_cancellation = self._manager.handler_cancellation

        self.force_close()
        super().connection_lost(exc)
        self._manager = None
        self._request_factory = None
        self._request_handler = None
        self._request_parser = None

        if self._keepalive_handle is not None:
            self._keepalive_handle.cancel()

        if self._current_request is not None:
            if exc is None:
                exc = ConnectionResetError("Connection lost")
            self._current_request._cancel(exc)

        if handler_cancellation and self._task_handler is not None:
            self._task_handler.cancel()

        self._task_handler = None

        if self._payload_parser is not None:
            self._payload_parser.feed_eof()
            self._payload_parser = None

    def set_parser(self, parser: Any) -> None:
        """Install a payload parser (e.g. after a protocol upgrade)."""
        # Actual type is WebReader
        assert self._payload_parser is None

        self._payload_parser = parser

        if self._message_tail:
            self._payload_parser.feed_data(self._message_tail)
            self._message_tail = b""

    def eof_received(self) -> None:
        # Returning None lets asyncio close the transport on EOF.
        pass

    def data_received(self, data: bytes) -> None:
        """Feed raw bytes to the request parser and queue parsed messages."""
        if self._force_close or self._close:
            return
        # parse http messages
        messages: Sequence[_MsgType]
        if self._payload_parser is None and not self._upgrade:
            assert self._request_parser is not None
            try:
                messages, upgraded, tail = self._request_parser.feed_data(data)
            except HttpProcessingError as exc:
                # Queue a synthetic error message; start() turns it into
                # a 400 response via _make_error_handler.
                messages = [
                    (_ErrInfo(status=400, exc=exc, message=exc.message), EMPTY_PAYLOAD)
                ]
                upgraded = False
                tail = b""

            for msg, payload in messages or ():
                self._request_count += 1
                self._messages.append((msg, payload))

            waiter = self._waiter
            if messages and waiter is not None and not waiter.done():
                # don't set result twice
                waiter.set_result(None)

            self._upgrade = upgraded
            if upgraded and tail:
                self._message_tail = tail

        # no parser, just store
        elif self._payload_parser is None and self._upgrade and data:
            self._message_tail += data

        # feed payload
        elif data:
            eof, tail = self._payload_parser.feed_data(data)
            if eof:
                self.close()

    def keep_alive(self, val: bool) -> None:
        """Set keep-alive connection mode.

        :param bool val: new state.
        """
        self._keepalive = val
        if self._keepalive_handle:
            self._keepalive_handle.cancel()
            self._keepalive_handle = None

    def close(self) -> None:
        """Close connection.

        Stop accepting new pipelining messages and close
        connection when handlers done processing messages.
        """
        self._close = True
        if self._waiter:
            self._waiter.cancel()

    def force_close(self) -> None:
        """Forcefully close connection."""
        self._force_close = True
        if self._waiter:
            self._waiter.cancel()
        if self.transport is not None:
            self.transport.close()
            self.transport = None

    def log_access(
        self, request: BaseRequest, response: StreamResponse, time: Optional[float]
    ) -> None:
        # `time` is the request start timestamp; elapsed is computed here.
        if self.access_logger is not None and self.access_logger.enabled:
            if TYPE_CHECKING:
                assert time is not None
            self.access_logger.log(request, response, self._loop.time() - time)

    def log_debug(self, *args: Any, **kw: Any) -> None:
        if self.debug:
            self.logger.debug(*args, **kw)

    def log_exception(self, *args: Any, **kw: Any) -> None:
        self.logger.exception(*args, **kw)

    def _process_keepalive(self) -> None:
        """Timer callback: close an idle keep-alive connection."""
        self._keepalive_handle = None
        if self._force_close or not self._keepalive:
            return

        loop = self._loop
        now = loop.time()
        close_time = self._next_keepalive_close_time
        if now < close_time:
            # Keep alive close check fired too early, reschedule
            self._keepalive_handle = loop.call_at(close_time, self._process_keepalive)
            return

        # handler in idle state
        if self._waiter and not self._waiter.done():
            self.force_close()

    async def _handle_request(
        self,
        request: BaseRequest,
        start_time: Optional[float],
        request_handler: Callable[[BaseRequest], Awaitable[StreamResponse]],
    ) -> Tuple[StreamResponse, bool]:
        """Run one request through the handler, mapping exceptions to responses.

        Returns (response, reset) where reset is True when the client
        disconnected before the response could be completed.
        """
        self._request_in_progress = True
        try:
            try:
                self._current_request = request
                resp = await request_handler(request)
            finally:
                self._current_request = None
        except HTTPException as exc:
            resp = exc
            resp, reset = await self.finish_response(request, resp, start_time)
        except asyncio.CancelledError:
            raise
        except asyncio.TimeoutError as exc:
            self.log_debug("Request handler timed out.", exc_info=exc)
            resp = self.handle_error(request, 504)
            resp, reset = await self.finish_response(request, resp, start_time)
        except Exception as exc:
            resp = self.handle_error(request, 500, exc)
            resp, reset = await self.finish_response(request, resp, start_time)
        else:
            # Deprecation warning (See #2415)
            if getattr(resp, "__http_exception__", False):
                warnings.warn(
                    "returning HTTPException object is deprecated "
                    "(#2415) and will be removed, "
                    "please raise the exception instead",
                    DeprecationWarning,
                )

            resp, reset = await self.finish_response(request, resp, start_time)
        finally:
            self._request_in_progress = False
            # Signal shutdown() that the in-flight request has completed.
            if self._handler_waiter is not None:
                self._handler_waiter.set_result(None)

        return resp, reset

    async def start(self) -> None:
        """Process incoming request.

        It reads request line, request headers and request payload, then
        calls handle_request() method. Subclass has to override
        handle_request(). start() handles various exceptions in request
        or response handling. Connection is being closed always unless
        keep_alive(True) specified.
        """
        loop = self._loop
        manager = self._manager
        assert manager is not None
        keepalive_timeout = self._keepalive_timeout
        resp = None
        assert self._request_factory is not None
        assert self._request_handler is not None

        while not self._force_close:
            if not self._messages:
                try:
                    # wait for next request
                    self._waiter = loop.create_future()
                    await self._waiter
                finally:
                    self._waiter = None

            message, payload = self._messages.popleft()

            # time is only fetched if logging is enabled as otherwise
            # its thrown away and never used.
            start = loop.time() if self._logging_enabled else None

            manager.requests_count += 1
            writer = StreamWriter(self, loop)
            if isinstance(message, _ErrInfo):
                # make request_factory work
                request_handler = self._make_error_handler(message)
                message = ERROR
            else:
                request_handler = self._request_handler

            # Important don't hold a reference to the current task
            # as on traceback it will prevent the task from being
            # collected and will cause a memory leak.
            request = self._request_factory(
                message,
                payload,
                self,
                writer,
                self._task_handler or asyncio.current_task(loop),  # type: ignore[arg-type]
            )
            try:
                # a new task is used for copy context vars (#3406)
                coro = self._handle_request(request, start, request_handler)
                if sys.version_info >= (3, 12):
                    task = asyncio.Task(coro, loop=loop, eager_start=True)
                else:
                    task = loop.create_task(coro)
                try:
                    resp, reset = await task
                except ConnectionError:
                    self.log_debug("Ignored premature client disconnection")
                    break

                # Drop the processed task from asyncio.Task.all_tasks() early
                del task
                if reset:
                    self.log_debug("Ignored premature client disconnection 2")
                    break

                # notify server about keep-alive
                self._keepalive = bool(resp.keep_alive)

                # check payload
                if not payload.is_eof():
                    lingering_time = self._lingering_time
                    if not self._force_close and lingering_time:
                        self.log_debug(
                            "Start lingering close timer for %s sec.", lingering_time
                        )

                        now = loop.time()
                        end_t = now + lingering_time

                        try:
                            while not payload.is_eof() and now < end_t:
                                async with ceil_timeout(end_t - now):
                                    # read and ignore
                                    await payload.readany()
                                now = loop.time()
                        except (asyncio.CancelledError, asyncio.TimeoutError):
                            if (
                                sys.version_info >= (3, 11)
                                and (t := asyncio.current_task())
                                and t.cancelling()
                            ):
                                raise

                    # if payload still uncompleted
                    if not payload.is_eof() and not self._force_close:
                        self.log_debug("Uncompleted request.")
                        self.close()

                # Any later access to the consumed payload is a bug.
                payload.set_exception(_PAYLOAD_ACCESS_ERROR)

            except asyncio.CancelledError:
                self.log_debug("Ignored premature client disconnection")
                self.force_close()
                raise
            except Exception as exc:
                self.log_exception("Unhandled exception", exc_info=exc)
                self.force_close()
            except BaseException:
                self.force_close()
                raise
            finally:
                request._task = None  # type: ignore[assignment]  # Break reference cycle in case of exception
                if self.transport is None and resp is not None:
                    self.log_debug("Ignored premature client disconnection.")

            if self._keepalive and not self._close and not self._force_close:
                # start keep-alive timer
                close_time = loop.time() + keepalive_timeout
                self._next_keepalive_close_time = close_time
                if self._keepalive_handle is None:
                    self._keepalive_handle = loop.call_at(
                        close_time, self._process_keepalive
                    )
            else:
                break

        # remove handler, close transport if no handlers left
        if not self._force_close:
            self._task_handler = None
            if self.transport is not None:
                self.transport.close()

    async def finish_response(
        self, request: BaseRequest, resp: StreamResponse, start_time: Optional[float]
    ) -> Tuple[StreamResponse, bool]:
        """Prepare the response and write_eof, then log access.

        This has to be called within the context of any exception so the
        access logger can get exception information. Returns True if the
        client disconnects prematurely.
        """
        request._finish()
        if self._request_parser is not None:
            self._request_parser.set_upgraded(False)
            self._upgrade = False
            if self._message_tail:
                self._request_parser.feed_data(self._message_tail)
                self._message_tail = b""
        try:
            prepare_meth = resp.prepare
        except AttributeError:
            # Handler returned something that is not a response object;
            # substitute a 500 so the client still gets a valid reply.
            if resp is None:
                self.log_exception("Missing return statement on request handler")
            else:
                self.log_exception(
                    "Web-handler should return a response instance, "
                    "got {!r}".format(resp)
                )
            exc = HTTPInternalServerError()
            resp = Response(
                status=exc.status, reason=exc.reason, text=exc.text, headers=exc.headers
            )
            prepare_meth = resp.prepare
        try:
            await prepare_meth(request)
            await resp.write_eof()
        except ConnectionError:
            self.log_access(request, resp, start_time)
            return resp, True

        self.log_access(request, resp, start_time)
        return resp, False

    def handle_error(
        self,
        request: BaseRequest,
        status: int = 500,
        exc: Optional[BaseException] = None,
        message: Optional[str] = None,
    ) -> StreamResponse:
        """Handle errors.

        Returns HTTP response with specific status code. Logs additional
        information. It always closes current connection.
        """
        if self._request_count == 1 and isinstance(exc, BadHttpMethod):
            # BadHttpMethod is common when a client sends non-HTTP
            # or encrypted traffic to an HTTP port. This is expected
            # to happen when connected to the public internet so we log
            # it at the debug level as to not fill logs with noise.
            self.logger.debug(
                "Error handling request from %s", request.remote, exc_info=exc
            )
        else:
            self.log_exception(
                "Error handling request from %s", request.remote, exc_info=exc
            )

        # some data already got sent, connection is broken
        if request.writer.output_size > 0:
            raise ConnectionError(
                "Response is sent already, cannot send another response "
                "with the error message"
            )

        ct = "text/plain"
        if status == HTTPStatus.INTERNAL_SERVER_ERROR:
            title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR)
            msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
            tb = None
            # Tracebacks are only exposed to clients in debug mode.
            if self.debug:
                with suppress(Exception):
                    tb = traceback.format_exc()

            if "text/html" in request.headers.get("Accept", ""):
                if tb:
                    tb = html_escape(tb)
                    msg = f"<h2>Traceback:</h2>\n<pre>{tb}</pre>"
                message = (
                    "<html><head>"
                    "<title>{title}</title>"
                    "</head><body>\n<h1>{title}</h1>"
                    "\n{msg}\n</body></html>\n"
                ).format(title=title, msg=msg)
                ct = "text/html"
            else:
                if tb:
                    msg = tb
                message = title + "\n\n" + msg

        resp = Response(status=status, text=message, content_type=ct)
        resp.force_close()

        return resp

    def _make_error_handler(
        self, err_info: _ErrInfo
    ) -> Callable[[BaseRequest], Awaitable[StreamResponse]]:
        # Build a handler that replays a queued parse error as a response.
        async def handler(request: BaseRequest) -> StreamResponse:
            return self.handle_error(
                request, err_info.status, err_info.exc, err_info.message
            )

        return handler
import hdrs +from ._cookie_helpers import parse_cookie_header +from .abc import AbstractStreamWriter +from .helpers import ( + _SENTINEL, + DEBUG, + ETAG_ANY, + LIST_QUOTED_ETAG_RE, + ChainMapProxy, + ETag, + HeadersMixin, + parse_http_date, + reify, + sentinel, + set_exception, +) +from .http_parser import RawRequestMessage +from .http_writer import HttpVersion +from .multipart import BodyPartReader, MultipartReader +from .streams import EmptyStreamReader, StreamReader +from .typedefs import ( + DEFAULT_JSON_DECODER, + JSONDecoder, + LooseHeaders, + RawHeaders, + StrOrURL, +) +from .web_exceptions import HTTPRequestEntityTooLarge +from .web_response import StreamResponse + +__all__ = ("BaseRequest", "FileField", "Request") + + +if TYPE_CHECKING: + from .web_app import Application + from .web_protocol import RequestHandler + from .web_urldispatcher import UrlMappingMatchInfo + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class FileField: + name: str + filename: str + file: io.BufferedReader + content_type: str + headers: CIMultiDictProxy[str] + + +_TCHAR: Final[str] = string.digits + string.ascii_letters + r"!#$%&'*+.^_`|~-" +# '-' at the end to prevent interpretation as range in a char class + +_TOKEN: Final[str] = rf"[{_TCHAR}]+" + +_QDTEXT: Final[str] = r"[{}]".format( + r"".join(chr(c) for c in (0x09, 0x20, 0x21) + tuple(range(0x23, 0x7F))) +) +# qdtext includes 0x5C to escape 0x5D ('\]') +# qdtext excludes obs-text (because obsoleted, and encoding not specified) + +_QUOTED_PAIR: Final[str] = r"\\[\t !-~]" + +_QUOTED_STRING: Final[str] = r'"(?:{quoted_pair}|{qdtext})*"'.format( + qdtext=_QDTEXT, quoted_pair=_QUOTED_PAIR +) + +_FORWARDED_PAIR: Final[str] = ( + r"({token})=({token}|{quoted_string})(:\d{{1,4}})?".format( + token=_TOKEN, quoted_string=_QUOTED_STRING + ) +) + +_QUOTED_PAIR_REPLACE_RE: Final[Pattern[str]] = re.compile(r"\\([\t !-~])") +# same pattern as _QUOTED_PAIR but contains a capture group + +_FORWARDED_PAIR_RE: Final[Pattern[str]] = 
re.compile(_FORWARDED_PAIR) + +############################################################ +# HTTP Request +############################################################ + + +class BaseRequest(MutableMapping[str, Any], HeadersMixin): + + POST_METHODS = { + hdrs.METH_PATCH, + hdrs.METH_POST, + hdrs.METH_PUT, + hdrs.METH_TRACE, + hdrs.METH_DELETE, + } + + ATTRS = HeadersMixin.ATTRS | frozenset( + [ + "_message", + "_protocol", + "_payload_writer", + "_payload", + "_headers", + "_method", + "_version", + "_rel_url", + "_post", + "_read_bytes", + "_state", + "_cache", + "_task", + "_client_max_size", + "_loop", + "_transport_sslcontext", + "_transport_peername", + ] + ) + _post: Optional[MultiDictProxy[Union[str, bytes, FileField]]] = None + _read_bytes: Optional[bytes] = None + + def __init__( + self, + message: RawRequestMessage, + payload: StreamReader, + protocol: "RequestHandler", + payload_writer: AbstractStreamWriter, + task: "asyncio.Task[None]", + loop: asyncio.AbstractEventLoop, + *, + client_max_size: int = 1024**2, + state: Optional[Dict[str, Any]] = None, + scheme: Optional[str] = None, + host: Optional[str] = None, + remote: Optional[str] = None, + ) -> None: + self._message = message + self._protocol = protocol + self._payload_writer = payload_writer + + self._payload = payload + self._headers: CIMultiDictProxy[str] = message.headers + self._method = message.method + self._version = message.version + self._cache: Dict[str, Any] = {} + url = message.url + if url.absolute: + if scheme is not None: + url = url.with_scheme(scheme) + if host is not None: + url = url.with_host(host) + # absolute URL is given, + # override auto-calculating url, host, and scheme + # all other properties should be good + self._cache["url"] = url + self._cache["host"] = url.host + self._cache["scheme"] = url.scheme + self._rel_url = url.relative() + else: + self._rel_url = url + if scheme is not None: + self._cache["scheme"] = scheme + if host is not None: + self._cache["host"] = 
host + + self._state = {} if state is None else state + self._task = task + self._client_max_size = client_max_size + self._loop = loop + + self._transport_sslcontext = protocol.ssl_context + self._transport_peername = protocol.peername + + if remote is not None: + self._cache["remote"] = remote + + def clone( + self, + *, + method: Union[str, _SENTINEL] = sentinel, + rel_url: Union[StrOrURL, _SENTINEL] = sentinel, + headers: Union[LooseHeaders, _SENTINEL] = sentinel, + scheme: Union[str, _SENTINEL] = sentinel, + host: Union[str, _SENTINEL] = sentinel, + remote: Union[str, _SENTINEL] = sentinel, + client_max_size: Union[int, _SENTINEL] = sentinel, + ) -> "BaseRequest": + """Clone itself with replacement some attributes. + + Creates and returns a new instance of Request object. If no parameters + are given, an exact copy is returned. If a parameter is not passed, it + will reuse the one from the current request object. + """ + if self._read_bytes: + raise RuntimeError("Cannot clone request after reading its content") + + dct: Dict[str, Any] = {} + if method is not sentinel: + dct["method"] = method + if rel_url is not sentinel: + new_url: URL = URL(rel_url) + dct["url"] = new_url + dct["path"] = str(new_url) + if headers is not sentinel: + # a copy semantic + dct["headers"] = CIMultiDictProxy(CIMultiDict(headers)) + dct["raw_headers"] = tuple( + (k.encode("utf-8"), v.encode("utf-8")) + for k, v in dct["headers"].items() + ) + + message = self._message._replace(**dct) + + kwargs = {} + if scheme is not sentinel: + kwargs["scheme"] = scheme + if host is not sentinel: + kwargs["host"] = host + if remote is not sentinel: + kwargs["remote"] = remote + if client_max_size is sentinel: + client_max_size = self._client_max_size + + return self.__class__( + message, + self._payload, + self._protocol, + self._payload_writer, + self._task, + self._loop, + client_max_size=client_max_size, + state=self._state.copy(), + **kwargs, + ) + + @property + def task(self) -> 
"asyncio.Task[None]": + return self._task + + @property + def protocol(self) -> "RequestHandler": + return self._protocol + + @property + def transport(self) -> Optional[asyncio.Transport]: + if self._protocol is None: + return None + return self._protocol.transport + + @property + def writer(self) -> AbstractStreamWriter: + return self._payload_writer + + @property + def client_max_size(self) -> int: + return self._client_max_size + + @reify + def message(self) -> RawRequestMessage: + warnings.warn("Request.message is deprecated", DeprecationWarning, stacklevel=3) + return self._message + + @reify + def rel_url(self) -> URL: + return self._rel_url + + @reify + def loop(self) -> asyncio.AbstractEventLoop: + warnings.warn( + "request.loop property is deprecated", DeprecationWarning, stacklevel=2 + ) + return self._loop + + # MutableMapping API + + def __getitem__(self, key: str) -> Any: + return self._state[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._state[key] = value + + def __delitem__(self, key: str) -> None: + del self._state[key] + + def __len__(self) -> int: + return len(self._state) + + def __iter__(self) -> Iterator[str]: + return iter(self._state) + + ######## + + @reify + def secure(self) -> bool: + """A bool indicating if the request is handled with SSL.""" + return self.scheme == "https" + + @reify + def forwarded(self) -> Tuple[Mapping[str, str], ...]: + """A tuple containing all parsed Forwarded header(s). + + Makes an effort to parse Forwarded headers as specified by RFC 7239: + + - It adds one (immutable) dictionary per Forwarded 'field-value', ie + per proxy. The element corresponds to the data in the Forwarded + field-value added by the first proxy encountered by the client. Each + subsequent item corresponds to those added by later proxies. + - It checks that every value has valid syntax in general as specified + in section 4: either a 'token' or a 'quoted-string'. + - It un-escapes found escape sequences. 
+ - It does NOT validate 'by' and 'for' contents as specified in section + 6. + - It does NOT validate 'host' contents (Host ABNF). + - It does NOT validate 'proto' contents for valid URI scheme names. + + Returns a tuple containing one or more immutable dicts + """ + elems = [] + for field_value in self._message.headers.getall(hdrs.FORWARDED, ()): + length = len(field_value) + pos = 0 + need_separator = False + elem: Dict[str, str] = {} + elems.append(types.MappingProxyType(elem)) + while 0 <= pos < length: + match = _FORWARDED_PAIR_RE.match(field_value, pos) + if match is not None: # got a valid forwarded-pair + if need_separator: + # bad syntax here, skip to next comma + pos = field_value.find(",", pos) + else: + name, value, port = match.groups() + if value[0] == '"': + # quoted string: remove quotes and unescape + value = _QUOTED_PAIR_REPLACE_RE.sub(r"\1", value[1:-1]) + if port: + value += port + elem[name.lower()] = value + pos += len(match.group(0)) + need_separator = True + elif field_value[pos] == ",": # next forwarded-element + need_separator = False + elem = {} + elems.append(types.MappingProxyType(elem)) + pos += 1 + elif field_value[pos] == ";": # next forwarded-pair + need_separator = False + pos += 1 + elif field_value[pos] in " \t": + # Allow whitespace even between forwarded-pairs, though + # RFC 7239 doesn't. This simplifies code and is in line + # with Postel's law. + pos += 1 + else: + # bad syntax here, skip to next comma + pos = field_value.find(",", pos) + return tuple(elems) + + @reify + def scheme(self) -> str: + """A string representing the scheme of the request. + + Hostname is resolved in this order: + + - overridden value by .clone(scheme=new_scheme) call. + - type of connection to peer: HTTPS if socket is SSL, HTTP otherwise. + + 'http' or 'https'. + """ + if self._transport_sslcontext: + return "https" + else: + return "http" + + @reify + def method(self) -> str: + """Read only property for getting HTTP method. 
+ + The value is upper-cased str like 'GET', 'POST', 'PUT' etc. + """ + return self._method + + @reify + def version(self) -> HttpVersion: + """Read only property for getting HTTP version of request. + + Returns aiohttp.protocol.HttpVersion instance. + """ + return self._version + + @reify + def host(self) -> str: + """Hostname of the request. + + Hostname is resolved in this order: + + - overridden value by .clone(host=new_host) call. + - HOST HTTP header + - socket.getfqdn() value + + For example, 'example.com' or 'localhost:8080'. + + For historical reasons, the port number may be included. + """ + host = self._message.headers.get(hdrs.HOST) + if host is not None: + return host + return socket.getfqdn() + + @reify + def remote(self) -> Optional[str]: + """Remote IP of client initiated HTTP request. + + The IP is resolved in this order: + + - overridden value by .clone(remote=new_remote) call. + - peername of opened socket + """ + if self._transport_peername is None: + return None + if isinstance(self._transport_peername, (list, tuple)): + return str(self._transport_peername[0]) + return str(self._transport_peername) + + @reify + def url(self) -> URL: + """The full URL of the request.""" + # authority is used here because it may include the port number + # and we want yarl to parse it correctly + return URL.build(scheme=self.scheme, authority=self.host).join(self._rel_url) + + @reify + def path(self) -> str: + """The URL including *PATH INFO* without the host or scheme. + + E.g., ``/app/blog`` + """ + return self._rel_url.path + + @reify + def path_qs(self) -> str: + """The URL including PATH_INFO and the query string. + + E.g, /app/blog?id=10 + """ + return str(self._rel_url) + + @reify + def raw_path(self) -> str: + """The URL including raw *PATH INFO* without the host or scheme. 
+ + Warning, the path is unquoted and may contains non valid URL characters + + E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters`` + """ + return self._message.path + + @reify + def query(self) -> "MultiMapping[str]": + """A multidict with all the variables in the query string.""" + return self._rel_url.query + + @reify + def query_string(self) -> str: + """The query string in the URL. + + E.g., id=10 + """ + return self._rel_url.query_string + + @reify + def headers(self) -> CIMultiDictProxy[str]: + """A case-insensitive multidict proxy with all headers.""" + return self._headers + + @reify + def raw_headers(self) -> RawHeaders: + """A sequence of pairs for all headers.""" + return self._message.raw_headers + + @reify + def if_modified_since(self) -> Optional[datetime.datetime]: + """The value of If-Modified-Since HTTP header, or None. + + This header is represented as a `datetime` object. + """ + return parse_http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE)) + + @reify + def if_unmodified_since(self) -> Optional[datetime.datetime]: + """The value of If-Unmodified-Since HTTP header, or None. + + This header is represented as a `datetime` object. + """ + return parse_http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE)) + + @staticmethod + def _etag_values(etag_header: str) -> Iterator[ETag]: + """Extract `ETag` objects from raw header.""" + if etag_header == ETAG_ANY: + yield ETag( + is_weak=False, + value=ETAG_ANY, + ) + else: + for match in LIST_QUOTED_ETAG_RE.finditer(etag_header): + is_weak, value, garbage = match.group(2, 3, 4) + # Any symbol captured by 4th group means + # that the following sequence is invalid. 
+ if garbage: + break + + yield ETag( + is_weak=bool(is_weak), + value=value, + ) + + @classmethod + def _if_match_or_none_impl( + cls, header_value: Optional[str] + ) -> Optional[Tuple[ETag, ...]]: + if not header_value: + return None + + return tuple(cls._etag_values(header_value)) + + @reify + def if_match(self) -> Optional[Tuple[ETag, ...]]: + """The value of If-Match HTTP header, or None. + + This header is represented as a `tuple` of `ETag` objects. + """ + return self._if_match_or_none_impl(self.headers.get(hdrs.IF_MATCH)) + + @reify + def if_none_match(self) -> Optional[Tuple[ETag, ...]]: + """The value of If-None-Match HTTP header, or None. + + This header is represented as a `tuple` of `ETag` objects. + """ + return self._if_match_or_none_impl(self.headers.get(hdrs.IF_NONE_MATCH)) + + @reify + def if_range(self) -> Optional[datetime.datetime]: + """The value of If-Range HTTP header, or None. + + This header is represented as a `datetime` object. + """ + return parse_http_date(self.headers.get(hdrs.IF_RANGE)) + + @reify + def keep_alive(self) -> bool: + """Is keepalive enabled by client?""" + return not self._message.should_close + + @reify + def cookies(self) -> Mapping[str, str]: + """Return request cookies. + + A read-only dictionary-like object. + """ + # Use parse_cookie_header for RFC 6265 compliant Cookie header parsing + # that accepts special characters in cookie names (fixes #2683) + parsed = parse_cookie_header(self.headers.get(hdrs.COOKIE, "")) + # Extract values from Morsel objects + return MappingProxyType({name: morsel.value for name, morsel in parsed}) + + @reify + def http_range(self) -> slice: + """The content of Range HTTP header. + + Return a slice instance. 
+ + """ + rng = self._headers.get(hdrs.RANGE) + start, end = None, None + if rng is not None: + try: + pattern = r"^bytes=(\d*)-(\d*)$" + start, end = re.findall(pattern, rng)[0] + except IndexError: # pattern was not found in header + raise ValueError("range not in acceptable format") + + end = int(end) if end else None + start = int(start) if start else None + + if start is None and end is not None: + # end with no start is to return tail of content + start = -end + end = None + + if start is not None and end is not None: + # end is inclusive in range header, exclusive for slice + end += 1 + + if start >= end: + raise ValueError("start cannot be after end") + + if start is end is None: # No valid range supplied + raise ValueError("No start or end of range specified") + + return slice(start, end, 1) + + @reify + def content(self) -> StreamReader: + """Return raw payload stream.""" + return self._payload + + @property + def has_body(self) -> bool: + """Return True if request's HTTP BODY can be read, False otherwise.""" + warnings.warn( + "Deprecated, use .can_read_body #2005", DeprecationWarning, stacklevel=2 + ) + return not self._payload.at_eof() + + @property + def can_read_body(self) -> bool: + """Return True if request's HTTP BODY can be read, False otherwise.""" + return not self._payload.at_eof() + + @reify + def body_exists(self) -> bool: + """Return True if request has HTTP BODY, False otherwise.""" + return type(self._payload) is not EmptyStreamReader + + async def release(self) -> None: + """Release request. + + Eat unread part of HTTP BODY if present. + """ + while not self._payload.at_eof(): + await self._payload.readany() + + async def read(self) -> bytes: + """Read request body if present. + + Returns bytes object with full request content. 
+ """ + if self._read_bytes is None: + body = bytearray() + while True: + chunk = await self._payload.readany() + body.extend(chunk) + if self._client_max_size: + body_size = len(body) + if body_size >= self._client_max_size: + raise HTTPRequestEntityTooLarge( + max_size=self._client_max_size, actual_size=body_size + ) + if not chunk: + break + self._read_bytes = bytes(body) + return self._read_bytes + + async def text(self) -> str: + """Return BODY as text using encoding from .charset.""" + bytes_body = await self.read() + encoding = self.charset or "utf-8" + return bytes_body.decode(encoding) + + async def json(self, *, loads: JSONDecoder = DEFAULT_JSON_DECODER) -> Any: + """Return BODY as JSON.""" + body = await self.text() + return loads(body) + + async def multipart(self) -> MultipartReader: + """Return async iterator to process BODY as multipart.""" + return MultipartReader(self._headers, self._payload) + + async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]": + """Return POST parameters.""" + if self._post is not None: + return self._post + if self._method not in self.POST_METHODS: + self._post = MultiDictProxy(MultiDict()) + return self._post + + content_type = self.content_type + if content_type not in ( + "", + "application/x-www-form-urlencoded", + "multipart/form-data", + ): + self._post = MultiDictProxy(MultiDict()) + return self._post + + out: MultiDict[Union[str, bytes, FileField]] = MultiDict() + + if content_type == "multipart/form-data": + multipart = await self.multipart() + max_size = self._client_max_size + + field = await multipart.next() + while field is not None: + size = 0 + field_ct = field.headers.get(hdrs.CONTENT_TYPE) + + if isinstance(field, BodyPartReader): + assert field.name is not None + + # Note that according to RFC 7578, the Content-Type header + # is optional, even for files, so we can't assume it's + # present. 
+ # https://tools.ietf.org/html/rfc7578#section-4.4 + if field.filename: + # store file in temp file + tmp = await self._loop.run_in_executor( + None, tempfile.TemporaryFile + ) + chunk = await field.read_chunk(size=2**16) + while chunk: + chunk = field.decode(chunk) + await self._loop.run_in_executor(None, tmp.write, chunk) + size += len(chunk) + if 0 < max_size < size: + await self._loop.run_in_executor(None, tmp.close) + raise HTTPRequestEntityTooLarge( + max_size=max_size, actual_size=size + ) + chunk = await field.read_chunk(size=2**16) + await self._loop.run_in_executor(None, tmp.seek, 0) + + if field_ct is None: + field_ct = "application/octet-stream" + + ff = FileField( + field.name, + field.filename, + cast(io.BufferedReader, tmp), + field_ct, + field.headers, + ) + out.add(field.name, ff) + else: + # deal with ordinary data + value = await field.read(decode=True) + if field_ct is None or field_ct.startswith("text/"): + charset = field.get_charset(default="utf-8") + out.add(field.name, value.decode(charset)) + else: + out.add(field.name, value) + size += len(value) + if 0 < max_size < size: + raise HTTPRequestEntityTooLarge( + max_size=max_size, actual_size=size + ) + else: + raise ValueError( + "To decode nested multipart you need to use custom reader", + ) + + field = await multipart.next() + else: + data = await self.read() + if data: + charset = self.charset or "utf-8" + out.extend( + parse_qsl( + data.rstrip().decode(charset), + keep_blank_values=True, + encoding=charset, + ) + ) + + self._post = MultiDictProxy(out) + return self._post + + def get_extra_info(self, name: str, default: Any = None) -> Any: + """Extra info from protocol transport""" + protocol = self._protocol + if protocol is None: + return default + + transport = protocol.transport + if transport is None: + return default + + return transport.get_extra_info(name, default) + + def __repr__(self) -> str: + ascii_encodable_path = self.path.encode("ascii", "backslashreplace").decode( + 
"ascii" + ) + return "<{} {} {} >".format( + self.__class__.__name__, self._method, ascii_encodable_path + ) + + def __eq__(self, other: object) -> bool: + return id(self) == id(other) + + def __bool__(self) -> bool: + return True + + async def _prepare_hook(self, response: StreamResponse) -> None: + return + + def _cancel(self, exc: BaseException) -> None: + set_exception(self._payload, exc) + + def _finish(self) -> None: + if self._post is None or self.content_type != "multipart/form-data": + return + + # NOTE: Release file descriptors for the + # NOTE: `tempfile.Temporaryfile`-created `_io.BufferedRandom` + # NOTE: instances of files sent within multipart request body + # NOTE: via HTTP POST request. + for file_name, file_field_object in self._post.items(): + if isinstance(file_field_object, FileField): + file_field_object.file.close() + + +class Request(BaseRequest): + + ATTRS = BaseRequest.ATTRS | frozenset(["_match_info"]) + + _match_info: Optional["UrlMappingMatchInfo"] = None + + if DEBUG: + + def __setattr__(self, name: str, val: Any) -> None: + if name not in self.ATTRS: + warnings.warn( + "Setting custom {}.{} attribute " + "is discouraged".format(self.__class__.__name__, name), + DeprecationWarning, + stacklevel=2, + ) + super().__setattr__(name, val) + + def clone( + self, + *, + method: Union[str, _SENTINEL] = sentinel, + rel_url: Union[StrOrURL, _SENTINEL] = sentinel, + headers: Union[LooseHeaders, _SENTINEL] = sentinel, + scheme: Union[str, _SENTINEL] = sentinel, + host: Union[str, _SENTINEL] = sentinel, + remote: Union[str, _SENTINEL] = sentinel, + client_max_size: Union[int, _SENTINEL] = sentinel, + ) -> "Request": + ret = super().clone( + method=method, + rel_url=rel_url, + headers=headers, + scheme=scheme, + host=host, + remote=remote, + client_max_size=client_max_size, + ) + new_ret = cast(Request, ret) + new_ret._match_info = self._match_info + return new_ret + + @reify + def match_info(self) -> "UrlMappingMatchInfo": + """Result of route 
resolving.""" + match_info = self._match_info + assert match_info is not None + return match_info + + @property + def app(self) -> "Application": + """Application instance.""" + match_info = self._match_info + assert match_info is not None + return match_info.current_app + + @property + def config_dict(self) -> ChainMapProxy: + match_info = self._match_info + assert match_info is not None + lst = match_info.apps + app = self.app + idx = lst.index(app) + sublist = list(reversed(lst[: idx + 1])) + return ChainMapProxy(sublist) + + async def _prepare_hook(self, response: StreamResponse) -> None: + match_info = self._match_info + if match_info is None: + return + for app in match_info._apps: + if on_response_prepare := app.on_response_prepare: + await on_response_prepare.send(self, response) diff --git a/venv/lib/python3.10/site-packages/aiohttp/web_response.py b/venv/lib/python3.10/site-packages/aiohttp/web_response.py new file mode 100644 index 0000000000000000000000000000000000000000..e5f8b6cd652c36274386cb0ce5586d8a5fdf044c --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/web_response.py @@ -0,0 +1,856 @@ +import asyncio +import collections.abc +import datetime +import enum +import json +import math +import time +import warnings +from concurrent.futures import Executor +from http import HTTPStatus +from http.cookies import SimpleCookie +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + MutableMapping, + Optional, + Union, + cast, +) + +from multidict import CIMultiDict, istr + +from . 
from . import hdrs, payload
from .abc import AbstractStreamWriter
from .compression_utils import ZLibCompressor
from .helpers import (
    ETAG_ANY,
    QUOTED_ETAG_RE,
    ETag,
    HeadersMixin,
    must_be_empty_body,
    parse_http_date,
    rfc822_formatted_time,
    sentinel,
    should_remove_content_length,
    validate_etag_value,
)
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11
from .payload import Payload
from .typedefs import JSONEncoder, LooseHeaders

# Map of numeric status -> standard reason phrase (e.g. 200 -> "OK").
REASON_PHRASES = {http_status.value: http_status.phrase for http_status in HTTPStatus}
# Bodies above this size trigger a warning when compressed synchronously.
LARGE_BODY_SIZE = 1024**2

__all__ = ("ContentCoding", "StreamResponse", "Response", "json_response")


if TYPE_CHECKING:
    from .web_request import BaseRequest

    BaseClass = MutableMapping[str, Any]
else:
    BaseClass = collections.abc.MutableMapping


# TODO(py311): Convert to StrEnum for wider use
class ContentCoding(enum.Enum):
    # The content codings that we have support for.
    #
    # Additional registered codings are listed at:
    # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
    deflate = "deflate"
    gzip = "gzip"
    identity = "identity"


CONTENT_CODINGS = {coding.value: coding for coding in ContentCoding}

############################################################
# HTTP Response classes
############################################################


class StreamResponse(BaseClass, HeadersMixin):
    """Streamed HTTP response.

    Acts as a MutableMapping for per-response user state and drives the
    header preparation / body writing lifecycle via prepare()/write()/
    write_eof().
    """

    _body: Union[None, bytes, bytearray, Payload]
    _length_check = True  # subclasses (e.g. WebSockets) may disable
    _body = None
    _keep_alive: Optional[bool] = None
    _chunked: bool = False
    _compression: bool = False
    _compression_strategy: Optional[int] = None
    _compression_force: Optional[ContentCoding] = None
    _req: Optional["BaseRequest"] = None
    _payload_writer: Optional[AbstractStreamWriter] = None
    _eof_sent: bool = False
    _must_be_empty_body: Optional[bool] = None
    _body_length = 0
    _cookies: Optional[SimpleCookie] = None  # created lazily
    _send_headers_immediately = True

    def __init__(
        self,
        *,
        status: int = 200,
        reason: Optional[str] = None,
        headers: Optional[LooseHeaders] = None,
        _real_headers: Optional[CIMultiDict[str]] = None,
    ) -> None:
        """Initialize a new stream response object.

        _real_headers is an internal parameter used to pass a pre-populated
        headers object. It is used by the `Response` class to avoid copying
        the headers when creating a new response object. It is not intended
        to be used by external code.
        """
        self._state: Dict[str, Any] = {}

        if _real_headers is not None:
            self._headers = _real_headers
        elif headers is not None:
            self._headers: CIMultiDict[str] = CIMultiDict(headers)
        else:
            self._headers = CIMultiDict()

        self._set_status(status, reason)

    @property
    def prepared(self) -> bool:
        # True once headers were written (or the response fully finished).
        return self._eof_sent or self._payload_writer is not None

    @property
    def task(self) -> "Optional[asyncio.Task[None]]":
        if self._req:
            return self._req.task
        else:
            return None

    @property
    def status(self) -> int:
        return self._status

    @property
    def chunked(self) -> bool:
        return self._chunked

    @property
    def compression(self) -> bool:
        return self._compression

    @property
    def reason(self) -> str:
        return self._reason

    def set_status(
        self,
        status: int,
        reason: Optional[str] = None,
    ) -> None:
        assert (
            not self.prepared
        ), "Cannot change the response status code after the headers have been sent"
        self._set_status(status, reason)

    def _set_status(self, status: int, reason: Optional[str]) -> None:
        self._status = int(status)
        if reason is None:
            # Fall back to the standard phrase for this status, if any.
            reason = REASON_PHRASES.get(self._status, "")
        elif "\n" in reason:
            # A newline would allow header-injection via the status line.
            raise ValueError("Reason cannot contain \\n")
        self._reason = reason

    @property
    def keep_alive(self) -> Optional[bool]:
        return self._keep_alive

    def force_close(self) -> None:
        self._keep_alive = False

    @property
    def body_length(self) -> int:
        return self._body_length

    @property
    def output_length(self) -> int:
        warnings.warn("output_length is deprecated", DeprecationWarning)
        assert self._payload_writer
        return self._payload_writer.buffer_size

    def enable_chunked_encoding(self, chunk_size: Optional[int] = None) -> None:
        """Enables automatic chunked transfer encoding."""
        if hdrs.CONTENT_LENGTH in self._headers:
            raise RuntimeError(
                "You can't enable chunked encoding when a content length is set"
            )
        if chunk_size is not None:
            warnings.warn("Chunk size is deprecated #1615", DeprecationWarning)
        self._chunked = True

    def enable_compression(
        self,
        force: Optional[Union[bool, ContentCoding]] = None,
        strategy: Optional[int] = None,
    ) -> None:
        """Enables response compression encoding."""
        # Backwards compatibility for when force was a bool <0.17.
        if isinstance(force, bool):
            force = ContentCoding.deflate if force else ContentCoding.identity
            warnings.warn(
                "Using boolean for force is deprecated #3318", DeprecationWarning
            )
        elif force is not None:
            assert isinstance(
                force, ContentCoding
            ), "force should one of None, bool or ContentEncoding"

        self._compression = True
        self._compression_force = force
        self._compression_strategy = strategy

    @property
    def headers(self) -> "CIMultiDict[str]":
        return self._headers

    @property
    def cookies(self) -> SimpleCookie:
        # Lazily created so the common no-cookie case allocates nothing.
        if self._cookies is None:
            self._cookies = SimpleCookie()
        return self._cookies

    def set_cookie(
        self,
        name: str,
        value: str,
        *,
        expires: Optional[str] = None,
        domain: Optional[str] = None,
        max_age: Optional[Union[int, str]] = None,
        path: str = "/",
        secure: Optional[bool] = None,
        httponly: Optional[bool] = None,
        version: Optional[str] = None,
        samesite: Optional[str] = None,
        partitioned: Optional[bool] = None,
    ) -> None:
        """Set or update response cookie.

        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        if self._cookies is None:
            self._cookies = SimpleCookie()

        self._cookies[name] = value
        c = self._cookies[name]

        if expires is not None:
            c["expires"] = expires
        elif c.get("expires") == "Thu, 01 Jan 1970 00:00:00 GMT":
            # Re-setting a cookie previously deleted via del_cookie():
            # drop the epoch expiry so the cookie becomes live again.
            del c["expires"]

        if domain is not None:
            c["domain"] = domain

        if max_age is not None:
            c["max-age"] = str(max_age)
        elif "max-age" in c:
            del c["max-age"]

        c["path"] = path

        if secure is not None:
            c["secure"] = secure
        if httponly is not None:
            c["httponly"] = httponly
        if version is not None:
            c["version"] = version
        if samesite is not None:
            c["samesite"] = samesite

        if partitioned is not None:
            c["partitioned"] = partitioned

    def del_cookie(
        self,
        name: str,
        *,
        domain: Optional[str] = None,
        path: str = "/",
        secure: Optional[bool] = None,
        httponly: Optional[bool] = None,
        samesite: Optional[str] = None,
    ) -> None:
        """Delete cookie.

        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        if self._cookies is not None:
            self._cookies.pop(name, None)
        self.set_cookie(
            name,
            "",
            max_age=0,
            expires="Thu, 01 Jan 1970 00:00:00 GMT",
            domain=domain,
            path=path,
            secure=secure,
            httponly=httponly,
            samesite=samesite,
        )

    @property
    def content_length(self) -> Optional[int]:
        # Just a placeholder for adding setter
        return super().content_length

    @content_length.setter
    def content_length(self, value: Optional[int]) -> None:
        if value is not None:
            value = int(value)
            if self._chunked:
                raise RuntimeError(
                    "You can't set content length when chunked encoding is enable"
                )
            self._headers[hdrs.CONTENT_LENGTH] = str(value)
        else:
            self._headers.pop(hdrs.CONTENT_LENGTH, None)

    @property
    def content_type(self) -> str:
        # Just a placeholder for adding setter
        return super().content_type

    @content_type.setter
    def content_type(self, value: str) -> None:
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()

    @property
    def charset(self) -> Optional[str]:
        # Just a placeholder for adding setter
        return super().charset

    @charset.setter
    def charset(self, value: Optional[str]) -> None:
        ctype = self.content_type  # read header values if needed
        if ctype == "application/octet-stream":
            raise RuntimeError(
                "Setting charset for application/octet-stream "
                "doesn't make sense, setup content_type first"
            )
        assert self._content_dict is not None
        if value is None:
            self._content_dict.pop("charset", None)
        else:
            self._content_dict["charset"] = str(value).lower()
        self._generate_content_type_header()

    @property
    def last_modified(self) -> Optional[datetime.datetime]:
        """The value of Last-Modified HTTP header, or None.

        This header is represented as a `datetime` object.
        """
        return parse_http_date(self._headers.get(hdrs.LAST_MODIFIED))

    @last_modified.setter
    def last_modified(
        self, value: Optional[Union[int, float, datetime.datetime, str]]
    ) -> None:
        if value is None:
            self._headers.pop(hdrs.LAST_MODIFIED, None)
        elif isinstance(value, (int, float)):
            # Numeric value is a Unix timestamp; format as an HTTP-date.
            self._headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value))
            )
        elif isinstance(value, datetime.datetime):
            self._headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple()
            )
        elif isinstance(value, str):
            self._headers[hdrs.LAST_MODIFIED] = value
        else:
            msg = f"Unsupported type for last_modified: {type(value).__name__}"
            raise TypeError(msg)

    @property
    def etag(self) -> Optional[ETag]:
        quoted_value = self._headers.get(hdrs.ETAG)
        if not quoted_value:
            return None
        elif quoted_value == ETAG_ANY:
            return ETag(value=ETAG_ANY)
        match = QUOTED_ETAG_RE.fullmatch(quoted_value)
        if not match:
            return None
        is_weak, value = match.group(1, 2)
        return ETag(
            is_weak=bool(is_weak),
            value=value,
        )

    @etag.setter
    def etag(self, value: Optional[Union[ETag, str]]) -> None:
        if value is None:
            self._headers.pop(hdrs.ETAG, None)
        elif (isinstance(value, str) and value == ETAG_ANY) or (
            isinstance(value, ETag) and value.value == ETAG_ANY
        ):
            self._headers[hdrs.ETAG] = ETAG_ANY
        elif isinstance(value, str):
            validate_etag_value(value)
            self._headers[hdrs.ETAG] = f'"{value}"'
        elif isinstance(value, ETag) and isinstance(value.value, str):
            validate_etag_value(value.value)
            hdr_value = f'W/"{value.value}"' if value.is_weak else f'"{value.value}"'
            self._headers[hdrs.ETAG] = hdr_value
        else:
            raise ValueError(
                f"Unsupported etag type: {type(value)}. "
                f"etag must be str, ETag or None"
            )

    def _generate_content_type_header(
        self, CONTENT_TYPE: istr = hdrs.CONTENT_TYPE
    ) -> None:
        # Rebuild the Content-Type header from the parsed type + params cache.
        assert self._content_dict is not None
        assert self._content_type is not None
        params = "; ".join(f"{k}={v}" for k, v in self._content_dict.items())
        if params:
            ctype = self._content_type + "; " + params
        else:
            ctype = self._content_type
        self._headers[CONTENT_TYPE] = ctype

    async def _do_start_compression(self, coding: ContentCoding) -> None:
        if coding is ContentCoding.identity:
            return
        assert self._payload_writer is not None
        self._headers[hdrs.CONTENT_ENCODING] = coding.value
        self._payload_writer.enable_compression(
            coding.value, self._compression_strategy
        )
        # Compressed payload may have different content length,
        # remove the header
        self._headers.popall(hdrs.CONTENT_LENGTH, None)

    async def _start_compression(self, request: "BaseRequest") -> None:
        if self._compression_force:
            await self._do_start_compression(self._compression_force)
            return
        # Encoding comparisons should be case-insensitive
        # https://www.rfc-editor.org/rfc/rfc9110#section-8.4.1
        accept_encoding = request.headers.get(hdrs.ACCEPT_ENCODING, "").lower()
        for value, coding in CONTENT_CODINGS.items():
            if value in accept_encoding:
                await self._do_start_compression(coding)
                return

    async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
        """Send the response headers; idempotent after the first call."""
        if self._eof_sent:
            return None
        if self._payload_writer is not None:
            return self._payload_writer
        self._must_be_empty_body = must_be_empty_body(request.method, self.status)
        return await self._start(request)

    async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
        self._req = request
        writer = self._payload_writer = request._payload_writer

        await self._prepare_headers()
        await request._prepare_hook(self)
        await self._write_headers()

        return writer

    async def _prepare_headers(self) -> None:
        # Finalize cookies, compression, transfer-encoding / content-length
        # and connection headers before the status line is written.
        request = self._req
        assert request is not None
        writer = self._payload_writer
        assert writer is not None
        keep_alive = self._keep_alive
        if keep_alive is None:
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive

        version = request.version

        headers = self._headers
        if self._cookies:
            for cookie in self._cookies.values():
                value = cookie.output(header="")[1:]
                headers.add(hdrs.SET_COOKIE, value)

        if self._compression:
            await self._start_compression(request)

        if self._chunked:
            if version != HttpVersion11:
                raise RuntimeError(
                    "Using chunked encoding is forbidden "
                    "for HTTP/{0.major}.{0.minor}".format(request.version)
                )
            if not self._must_be_empty_body:
                writer.enable_chunking()
                headers[hdrs.TRANSFER_ENCODING] = "chunked"
        elif self._length_check:  # Disabled for WebSockets
            writer.length = self.content_length
            if writer.length is None:
                if version >= HttpVersion11:
                    if not self._must_be_empty_body:
                        writer.enable_chunking()
                        headers[hdrs.TRANSFER_ENCODING] = "chunked"
                elif not self._must_be_empty_body:
                    # HTTP/1.0 with unknown length: signal the end of the
                    # body by closing the connection.
                    keep_alive = False

        # HTTP 1.1: https://tools.ietf.org/html/rfc7230#section-3.3.2
        # HTTP 1.0: https://tools.ietf.org/html/rfc1945#section-10.4
        if self._must_be_empty_body:
            if hdrs.CONTENT_LENGTH in headers and should_remove_content_length(
                request.method, self.status
            ):
                del headers[hdrs.CONTENT_LENGTH]
            # https://datatracker.ietf.org/doc/html/rfc9112#section-6.1-10
            # https://datatracker.ietf.org/doc/html/rfc9112#section-6.1-13
            if hdrs.TRANSFER_ENCODING in headers:
                del headers[hdrs.TRANSFER_ENCODING]
        elif (writer.length if self._length_check else self.content_length) != 0:
            # https://www.rfc-editor.org/rfc/rfc9110#section-8.3-5
            headers.setdefault(hdrs.CONTENT_TYPE, "application/octet-stream")
        headers.setdefault(hdrs.DATE, rfc822_formatted_time())
        headers.setdefault(hdrs.SERVER, SERVER_SOFTWARE)

        # connection header
        if hdrs.CONNECTION not in headers:
            if keep_alive:
                if version == HttpVersion10:
                    headers[hdrs.CONNECTION] = "keep-alive"
            elif version == HttpVersion11:
                headers[hdrs.CONNECTION] = "close"

    async def _write_headers(self) -> None:
        request = self._req
        assert request is not None
        writer = self._payload_writer
        assert writer is not None
        # status line
        version = request.version
        status_line = f"HTTP/{version[0]}.{version[1]} {self._status} {self._reason}"
        await writer.write_headers(status_line, self._headers)
        # Send headers immediately if not opted into buffering
        if self._send_headers_immediately:
            writer.send_headers()

    async def write(self, data: Union[bytes, bytearray, memoryview]) -> None:
        assert isinstance(
            data, (bytes, bytearray, memoryview)
        ), "data argument must be byte-ish (%r)" % type(data)

        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._payload_writer is None:
            raise RuntimeError("Cannot call write() before prepare()")

        await self._payload_writer.write(data)

    async def drain(self) -> None:
        assert not self._eof_sent, "EOF has already been sent"
        assert self._payload_writer is not None, "Response has not been started"
        warnings.warn(
            "drain method is deprecated, use await resp.write()",
            DeprecationWarning,
            stacklevel=2,
        )
        await self._payload_writer.drain()

    async def write_eof(self, data: bytes = b"") -> None:
        assert isinstance(
            data, (bytes, bytearray, memoryview)
        ), "data argument must be byte-ish (%r)" % type(data)

        if self._eof_sent:
            return

        assert self._payload_writer is not None, "Response has not been started"

        await self._payload_writer.write_eof(data)
        self._eof_sent = True
        # Drop request/writer references so they can be garbage collected.
        self._req = None
        self._body_length = self._payload_writer.output_size
        self._payload_writer = None

    def __repr__(self) -> str:
        if self._eof_sent:
            info = "eof"
        elif self.prepared:
            assert self._req is not None
            info = f"{self._req.method} {self._req.path} "
        else:
            info = "not prepared"
        return f"<{self.__class__.__name__} {self.reason} {info}>"

    def __getitem__(self, key: str) -> Any:
        return self._state[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self._state[key] = value

    def __delitem__(self, key: str) -> None:
        del self._state[key]

    def __len__(self) -> int:
        return len(self._state)

    def __iter__(self) -> Iterator[str]:
        return iter(self._state)

    def __hash__(self) -> int:
        return hash(id(self))

    def __eq__(self, other: object) -> bool:
        return self is other

    def __bool__(self) -> bool:
        return True
class Response(StreamResponse):
    """HTTP response with an in-memory body (bytes, text or a Payload)."""

    _compressed_body: Optional[bytes] = None
    # Headers are buffered so they can be sent together with the body.
    _send_headers_immediately = False

    def __init__(
        self,
        *,
        body: Any = None,
        status: int = 200,
        reason: Optional[str] = None,
        text: Optional[str] = None,
        headers: Optional[LooseHeaders] = None,
        content_type: Optional[str] = None,
        charset: Optional[str] = None,
        zlib_executor_size: Optional[int] = None,
        zlib_executor: Optional[Executor] = None,
    ) -> None:
        """Create a response.

        Exactly one of ``body`` and ``text`` may be given; Content-Type may
        come either from the ``headers`` mapping or from the
        ``content_type``/``charset`` params, never both.
        """
        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together")

        if headers is None:
            real_headers: CIMultiDict[str] = CIMultiDict()
        else:
            real_headers = CIMultiDict(headers)

        if content_type is not None and "charset" in content_type:
            raise ValueError("charset must not be in content_type argument")

        if text is not None:
            if hdrs.CONTENT_TYPE in real_headers:
                if content_type or charset:
                    raise ValueError(
                        "passing both Content-Type header and "
                        "content_type or charset params "
                        "is forbidden"
                    )
            else:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError("text argument must be str (%r)" % type(text))
                if content_type is None:
                    content_type = "text/plain"
                if charset is None:
                    charset = "utf-8"
                real_headers[hdrs.CONTENT_TYPE] = content_type + "; charset=" + charset
                # Encode eagerly and fall through to the body branch below.
                body = text.encode(charset)
                text = None
        elif hdrs.CONTENT_TYPE in real_headers:
            if content_type is not None or charset is not None:
                raise ValueError(
                    "passing both Content-Type header and "
                    "content_type or charset params "
                    "is forbidden"
                )
        elif content_type is not None:
            if charset is not None:
                content_type += "; charset=" + charset
            real_headers[hdrs.CONTENT_TYPE] = content_type

        super().__init__(status=status, reason=reason, _real_headers=real_headers)

        if text is not None:
            self.text = text
        else:
            self.body = body

        self._zlib_executor_size = zlib_executor_size
        self._zlib_executor = zlib_executor

    @property
    def body(self) -> Optional[Union[bytes, Payload]]:
        return self._body

    @body.setter
    def body(self, body: Any) -> None:
        if body is None:
            self._body = None
        elif isinstance(body, (bytes, bytearray)):
            self._body = body
        else:
            # Non-bytes bodies go through the payload registry (files,
            # strings, async generators, ...).
            try:
                self._body = body = payload.PAYLOAD_REGISTRY.get(body)
            except payload.LookupError:
                raise ValueError("Unsupported body type %r" % type(body))

            headers = self._headers

            # set content-type
            if hdrs.CONTENT_TYPE not in headers:
                headers[hdrs.CONTENT_TYPE] = body.content_type

            # copy payload headers
            if body.headers:
                for key, value in body.headers.items():
                    if key not in headers:
                        headers[key] = value

        # Any previously compressed body no longer matches the new body.
        self._compressed_body = None

    @property
    def text(self) -> Optional[str]:
        if self._body is None:
            return None
        # Note: When _body is a Payload (e.g. FilePayload), this may do blocking I/O
        # This is generally safe as most common payloads (BytesPayload, StringPayload)
        # don't do blocking I/O, but be careful with file-based payloads
        return self._body.decode(self.charset or "utf-8")

    @text.setter
    def text(self, text: str) -> None:
        assert text is None or isinstance(
            text, str
        ), "text argument must be str (%r)" % type(text)

        if self.content_type == "application/octet-stream":
            self.content_type = "text/plain"
        if self.charset is None:
            self.charset = "utf-8"

        self._body = text.encode(self.charset)
        self._compressed_body = None

    @property
    def content_length(self) -> Optional[int]:
        if self._chunked:
            return None

        if hdrs.CONTENT_LENGTH in self._headers:
            return int(self._headers[hdrs.CONTENT_LENGTH])

        if self._compressed_body is not None:
            # Return length of the compressed body
            return len(self._compressed_body)
        elif isinstance(self._body, Payload):
            # A payload without content length, or a compressed payload
            return None
        elif self._body is not None:
            return len(self._body)
        else:
            return 0

    @content_length.setter
    def content_length(self, value: Optional[int]) -> None:
        raise RuntimeError("Content length is set automatically")

    async def write_eof(self, data: bytes = b"") -> None:
        if self._eof_sent:
            return
        if self._compressed_body is None:
            body: Optional[Union[bytes, Payload]] = self._body
        else:
            body = self._compressed_body
        assert not data, f"data arg is not supported, got {data!r}"
        assert self._req is not None
        assert self._payload_writer is not None
        if body is None or self._must_be_empty_body:
            await super().write_eof()
        elif isinstance(self._body, Payload):
            await self._body.write(self._payload_writer)
            await self._body.close()
            await super().write_eof()
        else:
            await super().write_eof(cast(bytes, body))

    async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
        # Fill in Content-Length from the known body size before headers go out.
        if hdrs.CONTENT_LENGTH in self._headers:
            if should_remove_content_length(request.method, self.status):
                del self._headers[hdrs.CONTENT_LENGTH]
        elif not self._chunked:
            if isinstance(self._body, Payload):
                if (size := self._body.size) is not None:
                    self._headers[hdrs.CONTENT_LENGTH] = str(size)
            else:
                body_len = len(self._body) if self._body else "0"
                # https://www.rfc-editor.org/rfc/rfc9110.html#section-8.6-7
                if body_len != "0" or (
                    self.status != 304 and request.method not in hdrs.METH_HEAD_ALL
                ):
                    self._headers[hdrs.CONTENT_LENGTH] = str(body_len)

        return await super()._start(request)

    async def _do_start_compression(self, coding: ContentCoding) -> None:
        if self._chunked or isinstance(self._body, Payload):
            return await super()._do_start_compression(coding)
        if coding is ContentCoding.identity:
            return
        # Instead of using _payload_writer.enable_compression,
        # compress the whole body
        compressor = ZLibCompressor(
            encoding=coding.value,
            max_sync_chunk_size=self._zlib_executor_size,
            executor=self._zlib_executor,
        )
        assert self._body is not None
        if self._zlib_executor_size is None and len(self._body) > LARGE_BODY_SIZE:
            warnings.warn(
                "Synchronous compression of large response bodies "
                f"({len(self._body)} bytes) might block the async event loop. "
                "Consider providing a custom value to zlib_executor_size/"
                "zlib_executor response properties or disabling compression on it."
            )
        self._compressed_body = (
            await compressor.compress(self._body) + compressor.flush()
        )
        self._headers[hdrs.CONTENT_ENCODING] = coding.value
        self._headers[hdrs.CONTENT_LENGTH] = str(len(self._compressed_body))
+ ) + self._compressed_body = ( + await compressor.compress(self._body) + compressor.flush() + ) + self._headers[hdrs.CONTENT_ENCODING] = coding.value + self._headers[hdrs.CONTENT_LENGTH] = str(len(self._compressed_body)) + + +def json_response( + data: Any = sentinel, + *, + text: Optional[str] = None, + body: Optional[bytes] = None, + status: int = 200, + reason: Optional[str] = None, + headers: Optional[LooseHeaders] = None, + content_type: str = "application/json", + dumps: JSONEncoder = json.dumps, +) -> Response: + if data is not sentinel: + if text or body: + raise ValueError("only one of data, text, or body should be specified") + else: + text = dumps(data) + return Response( + text=text, + body=body, + status=status, + reason=reason, + headers=headers, + content_type=content_type, + ) diff --git a/venv/lib/python3.10/site-packages/aiohttp/web_routedef.py b/venv/lib/python3.10/site-packages/aiohttp/web_routedef.py new file mode 100644 index 0000000000000000000000000000000000000000..f51b6cd00815a4daeabf7ef269a3225b2b764503 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/web_routedef.py @@ -0,0 +1,214 @@ +import abc +import os # noqa +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Type, + Union, + overload, +) + +import attr + +from . 
from . import hdrs
from .abc import AbstractView
from .typedefs import Handler, PathLike

if TYPE_CHECKING:
    from .web_request import Request
    from .web_response import StreamResponse
    from .web_urldispatcher import AbstractRoute, UrlDispatcher
else:
    Request = StreamResponse = UrlDispatcher = AbstractRoute = None


__all__ = (
    "AbstractRouteDef",
    "RouteDef",
    "StaticDef",
    "RouteTableDef",
    "head",
    "options",
    "get",
    "post",
    "patch",
    "put",
    "delete",
    "route",
    "view",
    "static",
)


class AbstractRouteDef(abc.ABC):
    """A route definition that knows how to register itself on a router."""

    @abc.abstractmethod
    def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
        """Register this definition on *router* and return the created routes."""
        pass  # pragma: no cover


_HandlerType = Union[Type[AbstractView], Handler]


@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
class RouteDef(AbstractRouteDef):
    """Immutable definition of a single (method, path, handler) route."""

    method: str
    path: str
    handler: _HandlerType
    kwargs: Dict[str, Any]

    def __repr__(self) -> str:
        info = []
        for name, value in sorted(self.kwargs.items()):
            info.append(f", {name}={value!r}")
        # FIX: the format string had been truncated and no longer showed the
        # method and path; restore the canonical representation.
        return "<RouteDef {method} {path} -> {handler.__name__!r}{info}>".format(
            method=self.method, path=self.path, handler=self.handler, info="".join(info)
        )

    def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
        """Register via the router's ``add_<method>`` shortcut when one exists."""
        if self.method in hdrs.METH_ALL:
            reg = getattr(router, "add_" + self.method.lower())
            return [reg(self.path, self.handler, **self.kwargs)]
        else:
            return [
                router.add_route(self.method, self.path, self.handler, **self.kwargs)
            ]


@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
class StaticDef(AbstractRouteDef):
    """Immutable definition of a static-files mount (URL prefix -> directory)."""

    prefix: str
    path: PathLike
    kwargs: Dict[str, Any]

    def __repr__(self) -> str:
        info = []
        for name, value in sorted(self.kwargs.items()):
            info.append(f", {name}={value!r}")
        # FIX: restore the truncated format string (see RouteDef.__repr__).
        return "<StaticDef {prefix} -> {path}{info}>".format(
            prefix=self.prefix, path=self.path, info="".join(info)
        )

    def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
        resource = router.add_static(self.prefix, self.path, **self.kwargs)
        routes = resource.get_info().get("routes", {})
        return list(routes.values())
def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Build a RouteDef binding *handler* to *method* requests on *path*."""
    return RouteDef(method, path, handler, kwargs)


def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Shortcut for a HEAD route definition."""
    return route(hdrs.METH_HEAD, path, handler, **kwargs)


def options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Shortcut for an OPTIONS route definition."""
    return route(hdrs.METH_OPTIONS, path, handler, **kwargs)


def get(
    path: str,
    handler: _HandlerType,
    *,
    name: Optional[str] = None,
    allow_head: bool = True,
    **kwargs: Any,
) -> RouteDef:
    """Shortcut for a GET route definition.

    ``allow_head=True`` also answers HEAD requests on the same path.
    """
    return route(
        hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs
    )


def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Shortcut for a POST route definition."""
    return route(hdrs.METH_POST, path, handler, **kwargs)


def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Shortcut for a PUT route definition."""
    return route(hdrs.METH_PUT, path, handler, **kwargs)


def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Shortcut for a PATCH route definition."""
    return route(hdrs.METH_PATCH, path, handler, **kwargs)


def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
    """Shortcut for a DELETE route definition."""
    return route(hdrs.METH_DELETE, path, handler, **kwargs)


def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef:
    """Route every HTTP method on *path* to a class-based view."""
    return route(hdrs.METH_ANY, path, handler, **kwargs)


def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef:
    """Build a StaticDef serving files under *path* at URL *prefix*."""
    return StaticDef(prefix, path, kwargs)


# Type of the decorators produced by RouteTableDef helpers.
_Deco = Callable[[_HandlerType], _HandlerType]
class RouteTableDef(Sequence[AbstractRouteDef]):
    """Route definition table"""

    def __init__(self) -> None:
        # Ordered list of RouteDef/StaticDef entries, registered later via
        # app.add_routes().
        self._items: List[AbstractRouteDef] = []

    def __repr__(self) -> str:
        # FIX: the f-string had been truncated to f""; restore the canonical
        # "<RouteTableDef count=N>" representation.
        return f"<RouteTableDef count={len(self._items)}>"

    @overload
    def __getitem__(self, index: int) -> AbstractRouteDef: ...

    @overload
    def __getitem__(self, index: slice) -> List[AbstractRouteDef]: ...

    def __getitem__(self, index):  # type: ignore[no-untyped-def]
        return self._items[index]

    def __iter__(self) -> Iterator[AbstractRouteDef]:
        return iter(self._items)

    def __len__(self) -> int:
        return len(self._items)

    def __contains__(self, item: object) -> bool:
        return item in self._items

    def route(self, method: str, path: str, **kwargs: Any) -> _Deco:
        """Return a decorator registering its target for *method* on *path*."""

        def inner(handler: _HandlerType) -> _HandlerType:
            self._items.append(RouteDef(method, path, handler, kwargs))
            return handler

        return inner

    def head(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_HEAD, path, **kwargs)

    def get(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_GET, path, **kwargs)

    def post(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_POST, path, **kwargs)

    def put(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_PUT, path, **kwargs)

    def patch(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_PATCH, path, **kwargs)

    def delete(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_DELETE, path, **kwargs)

    def options(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_OPTIONS, path, **kwargs)

    def view(self, path: str, **kwargs: Any) -> _Deco:
        return self.route(hdrs.METH_ANY, path, **kwargs)

    def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None:
        """Append a static-files mount (no decorator; returns None)."""
        self._items.append(StaticDef(prefix, path, kwargs))
Optional, Set + +from yarl import URL + +from .typedefs import PathLike +from .web_app import Application +from .web_server import Server + +if TYPE_CHECKING: + from ssl import SSLContext +else: + try: + from ssl import SSLContext + except ImportError: # pragma: no cover + SSLContext = object # type: ignore[misc,assignment] + +__all__ = ( + "BaseSite", + "TCPSite", + "UnixSite", + "NamedPipeSite", + "SockSite", + "BaseRunner", + "AppRunner", + "ServerRunner", + "GracefulExit", +) + + +class GracefulExit(SystemExit): + code = 1 + + +def _raise_graceful_exit() -> None: + raise GracefulExit() + + +class BaseSite(ABC): + __slots__ = ("_runner", "_ssl_context", "_backlog", "_server") + + def __init__( + self, + runner: "BaseRunner", + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + if runner.server is None: + raise RuntimeError("Call runner.setup() before making a site") + if shutdown_timeout != 60.0: + msg = "shutdown_timeout should be set on BaseRunner" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + runner._shutdown_timeout = shutdown_timeout + self._runner = runner + self._ssl_context = ssl_context + self._backlog = backlog + self._server: Optional[asyncio.AbstractServer] = None + + @property + @abstractmethod + def name(self) -> str: + pass # pragma: no cover + + @abstractmethod + async def start(self) -> None: + self._runner._reg_site(self) + + async def stop(self) -> None: + self._runner._check_site(self) + if self._server is not None: # Maybe not started yet + self._server.close() + + self._runner._unreg_site(self) + + +class TCPSite(BaseSite): + __slots__ = ("_host", "_port", "_reuse_address", "_reuse_port") + + def __init__( + self, + runner: "BaseRunner", + host: Optional[str] = None, + port: Optional[int] = None, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + reuse_address: Optional[bool] = None, + reuse_port: 
Optional[bool] = None, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._host = host + if port is None: + port = 8443 if self._ssl_context else 8080 + self._port = port + self._reuse_address = reuse_address + self._reuse_port = reuse_port + + @property + def name(self) -> str: + scheme = "https" if self._ssl_context else "http" + host = "0.0.0.0" if not self._host else self._host + return str(URL.build(scheme=scheme, host=host, port=self._port)) + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_server( + server, + self._host, + self._port, + ssl=self._ssl_context, + backlog=self._backlog, + reuse_address=self._reuse_address, + reuse_port=self._reuse_port, + ) + + +class UnixSite(BaseSite): + __slots__ = ("_path",) + + def __init__( + self, + runner: "BaseRunner", + path: PathLike, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._path = path + + @property + def name(self) -> str: + scheme = "https" if self._ssl_context else "http" + return f"{scheme}://unix:{self._path}:" + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_unix_server( + server, + self._path, + ssl=self._ssl_context, + backlog=self._backlog, + ) + + +class NamedPipeSite(BaseSite): + __slots__ = ("_path",) + + def __init__( + self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0 + ) -> None: + loop = asyncio.get_event_loop() + if not isinstance( + loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] + ): + raise RuntimeError( + 
"Named Pipes only available in proactor loop under windows" + ) + super().__init__(runner, shutdown_timeout=shutdown_timeout) + self._path = path + + @property + def name(self) -> str: + return self._path + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + _server = await loop.start_serving_pipe( # type: ignore[attr-defined] + server, self._path + ) + self._server = _server[0] + + +class SockSite(BaseSite): + __slots__ = ("_sock", "_name") + + def __init__( + self, + runner: "BaseRunner", + sock: socket.socket, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._sock = sock + scheme = "https" if self._ssl_context else "http" + if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: + name = f"{scheme}://unix:{sock.getsockname()}:" + else: + host, port = sock.getsockname()[:2] + name = str(URL.build(scheme=scheme, host=host, port=port)) + self._name = name + + @property + def name(self) -> str: + return self._name + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_server( + server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog + ) + + +class BaseRunner(ABC): + __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites", "_shutdown_timeout") + + def __init__( + self, + *, + handle_signals: bool = False, + shutdown_timeout: float = 60.0, + **kwargs: Any, + ) -> None: + self._handle_signals = handle_signals + self._kwargs = kwargs + self._server: Optional[Server] = None + self._sites: List[BaseSite] = [] + self._shutdown_timeout = shutdown_timeout + + @property + def server(self) -> Optional[Server]: + return 
self._server + + @property + def addresses(self) -> List[Any]: + ret: List[Any] = [] + for site in self._sites: + server = site._server + if server is not None: + sockets = server.sockets # type: ignore[attr-defined] + if sockets is not None: + for sock in sockets: + ret.append(sock.getsockname()) + return ret + + @property + def sites(self) -> Set[BaseSite]: + return set(self._sites) + + async def setup(self) -> None: + loop = asyncio.get_event_loop() + + if self._handle_signals: + try: + loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit) + loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit) + except NotImplementedError: # pragma: no cover + # add_signal_handler is not implemented on Windows + pass + + self._server = await self._make_server() + + @abstractmethod + async def shutdown(self) -> None: + """Call any shutdown hooks to help server close gracefully.""" + + async def cleanup(self) -> None: + # The loop over sites is intentional, an exception on gather() + # leaves self._sites in unpredictable state. + # The loop guaranties that a site is either deleted on success or + # still present on failure + for site in list(self._sites): + await site.stop() + + if self._server: # If setup succeeded + # Yield to event loop to ensure incoming requests prior to stopping the sites + # have all started to be handled before we proceed to close idle connections. 
+ await asyncio.sleep(0) + self._server.pre_shutdown() + await self.shutdown() + await self._server.shutdown(self._shutdown_timeout) + await self._cleanup_server() + + self._server = None + if self._handle_signals: + loop = asyncio.get_running_loop() + try: + loop.remove_signal_handler(signal.SIGINT) + loop.remove_signal_handler(signal.SIGTERM) + except NotImplementedError: # pragma: no cover + # remove_signal_handler is not implemented on Windows + pass + + @abstractmethod + async def _make_server(self) -> Server: + pass # pragma: no cover + + @abstractmethod + async def _cleanup_server(self) -> None: + pass # pragma: no cover + + def _reg_site(self, site: BaseSite) -> None: + if site in self._sites: + raise RuntimeError(f"Site {site} is already registered in runner {self}") + self._sites.append(site) + + def _check_site(self, site: BaseSite) -> None: + if site not in self._sites: + raise RuntimeError(f"Site {site} is not registered in runner {self}") + + def _unreg_site(self, site: BaseSite) -> None: + if site not in self._sites: + raise RuntimeError(f"Site {site} is not registered in runner {self}") + self._sites.remove(site) + + +class ServerRunner(BaseRunner): + """Low-level web server runner""" + + __slots__ = ("_web_server",) + + def __init__( + self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any + ) -> None: + super().__init__(handle_signals=handle_signals, **kwargs) + self._web_server = web_server + + async def shutdown(self) -> None: + pass + + async def _make_server(self) -> Server: + return self._web_server + + async def _cleanup_server(self) -> None: + pass + + +class AppRunner(BaseRunner): + """Web Application runner""" + + __slots__ = ("_app",) + + def __init__( + self, app: Application, *, handle_signals: bool = False, **kwargs: Any + ) -> None: + super().__init__(handle_signals=handle_signals, **kwargs) + if not isinstance(app, Application): + raise TypeError( + "The first argument should be web.Application " + "instance, got 
{!r}".format(app) + ) + self._app = app + + @property + def app(self) -> Application: + return self._app + + async def shutdown(self) -> None: + await self._app.shutdown() + + async def _make_server(self) -> Server: + loop = asyncio.get_event_loop() + self._app._set_loop(loop) + self._app.on_startup.freeze() + await self._app.startup() + self._app.freeze() + + return self._app._make_handler(loop=loop, **self._kwargs) + + async def _cleanup_server(self) -> None: + await self._app.cleanup() diff --git a/venv/lib/python3.10/site-packages/aiohttp/web_server.py b/venv/lib/python3.10/site-packages/aiohttp/web_server.py new file mode 100644 index 0000000000000000000000000000000000000000..328aca1e405ef87e4df8a992c32eac092b4af8f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/web_server.py @@ -0,0 +1,84 @@ +"""Low level HTTP server.""" + +import asyncio +from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa + +from .abc import AbstractStreamWriter +from .http_parser import RawRequestMessage +from .streams import StreamReader +from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler +from .web_request import BaseRequest + +__all__ = ("Server",) + + +class Server: + def __init__( + self, + handler: _RequestHandler, + *, + request_factory: Optional[_RequestFactory] = None, + handler_cancellation: bool = False, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Any, + ) -> None: + self._loop = loop or asyncio.get_running_loop() + self._connections: Dict[RequestHandler, asyncio.Transport] = {} + self._kwargs = kwargs + # requests_count is the number of requests being processed by the server + # for the lifetime of the server. 
+ self.requests_count = 0 + self.request_handler = handler + self.request_factory = request_factory or self._make_request + self.handler_cancellation = handler_cancellation + + @property + def connections(self) -> List[RequestHandler]: + return list(self._connections.keys()) + + def connection_made( + self, handler: RequestHandler, transport: asyncio.Transport + ) -> None: + self._connections[handler] = transport + + def connection_lost( + self, handler: RequestHandler, exc: Optional[BaseException] = None + ) -> None: + if handler in self._connections: + if handler._task_handler: + handler._task_handler.add_done_callback( + lambda f: self._connections.pop(handler, None) + ) + else: + del self._connections[handler] + + def _make_request( + self, + message: RawRequestMessage, + payload: StreamReader, + protocol: RequestHandler, + writer: AbstractStreamWriter, + task: "asyncio.Task[None]", + ) -> BaseRequest: + return BaseRequest(message, payload, protocol, writer, task, self._loop) + + def pre_shutdown(self) -> None: + for conn in self._connections: + conn.close() + + async def shutdown(self, timeout: Optional[float] = None) -> None: + coros = (conn.shutdown(timeout) for conn in self._connections) + await asyncio.gather(*coros) + self._connections.clear() + + def __call__(self) -> RequestHandler: + try: + return RequestHandler(self, loop=self._loop, **self._kwargs) + except TypeError: + # Failsafe creation: remove all custom handler_args + kwargs = { + k: v + for k, v in self._kwargs.items() + if k in ["debug", "access_log_class"] + } + return RequestHandler(self, loop=self._loop, **kwargs) diff --git a/venv/lib/python3.10/site-packages/aiohttp/web_urldispatcher.py b/venv/lib/python3.10/site-packages/aiohttp/web_urldispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..28ae2518fec3a8b59e1e045ba01d6ff1bad0cd13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/web_urldispatcher.py @@ -0,0 +1,1303 @@ +import abc +import asyncio 
+import base64 +import functools +import hashlib +import html +import inspect +import keyword +import os +import re +import sys +import warnings +from functools import wraps +from pathlib import Path +from types import MappingProxyType +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Container, + Dict, + Final, + Generator, + Iterable, + Iterator, + List, + Mapping, + NoReturn, + Optional, + Pattern, + Set, + Sized, + Tuple, + Type, + TypedDict, + Union, + cast, +) + +from yarl import URL, __version__ as yarl_version + +from . import hdrs +from .abc import AbstractMatchInfo, AbstractRouter, AbstractView +from .helpers import DEBUG +from .http import HttpVersion11 +from .typedefs import Handler, PathLike +from .web_exceptions import ( + HTTPException, + HTTPExpectationFailed, + HTTPForbidden, + HTTPMethodNotAllowed, + HTTPNotFound, +) +from .web_fileresponse import FileResponse +from .web_request import Request +from .web_response import Response, StreamResponse +from .web_routedef import AbstractRouteDef + +__all__ = ( + "UrlDispatcher", + "UrlMappingMatchInfo", + "AbstractResource", + "Resource", + "PlainResource", + "DynamicResource", + "AbstractRoute", + "ResourceRoute", + "StaticResource", + "View", +) + + +if TYPE_CHECKING: + from .web_app import Application + + BaseDict = Dict[str, str] +else: + BaseDict = dict + +CIRCULAR_SYMLINK_ERROR = ( + (OSError,) + if sys.version_info < (3, 10) and sys.platform.startswith("win32") + else (RuntimeError,) if sys.version_info < (3, 13) else () +) + +YARL_VERSION: Final[Tuple[int, ...]] = tuple(map(int, yarl_version.split(".")[:2])) + +HTTP_METHOD_RE: Final[Pattern[str]] = re.compile( + r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$" +) +ROUTE_RE: Final[Pattern[str]] = re.compile( + r"(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})" +) +PATH_SEP: Final[str] = re.escape("/") + + +_ExpectHandler = Callable[[Request], Awaitable[Optional[StreamResponse]]] +_Resolve = Tuple[Optional["UrlMappingMatchInfo"], Set[str]] 
+ +html_escape = functools.partial(html.escape, quote=True) + + +class _InfoDict(TypedDict, total=False): + path: str + + formatter: str + pattern: Pattern[str] + + directory: Path + prefix: str + routes: Mapping[str, "AbstractRoute"] + + app: "Application" + + domain: str + + rule: "AbstractRuleMatching" + + http_exception: HTTPException + + +class AbstractResource(Sized, Iterable["AbstractRoute"]): + def __init__(self, *, name: Optional[str] = None) -> None: + self._name = name + + @property + def name(self) -> Optional[str]: + return self._name + + @property + @abc.abstractmethod + def canonical(self) -> str: + """Exposes the resource's canonical path. + + For example '/foo/bar/{name}' + + """ + + @abc.abstractmethod # pragma: no branch + def url_for(self, **kwargs: str) -> URL: + """Construct url for resource with additional params.""" + + @abc.abstractmethod # pragma: no branch + async def resolve(self, request: Request) -> _Resolve: + """Resolve resource. + + Return (UrlMappingMatchInfo, allowed_methods) pair. + """ + + @abc.abstractmethod + def add_prefix(self, prefix: str) -> None: + """Add a prefix to processed URLs. + + Required for subapplications support. 
+ """ + + @abc.abstractmethod + def get_info(self) -> _InfoDict: + """Return a dict with additional info useful for introspection""" + + def freeze(self) -> None: + pass + + @abc.abstractmethod + def raw_match(self, path: str) -> bool: + """Perform a raw match against path""" + + +class AbstractRoute(abc.ABC): + def __init__( + self, + method: str, + handler: Union[Handler, Type[AbstractView]], + *, + expect_handler: Optional[_ExpectHandler] = None, + resource: Optional[AbstractResource] = None, + ) -> None: + + if expect_handler is None: + expect_handler = _default_expect_handler + + assert inspect.iscoroutinefunction(expect_handler) or ( + sys.version_info < (3, 14) and asyncio.iscoroutinefunction(expect_handler) + ), f"Coroutine is expected, got {expect_handler!r}" + + method = method.upper() + if not HTTP_METHOD_RE.match(method): + raise ValueError(f"{method} is not allowed HTTP method") + + assert callable(handler), handler + if inspect.iscoroutinefunction(handler) or ( + sys.version_info < (3, 14) and asyncio.iscoroutinefunction(handler) + ): + pass + elif inspect.isgeneratorfunction(handler): + warnings.warn( + "Bare generators are deprecated, use @coroutine wrapper", + DeprecationWarning, + ) + elif isinstance(handler, type) and issubclass(handler, AbstractView): + pass + else: + warnings.warn( + "Bare functions are deprecated, use async ones", DeprecationWarning + ) + + @wraps(handler) + async def handler_wrapper(request: Request) -> StreamResponse: + result = old_handler(request) # type: ignore[call-arg] + if asyncio.iscoroutine(result): + result = await result + assert isinstance(result, StreamResponse) + return result + + old_handler = handler + handler = handler_wrapper + + self._method = method + self._handler = handler + self._expect_handler = expect_handler + self._resource = resource + + @property + def method(self) -> str: + return self._method + + @property + def handler(self) -> Handler: + return self._handler + + @property + @abc.abstractmethod 
+ def name(self) -> Optional[str]: + """Optional route's name, always equals to resource's name.""" + + @property + def resource(self) -> Optional[AbstractResource]: + return self._resource + + @abc.abstractmethod + def get_info(self) -> _InfoDict: + """Return a dict with additional info useful for introspection""" + + @abc.abstractmethod # pragma: no branch + def url_for(self, *args: str, **kwargs: str) -> URL: + """Construct url for route with additional params.""" + + async def handle_expect_header(self, request: Request) -> Optional[StreamResponse]: + return await self._expect_handler(request) + + +class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo): + + __slots__ = ("_route", "_apps", "_current_app", "_frozen") + + def __init__(self, match_dict: Dict[str, str], route: AbstractRoute) -> None: + super().__init__(match_dict) + self._route = route + self._apps: List[Application] = [] + self._current_app: Optional[Application] = None + self._frozen = False + + @property + def handler(self) -> Handler: + return self._route.handler + + @property + def route(self) -> AbstractRoute: + return self._route + + @property + def expect_handler(self) -> _ExpectHandler: + return self._route.handle_expect_header + + @property + def http_exception(self) -> Optional[HTTPException]: + return None + + def get_info(self) -> _InfoDict: # type: ignore[override] + return self._route.get_info() + + @property + def apps(self) -> Tuple["Application", ...]: + return tuple(self._apps) + + def add_app(self, app: "Application") -> None: + if self._frozen: + raise RuntimeError("Cannot change apps stack after .freeze() call") + if self._current_app is None: + self._current_app = app + self._apps.insert(0, app) + + @property + def current_app(self) -> "Application": + app = self._current_app + assert app is not None + return app + + @current_app.setter + def current_app(self, app: "Application") -> None: + if DEBUG: # pragma: no cover + if app not in self._apps: + raise RuntimeError( + 
"Expected one of the following apps {!r}, got {!r}".format( + self._apps, app + ) + ) + self._current_app = app + + def freeze(self) -> None: + self._frozen = True + + def __repr__(self) -> str: + return f"" + + +class MatchInfoError(UrlMappingMatchInfo): + + __slots__ = ("_exception",) + + def __init__(self, http_exception: HTTPException) -> None: + self._exception = http_exception + super().__init__({}, SystemRoute(self._exception)) + + @property + def http_exception(self) -> HTTPException: + return self._exception + + def __repr__(self) -> str: + return "".format( + self._exception.status, self._exception.reason + ) + + +async def _default_expect_handler(request: Request) -> None: + """Default handler for Expect header. + + Just send "100 Continue" to client. + raise HTTPExpectationFailed if value of header is not "100-continue" + """ + expect = request.headers.get(hdrs.EXPECT, "") + if request.version == HttpVersion11: + if expect.lower() == "100-continue": + await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n") + # Reset output_size as we haven't started the main body yet. 
+ request.writer.output_size = 0 + else: + raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect) + + +class Resource(AbstractResource): + def __init__(self, *, name: Optional[str] = None) -> None: + super().__init__(name=name) + self._routes: Dict[str, ResourceRoute] = {} + self._any_route: Optional[ResourceRoute] = None + self._allowed_methods: Set[str] = set() + + def add_route( + self, + method: str, + handler: Union[Type[AbstractView], Handler], + *, + expect_handler: Optional[_ExpectHandler] = None, + ) -> "ResourceRoute": + if route := self._routes.get(method, self._any_route): + raise RuntimeError( + "Added route will never be executed, " + f"method {route.method} is already " + "registered" + ) + + route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler) + self.register_route(route_obj) + return route_obj + + def register_route(self, route: "ResourceRoute") -> None: + assert isinstance( + route, ResourceRoute + ), f"Instance of Route class is required, got {route!r}" + if route.method == hdrs.METH_ANY: + self._any_route = route + self._allowed_methods.add(route.method) + self._routes[route.method] = route + + async def resolve(self, request: Request) -> _Resolve: + if (match_dict := self._match(request.rel_url.path_safe)) is None: + return None, set() + if route := self._routes.get(request.method, self._any_route): + return UrlMappingMatchInfo(match_dict, route), self._allowed_methods + return None, self._allowed_methods + + @abc.abstractmethod + def _match(self, path: str) -> Optional[Dict[str, str]]: + pass # pragma: no cover + + def __len__(self) -> int: + return len(self._routes) + + def __iter__(self) -> Iterator["ResourceRoute"]: + return iter(self._routes.values()) + + # TODO: implement all abstract methods + + +class PlainResource(Resource): + def __init__(self, path: str, *, name: Optional[str] = None) -> None: + super().__init__(name=name) + assert not path or path.startswith("/") + self._path = path + + @property 
+ def canonical(self) -> str: + return self._path + + def freeze(self) -> None: + if not self._path: + self._path = "/" + + def add_prefix(self, prefix: str) -> None: + assert prefix.startswith("/") + assert not prefix.endswith("/") + assert len(prefix) > 1 + self._path = prefix + self._path + + def _match(self, path: str) -> Optional[Dict[str, str]]: + # string comparison is about 10 times faster than regexp matching + if self._path == path: + return {} + return None + + def raw_match(self, path: str) -> bool: + return self._path == path + + def get_info(self) -> _InfoDict: + return {"path": self._path} + + def url_for(self) -> URL: # type: ignore[override] + return URL.build(path=self._path, encoded=True) + + def __repr__(self) -> str: + name = "'" + self.name + "' " if self.name is not None else "" + return f"" + + +class DynamicResource(Resource): + + DYN = re.compile(r"\{(?P[_a-zA-Z][_a-zA-Z0-9]*)\}") + DYN_WITH_RE = re.compile(r"\{(?P[_a-zA-Z][_a-zA-Z0-9]*):(?P.+)\}") + GOOD = r"[^{}/]+" + + def __init__(self, path: str, *, name: Optional[str] = None) -> None: + super().__init__(name=name) + self._orig_path = path + pattern = "" + formatter = "" + for part in ROUTE_RE.split(path): + match = self.DYN.fullmatch(part) + if match: + pattern += "(?P<{}>{})".format(match.group("var"), self.GOOD) + formatter += "{" + match.group("var") + "}" + continue + + match = self.DYN_WITH_RE.fullmatch(part) + if match: + pattern += "(?P<{var}>{re})".format(**match.groupdict()) + formatter += "{" + match.group("var") + "}" + continue + + if "{" in part or "}" in part: + raise ValueError(f"Invalid path '{path}'['{part}']") + + part = _requote_path(part) + formatter += part + pattern += re.escape(part) + + try: + compiled = re.compile(pattern) + except re.error as exc: + raise ValueError(f"Bad pattern '{pattern}': {exc}") from None + assert compiled.pattern.startswith(PATH_SEP) + assert formatter.startswith("/") + self._pattern = compiled + self._formatter = formatter + + 
@property + def canonical(self) -> str: + return self._formatter + + def add_prefix(self, prefix: str) -> None: + assert prefix.startswith("/") + assert not prefix.endswith("/") + assert len(prefix) > 1 + self._pattern = re.compile(re.escape(prefix) + self._pattern.pattern) + self._formatter = prefix + self._formatter + + def _match(self, path: str) -> Optional[Dict[str, str]]: + match = self._pattern.fullmatch(path) + if match is None: + return None + return { + key: _unquote_path_safe(value) for key, value in match.groupdict().items() + } + + def raw_match(self, path: str) -> bool: + return self._orig_path == path + + def get_info(self) -> _InfoDict: + return {"formatter": self._formatter, "pattern": self._pattern} + + def url_for(self, **parts: str) -> URL: + url = self._formatter.format_map({k: _quote_path(v) for k, v in parts.items()}) + return URL.build(path=url, encoded=True) + + def __repr__(self) -> str: + name = "'" + self.name + "' " if self.name is not None else "" + return "".format( + name=name, formatter=self._formatter + ) + + +class PrefixResource(AbstractResource): + def __init__(self, prefix: str, *, name: Optional[str] = None) -> None: + assert not prefix or prefix.startswith("/"), prefix + assert prefix in ("", "/") or not prefix.endswith("/"), prefix + super().__init__(name=name) + self._prefix = _requote_path(prefix) + self._prefix2 = self._prefix + "/" + + @property + def canonical(self) -> str: + return self._prefix + + def add_prefix(self, prefix: str) -> None: + assert prefix.startswith("/") + assert not prefix.endswith("/") + assert len(prefix) > 1 + self._prefix = prefix + self._prefix + self._prefix2 = self._prefix + "/" + + def raw_match(self, prefix: str) -> bool: + return False + + # TODO: impl missing abstract methods + + +class StaticResource(PrefixResource): + VERSION_KEY = "v" + + def __init__( + self, + prefix: str, + directory: PathLike, + *, + name: Optional[str] = None, + expect_handler: Optional[_ExpectHandler] = None, + 
chunk_size: int = 256 * 1024, + show_index: bool = False, + follow_symlinks: bool = False, + append_version: bool = False, + ) -> None: + super().__init__(prefix, name=name) + try: + directory = Path(directory).expanduser().resolve(strict=True) + except FileNotFoundError as error: + raise ValueError(f"'{directory}' does not exist") from error + if not directory.is_dir(): + raise ValueError(f"'{directory}' is not a directory") + self._directory = directory + self._show_index = show_index + self._chunk_size = chunk_size + self._follow_symlinks = follow_symlinks + self._expect_handler = expect_handler + self._append_version = append_version + + self._routes = { + "GET": ResourceRoute( + "GET", self._handle, self, expect_handler=expect_handler + ), + "HEAD": ResourceRoute( + "HEAD", self._handle, self, expect_handler=expect_handler + ), + } + self._allowed_methods = set(self._routes) + + def url_for( # type: ignore[override] + self, + *, + filename: PathLike, + append_version: Optional[bool] = None, + ) -> URL: + if append_version is None: + append_version = self._append_version + filename = str(filename).lstrip("/") + + url = URL.build(path=self._prefix, encoded=True) + # filename is not encoded + if YARL_VERSION < (1, 6): + url = url / filename.replace("%", "%25") + else: + url = url / filename + + if append_version: + unresolved_path = self._directory.joinpath(filename) + try: + if self._follow_symlinks: + normalized_path = Path(os.path.normpath(unresolved_path)) + normalized_path.relative_to(self._directory) + filepath = normalized_path.resolve() + else: + filepath = unresolved_path.resolve() + filepath.relative_to(self._directory) + except (ValueError, FileNotFoundError): + # ValueError for case when path point to symlink + # with follow_symlinks is False + return url # relatively safe + if filepath.is_file(): + # TODO cache file content + # with file watcher for cache invalidation + with filepath.open("rb") as f: + file_bytes = f.read() + h = 
self._get_file_hash(file_bytes) + url = url.with_query({self.VERSION_KEY: h}) + return url + return url + + @staticmethod + def _get_file_hash(byte_array: bytes) -> str: + m = hashlib.sha256() # todo sha256 can be configurable param + m.update(byte_array) + b64 = base64.urlsafe_b64encode(m.digest()) + return b64.decode("ascii") + + def get_info(self) -> _InfoDict: + return { + "directory": self._directory, + "prefix": self._prefix, + "routes": self._routes, + } + + def set_options_route(self, handler: Handler) -> None: + if "OPTIONS" in self._routes: + raise RuntimeError("OPTIONS route was set already") + self._routes["OPTIONS"] = ResourceRoute( + "OPTIONS", handler, self, expect_handler=self._expect_handler + ) + self._allowed_methods.add("OPTIONS") + + async def resolve(self, request: Request) -> _Resolve: + path = request.rel_url.path_safe + method = request.method + if not path.startswith(self._prefix2) and path != self._prefix: + return None, set() + + allowed_methods = self._allowed_methods + if method not in allowed_methods: + return None, allowed_methods + + match_dict = {"filename": _unquote_path_safe(path[len(self._prefix) + 1 :])} + return (UrlMappingMatchInfo(match_dict, self._routes[method]), allowed_methods) + + def __len__(self) -> int: + return len(self._routes) + + def __iter__(self) -> Iterator[AbstractRoute]: + return iter(self._routes.values()) + + async def _handle(self, request: Request) -> StreamResponse: + rel_url = request.match_info["filename"] + filename = Path(rel_url) + if filename.anchor: + # rel_url is an absolute name like + # /static/\\machine_name\c$ or /static/D:\path + # where the static dir is totally different + raise HTTPForbidden() + + unresolved_path = self._directory.joinpath(filename) + loop = asyncio.get_running_loop() + return await loop.run_in_executor( + None, self._resolve_path_to_response, unresolved_path + ) + + def _resolve_path_to_response(self, unresolved_path: Path) -> StreamResponse: + """Take the unresolved 
path and query the file system to form a response.""" + # Check for access outside the root directory. For follow symlinks, URI + # cannot traverse out, but symlinks can. Otherwise, no access outside + # root is permitted. + try: + if self._follow_symlinks: + normalized_path = Path(os.path.normpath(unresolved_path)) + normalized_path.relative_to(self._directory) + file_path = normalized_path.resolve() + else: + file_path = unresolved_path.resolve() + file_path.relative_to(self._directory) + except (ValueError, *CIRCULAR_SYMLINK_ERROR) as error: + # ValueError is raised for the relative check. Circular symlinks + # raise here on resolving for python < 3.13. + raise HTTPNotFound() from error + + # if path is a directory, return the contents if permitted. Note the + # directory check will raise if a segment is not readable. + try: + if file_path.is_dir(): + if self._show_index: + return Response( + text=self._directory_as_html(file_path), + content_type="text/html", + ) + else: + raise HTTPForbidden() + except PermissionError as error: + raise HTTPForbidden() from error + + # Return the file response, which handles all other checks. + return FileResponse(file_path, chunk_size=self._chunk_size) + + def _directory_as_html(self, dir_path: Path) -> str: + """returns directory's index as html.""" + assert dir_path.is_dir() + + relative_path_to_dir = dir_path.relative_to(self._directory).as_posix() + index_of = f"Index of /{html_escape(relative_path_to_dir)}" + h1 = f"

{index_of}

" + + index_list = [] + dir_index = dir_path.iterdir() + for _file in sorted(dir_index): + # show file url as relative to static path + rel_path = _file.relative_to(self._directory).as_posix() + quoted_file_url = _quote_path(f"{self._prefix}/{rel_path}") + + # if file is a directory, add '/' to the end of the name + if _file.is_dir(): + file_name = f"{_file.name}/" + else: + file_name = _file.name + + index_list.append( + f'
  • {html_escape(file_name)}
  • ' + ) + ul = "
      \n{}\n
    ".format("\n".join(index_list)) + body = f"\n{h1}\n{ul}\n" + + head_str = f"\n{index_of}\n" + html = f"\n{head_str}\n{body}\n" + + return html + + def __repr__(self) -> str: + name = "'" + self.name + "'" if self.name is not None else "" + return " {directory!r}>".format( + name=name, path=self._prefix, directory=self._directory + ) + + +class PrefixedSubAppResource(PrefixResource): + def __init__(self, prefix: str, app: "Application") -> None: + super().__init__(prefix) + self._app = app + self._add_prefix_to_resources(prefix) + + def add_prefix(self, prefix: str) -> None: + super().add_prefix(prefix) + self._add_prefix_to_resources(prefix) + + def _add_prefix_to_resources(self, prefix: str) -> None: + router = self._app.router + for resource in router.resources(): + # Since the canonical path of a resource is about + # to change, we need to unindex it and then reindex + router.unindex_resource(resource) + resource.add_prefix(prefix) + router.index_resource(resource) + + def url_for(self, *args: str, **kwargs: str) -> URL: + raise RuntimeError(".url_for() is not supported by sub-application root") + + def get_info(self) -> _InfoDict: + return {"app": self._app, "prefix": self._prefix} + + async def resolve(self, request: Request) -> _Resolve: + match_info = await self._app.router.resolve(request) + match_info.add_app(self._app) + if isinstance(match_info.http_exception, HTTPMethodNotAllowed): + methods = match_info.http_exception.allowed_methods + else: + methods = set() + return match_info, methods + + def __len__(self) -> int: + return len(self._app.router.routes()) + + def __iter__(self) -> Iterator[AbstractRoute]: + return iter(self._app.router.routes()) + + def __repr__(self) -> str: + return " {app!r}>".format( + prefix=self._prefix, app=self._app + ) + + +class AbstractRuleMatching(abc.ABC): + @abc.abstractmethod # pragma: no branch + async def match(self, request: Request) -> bool: + """Return bool if the request satisfies the criteria""" + + 
class AbstractRuleMatching(abc.ABC):
    """Interface for rules deciding whether a request selects a sub-app."""

    @abc.abstractmethod  # pragma: no branch
    async def match(self, request: Request) -> bool:
        """Return bool if the request satisfies the criteria"""

    @abc.abstractmethod  # pragma: no branch
    def get_info(self) -> _InfoDict:
        """Return a dict with additional info useful for introspection"""

    @property
    @abc.abstractmethod  # pragma: no branch
    def canonical(self) -> str:
        """Return a str"""


class Domain(AbstractRuleMatching):
    """Match requests whose Host header equals a fixed, normalized domain."""

    # One DNS label: 1-63 chars of [a-z0-9-], neither starting nor ending
    # with "-".
    re_part = re.compile(r"(?!-)[a-z\d-]{1,63}(?<!-)")

    def __init__(self, domain: str) -> None:
        super().__init__()
        self._domain = self.validation(domain)

    @property
    def canonical(self) -> str:
        return self._domain

    def validation(self, domain: str) -> str:
        """Normalize *domain* (lowercase, no trailing dot) and validate it."""
        if not isinstance(domain, str):
            raise TypeError("Domain must be str")
        normalized = domain.rstrip(".").lower()
        if not normalized:
            raise ValueError("Domain cannot be empty")
        elif "://" in normalized:
            raise ValueError("Scheme not supported")
        parsed = URL("http://" + normalized)
        assert parsed.raw_host is not None
        labels = parsed.raw_host.split(".")
        if not all(self.re_part.fullmatch(label) for label in labels):
            raise ValueError("Domain not valid")
        if parsed.port == 80:
            # The default HTTP port is implicit in the Host header.
            return parsed.raw_host
        return f"{parsed.raw_host}:{parsed.port}"

    async def match(self, request: Request) -> bool:
        host = request.headers.get(hdrs.HOST)
        if not host:
            return False
        return self.match_domain(host)

    def match_domain(self, host: str) -> bool:
        return host.lower() == self._domain

    def get_info(self) -> _InfoDict:
        return {"domain": self._domain}
class MaskDomain(Domain):
    """Domain matcher that allows "*" wildcards in the host pattern."""

    re_part = re.compile(r"(?!-)[a-z\d\*-]{1,63}(?<!-)")

    def __init__(self, domain: str) -> None:
        super().__init__(domain)
        # Translate the validated domain into a regex: dots are literal,
        # "*" becomes ".*".
        pattern = self._domain.replace(".", r"\.").replace("*", ".*")
        self._mask = re.compile(pattern)

    @property
    def canonical(self) -> str:
        return self._mask.pattern

    def match_domain(self, host: str) -> bool:
        return self._mask.fullmatch(host) is not None


class MatchedSubAppResource(PrefixedSubAppResource):
    """Sub-application resource selected by a rule (e.g. Domain), not a prefix."""

    def __init__(self, rule: AbstractRuleMatching, app: "Application") -> None:
        # Deliberately skip PrefixedSubAppResource.__init__: there is no
        # path prefix to push down onto the sub-app's resources.
        AbstractResource.__init__(self)
        self._prefix = ""
        self._app = app
        self._rule = rule

    @property
    def canonical(self) -> str:
        return self._rule.canonical

    def get_info(self) -> _InfoDict:
        return {"app": self._app, "rule": self._rule}

    async def resolve(self, request: Request) -> _Resolve:
        if not await self._rule.match(request):
            return None, set()
        match_info = await self._app.router.resolve(request)
        match_info.add_app(self._app)
        exc = match_info.http_exception
        methods = (
            exc.allowed_methods if isinstance(exc, HTTPMethodNotAllowed) else set()
        )
        return match_info, methods

    def __repr__(self) -> str:
        return f"<MatchedSubAppResource -> {self._app!r}>"


class ResourceRoute(AbstractRoute):
    """A route with resource"""

    def __init__(
        self,
        method: str,
        handler: Union[Handler, Type[AbstractView]],
        resource: AbstractResource,
        *,
        expect_handler: Optional[_ExpectHandler] = None,
    ) -> None:
        super().__init__(
            method, handler, expect_handler=expect_handler, resource=resource
        )

    def __repr__(self) -> str:
        return "<ResourceRoute [{method}] {resource} -> {handler!r}".format(
            method=self.method, resource=self._resource, handler=self.handler
        )

    @property
    def name(self) -> Optional[str]:
        return None if self._resource is None else self._resource.name

    def url_for(self, *args: str, **kwargs: str) -> URL:
        """Construct url for route with additional params."""
        assert self._resource is not None
        return self._resource.url_for(*args, **kwargs)

    def get_info(self) -> _InfoDict:
        assert self._resource is not None
        return self._resource.get_info()
class SystemRoute(AbstractRoute):
    """Synthetic route that always raises a fixed HTTP exception.

    Used by the dispatcher to represent 404/405 outcomes as match results.
    """

    def __init__(self, http_exception: HTTPException) -> None:
        super().__init__(hdrs.METH_ANY, self._handle)
        self._http_exception = http_exception

    def url_for(self, *args: str, **kwargs: str) -> URL:
        raise RuntimeError(".url_for() is not allowed for SystemRoute")

    @property
    def name(self) -> Optional[str]:
        # System routes are anonymous by definition.
        return None

    def get_info(self) -> _InfoDict:
        return {"http_exception": self._http_exception}

    async def _handle(self, request: Request) -> StreamResponse:
        raise self._http_exception

    @property
    def status(self) -> int:
        return self._http_exception.status

    @property
    def reason(self) -> str:
        return self._http_exception.reason

    def __repr__(self) -> str:
        return f"<SystemRoute {self.status}: {self.reason}>"


class View(AbstractView):
    """Class-based handler: dispatches on the lower-cased HTTP method name."""

    async def _iter(self) -> StreamResponse:
        if self.request.method not in hdrs.METH_ALL:
            self._raise_allowed_methods()
        handler: Optional[Callable[[], Awaitable[StreamResponse]]] = getattr(
            self, self.request.method.lower(), None
        )
        if handler is None:
            self._raise_allowed_methods()
        resp = await handler()
        assert isinstance(resp, StreamResponse)
        return resp

    def __await__(self) -> Generator[Any, None, StreamResponse]:
        return self._iter().__await__()

    def _raise_allowed_methods(self) -> NoReturn:
        # A method is "allowed" iff the subclass defines a matching attribute.
        allowed = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
        raise HTTPMethodNotAllowed(self.request.method, allowed)


class ResourcesView(Sized, Iterable[AbstractResource], Container[AbstractResource]):
    """Read-only view over the router's registered resources."""

    def __init__(self, resources: List[AbstractResource]) -> None:
        self._resources = resources

    def __len__(self) -> int:
        return len(self._resources)

    def __iter__(self) -> Iterator[AbstractResource]:
        return iter(self._resources)

    def __contains__(self, resource: object) -> bool:
        return resource in self._resources
Container[AbstractRoute]): + def __init__(self, resources: List[AbstractResource]): + self._routes: List[AbstractRoute] = [] + for resource in resources: + for route in resource: + self._routes.append(route) + + def __len__(self) -> int: + return len(self._routes) + + def __iter__(self) -> Iterator[AbstractRoute]: + yield from self._routes + + def __contains__(self, route: object) -> bool: + return route in self._routes + + +class UrlDispatcher(AbstractRouter, Mapping[str, AbstractResource]): + + NAME_SPLIT_RE = re.compile(r"[.:-]") + + def __init__(self) -> None: + super().__init__() + self._resources: List[AbstractResource] = [] + self._named_resources: Dict[str, AbstractResource] = {} + self._resource_index: dict[str, list[AbstractResource]] = {} + self._matched_sub_app_resources: List[MatchedSubAppResource] = [] + + async def resolve(self, request: Request) -> UrlMappingMatchInfo: + resource_index = self._resource_index + allowed_methods: Set[str] = set() + + # Walk the url parts looking for candidates. We walk the url backwards + # to ensure the most explicit match is found first. If there are multiple + # candidates for a given url part because there are multiple resources + # registered for the same canonical path, we resolve them in a linear + # fashion to ensure registration order is respected. + url_part = request.rel_url.path_safe + while url_part: + for candidate in resource_index.get(url_part, ()): + match_dict, allowed = await candidate.resolve(request) + if match_dict is not None: + return match_dict + else: + allowed_methods |= allowed + if url_part == "/": + break + url_part = url_part.rpartition("/")[0] or "/" + + # + # We didn't find any candidates, so we'll try the matched sub-app + # resources which we have to walk in a linear fashion because they + # have regex/wildcard match rules and we cannot index them. 
+ # + # For most cases we do not expect there to be many of these since + # currently they are only added by `add_domain` + # + for resource in self._matched_sub_app_resources: + match_dict, allowed = await resource.resolve(request) + if match_dict is not None: + return match_dict + else: + allowed_methods |= allowed + + if allowed_methods: + return MatchInfoError(HTTPMethodNotAllowed(request.method, allowed_methods)) + + return MatchInfoError(HTTPNotFound()) + + def __iter__(self) -> Iterator[str]: + return iter(self._named_resources) + + def __len__(self) -> int: + return len(self._named_resources) + + def __contains__(self, resource: object) -> bool: + return resource in self._named_resources + + def __getitem__(self, name: str) -> AbstractResource: + return self._named_resources[name] + + def resources(self) -> ResourcesView: + return ResourcesView(self._resources) + + def routes(self) -> RoutesView: + return RoutesView(self._resources) + + def named_resources(self) -> Mapping[str, AbstractResource]: + return MappingProxyType(self._named_resources) + + def register_resource(self, resource: AbstractResource) -> None: + assert isinstance( + resource, AbstractResource + ), f"Instance of AbstractResource class is required, got {resource!r}" + if self.frozen: + raise RuntimeError("Cannot register a resource into frozen router.") + + name = resource.name + + if name is not None: + parts = self.NAME_SPLIT_RE.split(name) + for part in parts: + if keyword.iskeyword(part): + raise ValueError( + f"Incorrect route name {name!r}, " + "python keywords cannot be used " + "for route name" + ) + if not part.isidentifier(): + raise ValueError( + "Incorrect route name {!r}, " + "the name should be a sequence of " + "python identifiers separated " + "by dash, dot or column".format(name) + ) + if name in self._named_resources: + raise ValueError( + "Duplicate {!r}, " + "already handled by {!r}".format(name, self._named_resources[name]) + ) + self._named_resources[name] = resource + 
self._resources.append(resource) + + if isinstance(resource, MatchedSubAppResource): + # We cannot index match sub-app resources because they have match rules + self._matched_sub_app_resources.append(resource) + else: + self.index_resource(resource) + + def _get_resource_index_key(self, resource: AbstractResource) -> str: + """Return a key to index the resource in the resource index.""" + if "{" in (index_key := resource.canonical): + # strip at the first { to allow for variables, and than + # rpartition at / to allow for variable parts in the path + # For example if the canonical path is `/core/locations{tail:.*}` + # the index key will be `/core` since index is based on the + # url parts split by `/` + index_key = index_key.partition("{")[0].rpartition("/")[0] + return index_key.rstrip("/") or "/" + + def index_resource(self, resource: AbstractResource) -> None: + """Add a resource to the resource index.""" + resource_key = self._get_resource_index_key(resource) + # There may be multiple resources for a canonical path + # so we keep them in a list to ensure that registration + # order is respected. 
+ self._resource_index.setdefault(resource_key, []).append(resource) + + def unindex_resource(self, resource: AbstractResource) -> None: + """Remove a resource from the resource index.""" + resource_key = self._get_resource_index_key(resource) + self._resource_index[resource_key].remove(resource) + + def add_resource(self, path: str, *, name: Optional[str] = None) -> Resource: + if path and not path.startswith("/"): + raise ValueError("path should be started with / or be empty") + # Reuse last added resource if path and name are the same + if self._resources: + resource = self._resources[-1] + if resource.name == name and resource.raw_match(path): + return cast(Resource, resource) + if not ("{" in path or "}" in path or ROUTE_RE.search(path)): + resource = PlainResource(path, name=name) + self.register_resource(resource) + return resource + resource = DynamicResource(path, name=name) + self.register_resource(resource) + return resource + + def add_route( + self, + method: str, + path: str, + handler: Union[Handler, Type[AbstractView]], + *, + name: Optional[str] = None, + expect_handler: Optional[_ExpectHandler] = None, + ) -> AbstractRoute: + resource = self.add_resource(path, name=name) + return resource.add_route(method, handler, expect_handler=expect_handler) + + def add_static( + self, + prefix: str, + path: PathLike, + *, + name: Optional[str] = None, + expect_handler: Optional[_ExpectHandler] = None, + chunk_size: int = 256 * 1024, + show_index: bool = False, + follow_symlinks: bool = False, + append_version: bool = False, + ) -> AbstractResource: + """Add static files view. 
+ + prefix - url prefix + path - folder with files + + """ + assert prefix.startswith("/") + if prefix.endswith("/"): + prefix = prefix[:-1] + resource = StaticResource( + prefix, + path, + name=name, + expect_handler=expect_handler, + chunk_size=chunk_size, + show_index=show_index, + follow_symlinks=follow_symlinks, + append_version=append_version, + ) + self.register_resource(resource) + return resource + + def add_head(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method HEAD.""" + return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs) + + def add_options(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method OPTIONS.""" + return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs) + + def add_get( + self, + path: str, + handler: Handler, + *, + name: Optional[str] = None, + allow_head: bool = True, + **kwargs: Any, + ) -> AbstractRoute: + """Shortcut for add_route with method GET. + + If allow_head is true, another + route is added allowing head requests to the same endpoint. 
+ """ + resource = self.add_resource(path, name=name) + if allow_head: + resource.add_route(hdrs.METH_HEAD, handler, **kwargs) + return resource.add_route(hdrs.METH_GET, handler, **kwargs) + + def add_post(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method POST.""" + return self.add_route(hdrs.METH_POST, path, handler, **kwargs) + + def add_put(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method PUT.""" + return self.add_route(hdrs.METH_PUT, path, handler, **kwargs) + + def add_patch(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method PATCH.""" + return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs) + + def add_delete(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method DELETE.""" + return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs) + + def add_view( + self, path: str, handler: Type[AbstractView], **kwargs: Any + ) -> AbstractRoute: + """Shortcut for add_route with ANY methods for a class-based view.""" + return self.add_route(hdrs.METH_ANY, path, handler, **kwargs) + + def freeze(self) -> None: + super().freeze() + for resource in self._resources: + resource.freeze() + + def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]: + """Append routes to route table. + + Parameter should be a sequence of RouteDef objects. + + Returns a list of registered AbstractRoute instances. 
+ """ + registered_routes = [] + for route_def in routes: + registered_routes.extend(route_def.register(self)) + return registered_routes + + +def _quote_path(value: str) -> str: + if YARL_VERSION < (1, 6): + value = value.replace("%", "%25") + return URL.build(path=value, encoded=False).raw_path + + +def _unquote_path_safe(value: str) -> str: + if "%" not in value: + return value + return value.replace("%2F", "/").replace("%25", "%") + + +def _requote_path(value: str) -> str: + # Quote non-ascii characters and other characters which must be quoted, + # but preserve existing %-sequences. + result = _quote_path(value) + if "%" in value: + result = result.replace("%25", "%") + return result diff --git a/venv/lib/python3.10/site-packages/aiohttp/web_ws.py b/venv/lib/python3.10/site-packages/aiohttp/web_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..575f9a3dc8507d1e6b766333c9daec389313febd --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/web_ws.py @@ -0,0 +1,631 @@ +import asyncio +import base64 +import binascii +import hashlib +import json +import sys +from typing import Any, Final, Iterable, Optional, Tuple, Union, cast + +import attr +from multidict import CIMultiDict + +from . 
import hdrs +from ._websocket.reader import WebSocketDataQueue +from ._websocket.writer import DEFAULT_LIMIT +from .abc import AbstractStreamWriter +from .client_exceptions import WSMessageTypeError +from .helpers import calculate_timeout_when, set_exception, set_result +from .http import ( + WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE, + WS_KEY, + WebSocketError, + WebSocketReader, + WebSocketWriter, + WSCloseCode, + WSMessage, + WSMsgType as WSMsgType, + ws_ext_gen, + ws_ext_parse, +) +from .http_websocket import _INTERNAL_RECEIVE_TYPES +from .log import ws_logger +from .streams import EofStream +from .typedefs import JSONDecoder, JSONEncoder +from .web_exceptions import HTTPBadRequest, HTTPException +from .web_request import BaseRequest +from .web_response import StreamResponse + +if sys.version_info >= (3, 11): + import asyncio as async_timeout +else: + import async_timeout + +__all__ = ( + "WebSocketResponse", + "WebSocketReady", + "WSMsgType", +) + +THRESHOLD_CONNLOST_ACCESS: Final[int] = 5 + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class WebSocketReady: + ok: bool + protocol: Optional[str] + + def __bool__(self) -> bool: + return self.ok + + +class WebSocketResponse(StreamResponse): + + _length_check: bool = False + _ws_protocol: Optional[str] = None + _writer: Optional[WebSocketWriter] = None + _reader: Optional[WebSocketDataQueue] = None + _closed: bool = False + _closing: bool = False + _conn_lost: int = 0 + _close_code: Optional[int] = None + _loop: Optional[asyncio.AbstractEventLoop] = None + _waiting: bool = False + _close_wait: Optional[asyncio.Future[None]] = None + _exception: Optional[BaseException] = None + _heartbeat_when: float = 0.0 + _heartbeat_cb: Optional[asyncio.TimerHandle] = None + _pong_response_cb: Optional[asyncio.TimerHandle] = None + _ping_task: Optional[asyncio.Task[None]] = None + + def __init__( + self, + *, + timeout: float = 10.0, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = 
True, + heartbeat: Optional[float] = None, + protocols: Iterable[str] = (), + compress: bool = True, + max_msg_size: int = 4 * 1024 * 1024, + writer_limit: int = DEFAULT_LIMIT, + ) -> None: + super().__init__(status=101) + self._protocols = protocols + self._timeout = timeout + self._receive_timeout = receive_timeout + self._autoclose = autoclose + self._autoping = autoping + self._heartbeat = heartbeat + if heartbeat is not None: + self._pong_heartbeat = heartbeat / 2.0 + self._compress: Union[bool, int] = compress + self._max_msg_size = max_msg_size + self._writer_limit = writer_limit + + def _cancel_heartbeat(self) -> None: + self._cancel_pong_response_cb() + if self._heartbeat_cb is not None: + self._heartbeat_cb.cancel() + self._heartbeat_cb = None + if self._ping_task is not None: + self._ping_task.cancel() + self._ping_task = None + + def _cancel_pong_response_cb(self) -> None: + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = None + + def _reset_heartbeat(self) -> None: + if self._heartbeat is None: + return + self._cancel_pong_response_cb() + req = self._req + timeout_ceil_threshold = ( + req._protocol._timeout_ceil_threshold if req is not None else 5 + ) + loop = self._loop + assert loop is not None + now = loop.time() + when = calculate_timeout_when(now, self._heartbeat, timeout_ceil_threshold) + self._heartbeat_when = when + if self._heartbeat_cb is None: + # We do not cancel the previous heartbeat_cb here because + # it generates a significant amount of TimerHandle churn + # which causes asyncio to rebuild the heap frequently. + # Instead _send_heartbeat() will reschedule the next + # heartbeat if it fires too early. 
+ self._heartbeat_cb = loop.call_at(when, self._send_heartbeat) + + def _send_heartbeat(self) -> None: + self._heartbeat_cb = None + loop = self._loop + assert loop is not None and self._writer is not None + now = loop.time() + if now < self._heartbeat_when: + # Heartbeat fired too early, reschedule + self._heartbeat_cb = loop.call_at( + self._heartbeat_when, self._send_heartbeat + ) + return + + req = self._req + timeout_ceil_threshold = ( + req._protocol._timeout_ceil_threshold if req is not None else 5 + ) + when = calculate_timeout_when(now, self._pong_heartbeat, timeout_ceil_threshold) + self._cancel_pong_response_cb() + self._pong_response_cb = loop.call_at(when, self._pong_not_received) + + coro = self._writer.send_frame(b"", WSMsgType.PING) + if sys.version_info >= (3, 12): + # Optimization for Python 3.12, try to send the ping + # immediately to avoid having to schedule + # the task on the event loop. + ping_task = asyncio.Task(coro, loop=loop, eager_start=True) + else: + ping_task = loop.create_task(coro) + + if not ping_task.done(): + self._ping_task = ping_task + ping_task.add_done_callback(self._ping_task_done) + else: + self._ping_task_done(ping_task) + + def _ping_task_done(self, task: "asyncio.Task[None]") -> None: + """Callback for when the ping task completes.""" + if not task.cancelled() and (exc := task.exception()): + self._handle_ping_pong_exception(exc) + self._ping_task = None + + def _pong_not_received(self) -> None: + if self._req is not None and self._req.transport is not None: + self._handle_ping_pong_exception( + asyncio.TimeoutError( + f"No PONG received after {self._pong_heartbeat} seconds" + ) + ) + + def _handle_ping_pong_exception(self, exc: BaseException) -> None: + """Handle exceptions raised during ping/pong processing.""" + if self._closed: + return + self._set_closed() + self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE) + self._exception = exc + if self._waiting and not self._closing and self._reader is not None: + 
self._reader.feed_data(WSMessage(WSMsgType.ERROR, exc, None), 0) + + def _set_closed(self) -> None: + """Set the connection to closed. + + Cancel any heartbeat timers and set the closed flag. + """ + self._closed = True + self._cancel_heartbeat() + + async def prepare(self, request: BaseRequest) -> AbstractStreamWriter: + # make pre-check to don't hide it by do_handshake() exceptions + if self._payload_writer is not None: + return self._payload_writer + + protocol, writer = self._pre_start(request) + payload_writer = await super().prepare(request) + assert payload_writer is not None + self._post_start(request, protocol, writer) + await payload_writer.drain() + return payload_writer + + def _handshake( + self, request: BaseRequest + ) -> Tuple["CIMultiDict[str]", Optional[str], int, bool]: + headers = request.headers + if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip(): + raise HTTPBadRequest( + text=( + "No WebSocket UPGRADE hdr: {}\n Can " + '"Upgrade" only to "WebSocket".' 
+ ).format(headers.get(hdrs.UPGRADE)) + ) + + if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower(): + raise HTTPBadRequest( + text="No CONNECTION upgrade hdr: {}".format( + headers.get(hdrs.CONNECTION) + ) + ) + + # find common sub-protocol between client and server + protocol: Optional[str] = None + if hdrs.SEC_WEBSOCKET_PROTOCOL in headers: + req_protocols = [ + str(proto.strip()) + for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") + ] + + for proto in req_protocols: + if proto in self._protocols: + protocol = proto + break + else: + # No overlap found: Return no protocol as per spec + ws_logger.warning( + "%s: Client protocols %r don’t overlap server-known ones %r", + request.remote, + req_protocols, + self._protocols, + ) + + # check supported version + version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "") + if version not in ("13", "8", "7"): + raise HTTPBadRequest(text=f"Unsupported version: {version}") + + # check client handshake for validity + key = headers.get(hdrs.SEC_WEBSOCKET_KEY) + try: + if not key or len(base64.b64decode(key)) != 16: + raise HTTPBadRequest(text=f"Handshake error: {key!r}") + except binascii.Error: + raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None + + accept_val = base64.b64encode( + hashlib.sha1(key.encode() + WS_KEY).digest() + ).decode() + response_headers = CIMultiDict( + { + hdrs.UPGRADE: "websocket", + hdrs.CONNECTION: "upgrade", + hdrs.SEC_WEBSOCKET_ACCEPT: accept_val, + } + ) + + notakeover = False + compress = 0 + if self._compress: + extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS) + # Server side always get return with no exception. 
+ # If something happened, just drop compress extension + compress, notakeover = ws_ext_parse(extensions, isserver=True) + if compress: + enabledext = ws_ext_gen( + compress=compress, isserver=True, server_notakeover=notakeover + ) + response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext + + if protocol: + response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol + return ( + response_headers, + protocol, + compress, + notakeover, + ) + + def _pre_start(self, request: BaseRequest) -> Tuple[Optional[str], WebSocketWriter]: + self._loop = request._loop + + headers, protocol, compress, notakeover = self._handshake(request) + + self.set_status(101) + self.headers.update(headers) + self.force_close() + self._compress = compress + transport = request._protocol.transport + assert transport is not None + writer = WebSocketWriter( + request._protocol, + transport, + compress=compress, + notakeover=notakeover, + limit=self._writer_limit, + ) + + return protocol, writer + + def _post_start( + self, request: BaseRequest, protocol: Optional[str], writer: WebSocketWriter + ) -> None: + self._ws_protocol = protocol + self._writer = writer + + self._reset_heartbeat() + + loop = self._loop + assert loop is not None + self._reader = WebSocketDataQueue(request._protocol, 2**16, loop=loop) + request.protocol.set_parser( + WebSocketReader( + self._reader, self._max_msg_size, compress=bool(self._compress) + ) + ) + # disable HTTP keepalive for WebSocket + request.protocol.keep_alive(False) + + def can_prepare(self, request: BaseRequest) -> WebSocketReady: + if self._writer is not None: + raise RuntimeError("Already started") + try: + _, protocol, _, _ = self._handshake(request) + except HTTPException: + return WebSocketReady(False, None) + else: + return WebSocketReady(True, protocol) + + @property + def prepared(self) -> bool: + return self._writer is not None + + @property + def closed(self) -> bool: + return self._closed + + @property + def close_code(self) -> Optional[int]: + 
return self._close_code + + @property + def ws_protocol(self) -> Optional[str]: + return self._ws_protocol + + @property + def compress(self) -> Union[int, bool]: + return self._compress + + def get_extra_info(self, name: str, default: Any = None) -> Any: + """Get optional transport information. + + If no value associated with ``name`` is found, ``default`` is returned. + """ + writer = self._writer + if writer is None: + return default + transport = writer.transport + if transport is None: + return default + return transport.get_extra_info(name, default) + + def exception(self) -> Optional[BaseException]: + return self._exception + + async def ping(self, message: bytes = b"") -> None: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + await self._writer.send_frame(message, WSMsgType.PING) + + async def pong(self, message: bytes = b"") -> None: + # unsolicited pong + if self._writer is None: + raise RuntimeError("Call .prepare() first") + await self._writer.send_frame(message, WSMsgType.PONG) + + async def send_frame( + self, message: bytes, opcode: WSMsgType, compress: Optional[int] = None + ) -> None: + """Send a frame over the websocket.""" + if self._writer is None: + raise RuntimeError("Call .prepare() first") + await self._writer.send_frame(message, opcode, compress) + + async def send_str(self, data: str, compress: Optional[int] = None) -> None: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + if not isinstance(data, str): + raise TypeError("data argument must be str (%r)" % type(data)) + await self._writer.send_frame( + data.encode("utf-8"), WSMsgType.TEXT, compress=compress + ) + + async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError("data argument must be byte-ish (%r)" % type(data)) + await self._writer.send_frame(data, 
WSMsgType.BINARY, compress=compress) + + async def send_json( + self, + data: Any, + compress: Optional[int] = None, + *, + dumps: JSONEncoder = json.dumps, + ) -> None: + await self.send_str(dumps(data), compress=compress) + + async def write_eof(self) -> None: # type: ignore[override] + if self._eof_sent: + return + if self._payload_writer is None: + raise RuntimeError("Response has not been started") + + await self.close() + self._eof_sent = True + + async def close( + self, *, code: int = WSCloseCode.OK, message: bytes = b"", drain: bool = True + ) -> bool: + """Close websocket connection.""" + if self._writer is None: + raise RuntimeError("Call .prepare() first") + + if self._closed: + return False + self._set_closed() + + try: + await self._writer.close(code, message) + writer = self._payload_writer + assert writer is not None + if drain: + await writer.drain() + except (asyncio.CancelledError, asyncio.TimeoutError): + self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE) + raise + except Exception as exc: + self._exception = exc + self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE) + return True + + reader = self._reader + assert reader is not None + # we need to break `receive()` cycle before we can call + # `reader.read()` as `close()` may be called from different task + if self._waiting: + assert self._loop is not None + assert self._close_wait is None + self._close_wait = self._loop.create_future() + reader.feed_data(WS_CLOSING_MESSAGE, 0) + await self._close_wait + + if self._closing: + self._close_transport() + return True + + try: + async with async_timeout.timeout(self._timeout): + while True: + msg = await reader.read() + if msg.type is WSMsgType.CLOSE: + self._set_code_close_transport(msg.data) + return True + except asyncio.CancelledError: + self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE) + raise + except Exception as exc: + self._exception = exc + self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE) + return 
True + + def _set_closing(self, code: WSCloseCode) -> None: + """Set the close code and mark the connection as closing.""" + self._closing = True + self._close_code = code + self._cancel_heartbeat() + + def _set_code_close_transport(self, code: WSCloseCode) -> None: + """Set the close code and close the transport.""" + self._close_code = code + self._close_transport() + + def _close_transport(self) -> None: + """Close the transport.""" + if self._req is not None and self._req.transport is not None: + self._req.transport.close() + + async def receive(self, timeout: Optional[float] = None) -> WSMessage: + if self._reader is None: + raise RuntimeError("Call .prepare() first") + + receive_timeout = timeout or self._receive_timeout + while True: + if self._waiting: + raise RuntimeError("Concurrent call to receive() is not allowed") + + if self._closed: + self._conn_lost += 1 + if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS: + raise RuntimeError("WebSocket connection is closed.") + return WS_CLOSED_MESSAGE + elif self._closing: + return WS_CLOSING_MESSAGE + + try: + self._waiting = True + try: + if receive_timeout: + # Entering the context manager and creating + # Timeout() object can take almost 50% of the + # run time in this loop so we avoid it if + # there is no read timeout. 
+ async with async_timeout.timeout(receive_timeout): + msg = await self._reader.read() + else: + msg = await self._reader.read() + self._reset_heartbeat() + finally: + self._waiting = False + if self._close_wait: + set_result(self._close_wait, None) + except asyncio.TimeoutError: + raise + except EofStream: + self._close_code = WSCloseCode.OK + await self.close() + return WSMessage(WSMsgType.CLOSED, None, None) + except WebSocketError as exc: + self._close_code = exc.code + await self.close(code=exc.code) + return WSMessage(WSMsgType.ERROR, exc, None) + except Exception as exc: + self._exception = exc + self._set_closing(WSCloseCode.ABNORMAL_CLOSURE) + await self.close() + return WSMessage(WSMsgType.ERROR, exc, None) + + if msg.type not in _INTERNAL_RECEIVE_TYPES: + # If its not a close/closing/ping/pong message + # we can return it immediately + return msg + + if msg.type is WSMsgType.CLOSE: + self._set_closing(msg.data) + # Could be closed while awaiting reader. + if not self._closed and self._autoclose: + # The client is likely going to close the + # connection out from under us so we do not + # want to drain any pending writes as it will + # likely result writing to a broken pipe. 
+ await self.close(drain=False) + elif msg.type is WSMsgType.CLOSING: + self._set_closing(WSCloseCode.OK) + elif msg.type is WSMsgType.PING and self._autoping: + await self.pong(msg.data) + continue + elif msg.type is WSMsgType.PONG and self._autoping: + continue + + return msg + + async def receive_str(self, *, timeout: Optional[float] = None) -> str: + msg = await self.receive(timeout) + if msg.type is not WSMsgType.TEXT: + raise WSMessageTypeError( + f"Received message {msg.type}:{msg.data!r} is not WSMsgType.TEXT" + ) + return cast(str, msg.data) + + async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: + msg = await self.receive(timeout) + if msg.type is not WSMsgType.BINARY: + raise WSMessageTypeError( + f"Received message {msg.type}:{msg.data!r} is not WSMsgType.BINARY" + ) + return cast(bytes, msg.data) + + async def receive_json( + self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None + ) -> Any: + data = await self.receive_str(timeout=timeout) + return loads(data) + + async def write(self, data: bytes) -> None: + raise RuntimeError("Cannot call .write() for websocket") + + def __aiter__(self) -> "WebSocketResponse": + return self + + async def __anext__(self) -> WSMessage: + msg = await self.receive() + if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): + raise StopAsyncIteration + return msg + + def _cancel(self, exc: BaseException) -> None: + # web_protocol calls this from connection_lost + # or when the server is shutting down. 
+ self._closing = True + self._cancel_heartbeat() + if self._reader is not None: + set_exception(self._reader, exc) diff --git a/venv/lib/python3.10/site-packages/aiohttp/worker.py b/venv/lib/python3.10/site-packages/aiohttp/worker.py new file mode 100644 index 0000000000000000000000000000000000000000..f7281bfde7541412c3174aa5fdcb859fa1b7a996 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/worker.py @@ -0,0 +1,255 @@ +"""Async gunicorn worker for aiohttp.web""" + +import asyncio +import inspect +import os +import re +import signal +import sys +from types import FrameType +from typing import TYPE_CHECKING, Any, Optional + +from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat +from gunicorn.workers import base + +from aiohttp import web + +from .helpers import set_result +from .web_app import Application +from .web_log import AccessLogger + +if TYPE_CHECKING: + import ssl + + SSLContext = ssl.SSLContext +else: + try: + import ssl + + SSLContext = ssl.SSLContext + except ImportError: # pragma: no cover + ssl = None # type: ignore[assignment] + SSLContext = object # type: ignore[misc,assignment] + + +__all__ = ("GunicornWebWorker", "GunicornUVLoopWebWorker") + + +class GunicornWebWorker(base.Worker): # type: ignore[misc,no-any-unimported] + + DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT + DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default + + def __init__(self, *args: Any, **kw: Any) -> None: # pragma: no cover + super().__init__(*args, **kw) + + self._task: Optional[asyncio.Task[None]] = None + self.exit_code = 0 + self._notify_waiter: Optional[asyncio.Future[bool]] = None + + def init_process(self) -> None: + # create new event_loop after fork + asyncio.get_event_loop().close() + + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + super().init_process() + + def run(self) -> None: + self._task = self.loop.create_task(self._run()) + + try: # ignore all finalization problems + 
self.loop.run_until_complete(self._task) + except Exception: + self.log.exception("Exception in gunicorn worker") + self.loop.run_until_complete(self.loop.shutdown_asyncgens()) + self.loop.close() + + sys.exit(self.exit_code) + + async def _run(self) -> None: + runner = None + if isinstance(self.wsgi, Application): + app = self.wsgi + elif inspect.iscoroutinefunction(self.wsgi) or ( + sys.version_info < (3, 14) and asyncio.iscoroutinefunction(self.wsgi) + ): + wsgi = await self.wsgi() + if isinstance(wsgi, web.AppRunner): + runner = wsgi + app = runner.app + else: + app = wsgi + else: + raise RuntimeError( + "wsgi app should be either Application or " + "async function returning Application, got {}".format(self.wsgi) + ) + + if runner is None: + access_log = self.log.access_log if self.cfg.accesslog else None + runner = web.AppRunner( + app, + logger=self.log, + keepalive_timeout=self.cfg.keepalive, + access_log=access_log, + access_log_format=self._get_valid_log_format( + self.cfg.access_log_format + ), + shutdown_timeout=self.cfg.graceful_timeout / 100 * 95, + ) + await runner.setup() + + ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None + + runner = runner + assert runner is not None + server = runner.server + assert server is not None + for sock in self.sockets: + site = web.SockSite( + runner, + sock, + ssl_context=ctx, + ) + await site.start() + + # If our parent changed then we shut down. 
+ pid = os.getpid() + try: + while self.alive: # type: ignore[has-type] + self.notify() + + cnt = server.requests_count + if self.max_requests and cnt > self.max_requests: + self.alive = False + self.log.info("Max requests, shutting down: %s", self) + + elif pid == os.getpid() and self.ppid != os.getppid(): + self.alive = False + self.log.info("Parent changed, shutting down: %s", self) + else: + await self._wait_next_notify() + except BaseException: + pass + + await runner.cleanup() + + def _wait_next_notify(self) -> "asyncio.Future[bool]": + self._notify_waiter_done() + + loop = self.loop + assert loop is not None + self._notify_waiter = waiter = loop.create_future() + self.loop.call_later(1.0, self._notify_waiter_done, waiter) + + return waiter + + def _notify_waiter_done( + self, waiter: Optional["asyncio.Future[bool]"] = None + ) -> None: + if waiter is None: + waiter = self._notify_waiter + if waiter is not None: + set_result(waiter, True) + + if waiter is self._notify_waiter: + self._notify_waiter = None + + def init_signals(self) -> None: + # Set up signals through the event loop API. 
+ + self.loop.add_signal_handler( + signal.SIGQUIT, self.handle_quit, signal.SIGQUIT, None + ) + + self.loop.add_signal_handler( + signal.SIGTERM, self.handle_exit, signal.SIGTERM, None + ) + + self.loop.add_signal_handler( + signal.SIGINT, self.handle_quit, signal.SIGINT, None + ) + + self.loop.add_signal_handler( + signal.SIGWINCH, self.handle_winch, signal.SIGWINCH, None + ) + + self.loop.add_signal_handler( + signal.SIGUSR1, self.handle_usr1, signal.SIGUSR1, None + ) + + self.loop.add_signal_handler( + signal.SIGABRT, self.handle_abort, signal.SIGABRT, None + ) + + # Don't let SIGTERM and SIGUSR1 disturb active requests + # by interrupting system calls + signal.siginterrupt(signal.SIGTERM, False) + signal.siginterrupt(signal.SIGUSR1, False) + # Reset signals so Gunicorn doesn't swallow subprocess return codes + # See: https://github.com/aio-libs/aiohttp/issues/6130 + + def handle_quit(self, sig: int, frame: Optional[FrameType]) -> None: + self.alive = False + + # worker_int callback + self.cfg.worker_int(self) + + # wakeup closing process + self._notify_waiter_done() + + def handle_abort(self, sig: int, frame: Optional[FrameType]) -> None: + self.alive = False + self.exit_code = 1 + self.cfg.worker_abort(self) + sys.exit(1) + + @staticmethod + def _create_ssl_context(cfg: Any) -> "SSLContext": + """Creates SSLContext instance for usage in asyncio.create_server. + + See ssl.SSLSocket.__init__ for more details. 
+ """ + if ssl is None: # pragma: no cover + raise RuntimeError("SSL is not supported.") + + ctx = ssl.SSLContext(cfg.ssl_version) + ctx.load_cert_chain(cfg.certfile, cfg.keyfile) + ctx.verify_mode = cfg.cert_reqs + if cfg.ca_certs: + ctx.load_verify_locations(cfg.ca_certs) + if cfg.ciphers: + ctx.set_ciphers(cfg.ciphers) + return ctx + + def _get_valid_log_format(self, source_format: str) -> str: + if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT: + return self.DEFAULT_AIOHTTP_LOG_FORMAT + elif re.search(r"%\([^\)]+\)", source_format): + raise ValueError( + "Gunicorn's style options in form of `%(name)s` are not " + "supported for the log formatting. Please use aiohttp's " + "format specification to configure access log formatting: " + "http://docs.aiohttp.org/en/stable/logging.html" + "#format-specification" + ) + else: + return source_format + + +class GunicornUVLoopWebWorker(GunicornWebWorker): + def init_process(self) -> None: + import uvloop + + # Close any existing event loop before setting a + # new policy. + asyncio.get_event_loop().close() + + # Setup uvloop policy, so that every + # asyncio.get_event_loop() will create an instance + # of uvloop event loop. + asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) + + super().init_process() diff --git a/venv/lib/python3.10/site-packages/dataproperty/__init__.py b/venv/lib/python3.10/site-packages/dataproperty/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..beb08988c3b9ad602ba5ac7a361514252ee2dbef --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/__init__.py @@ -0,0 +1,45 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._align import Align +from ._align_getter import align_getter +from ._column import ColumnDataProperty +from ._common import MAX_STRICT_LEVEL_MAP, MIN_STRICT_LEVEL_MAP, NOT_QUOTING_FLAGS, DefaultValue +from ._container import MinMaxContainer +from ._dataproperty import DataProperty +from ._extractor import DataPropertyExtractor, DataPropertyMatrix, MatrixFormatting +from ._formatter import Format +from ._function import calc_ascii_char_width, get_integer_digit, get_number_of_digit +from ._line_break import LineBreakHandling +from ._preprocessor import Preprocessor +from .logger import set_logger + + +__all__ = ( + "Align", + "align_getter", + "ColumnDataProperty", + "DataProperty", + "DataPropertyExtractor", + "DataPropertyMatrix", + "Format", + "LineBreakHandling", + "MatrixFormatting", + "MinMaxContainer", + "Preprocessor", + "calc_ascii_char_width", + "get_integer_digit", + "get_number_of_digit", + "MAX_STRICT_LEVEL_MAP", + "MIN_STRICT_LEVEL_MAP", + "NOT_QUOTING_FLAGS", + "DefaultValue", + "set_logger", + "__author__", + "__copyright__", + "__email__", + "__license__", + "__version__", +) diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1070641909c2167b9665fd282ad299a30c491fa5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__version__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__version__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94a35952502e1d0f36911fcfa1d8875da313432b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__version__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9e3f5de86a22e94f4affb726f045af7c1fd4b62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align_getter.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align_getter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3f9f0991c566db89057dea8d24e4a620344b053 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align_getter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8d86e4119dd715b87c3221ea07127c552c759e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_column.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_column.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..006343db666ff7c13347ed404c968690c678e2bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_column.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_common.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ae989b7e6183319142ce3ad2357c20da720a803c Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_container.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fb9b6059e204fa13418228651baea8cbcfbedff Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_converter.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab5811ee4a5c8c2e4f2b38a04ce154352d339b80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_converter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_dataproperty.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_dataproperty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4faa28723b068de26330a8d4e41c24f79966994 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_dataproperty.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_extractor.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_extractor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48d848b8930b5f817666ffff9a0f347ddc78293c Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_extractor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_formatter.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab26793c0967c4c6cb317ccb4ba863497c3dc1f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_formatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4ea11027e60660e96931962297f049ead07a8f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_interface.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d69daa4591d1d65fdb7dacb0d2c6ade17f526f3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_interface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_line_break.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_line_break.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bb12e1cc6580f6a9213ebeadbfc7e2b652193a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_line_break.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_preprocessor.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_preprocessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aa00a256090cb8acf9819b8f5c5603a264d4255 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_preprocessor.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/typing.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a86e9fe77457c22eb8e29894a5ec51bac6d1bc0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/typing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__version__.py b/venv/lib/python3.10/site-packages/dataproperty/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..6b02049e0e9ae8a2e497d783aa86167c4dc34f49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/__version__.py @@ -0,0 +1,9 @@ +from typing import Final + + +__author__: Final = "Tsuyoshi Hombashi" +__copyright__: Final = f"Copyright 2016-2024, {__author__}" +__license__: Final = "MIT License" +__version__ = "1.1.0" +__maintainer__: Final = __author__ +__email__: Final = "tsuyoshi.hombashi@gmail.com" diff --git a/venv/lib/python3.10/site-packages/dataproperty/_align.py b/venv/lib/python3.10/site-packages/dataproperty/_align.py new file mode 100644 index 0000000000000000000000000000000000000000..7d8a318349fc4cdacf4397fcabc62b5fe3e2552a --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_align.py @@ -0,0 +1,25 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import enum + + +@enum.unique +class Align(enum.Enum): + AUTO = (1 << 0, "auto") + LEFT = (1 << 1, "left") + RIGHT = (1 << 2, "right") + CENTER = (1 << 3, "center") + + @property + def align_code(self) -> int: + return self.__align_code + + @property + def align_string(self) -> str: + return self.__align_string + + def __init__(self, code: int, string: str) -> None: + self.__align_code = code + self.__align_string = string diff --git a/venv/lib/python3.10/site-packages/dataproperty/_align_getter.py b/venv/lib/python3.10/site-packages/dataproperty/_align_getter.py new file mode 100644 index 0000000000000000000000000000000000000000..3550e7681cb74b699e0856bee5bad141ba288ca2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_align_getter.py @@ -0,0 +1,33 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from typing import Dict + +from typepy import Typecode + +from ._align import Align + + +class AlignGetter: + @property + def typecode_align_table(self): + raise NotImplementedError() + + @typecode_align_table.setter + def typecode_align_table(self, x: Dict[Typecode, Align]) -> None: + self.__typecode_align_table = x + + def get_align_from_typecode(self, typecode: Typecode) -> Align: + return self.__typecode_align_table.get(typecode, self.default_align) + + def __init__(self) -> None: + self.typecode_align_table = { + Typecode.STRING: Align.LEFT, + Typecode.INTEGER: Align.RIGHT, + Typecode.REAL_NUMBER: Align.RIGHT, + } + self.default_align = Align.LEFT + + +align_getter = AlignGetter() diff --git a/venv/lib/python3.10/site-packages/dataproperty/_base.py b/venv/lib/python3.10/site-packages/dataproperty/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..2b25b5cd805ecd3412a69f6558b189dae382e9d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_base.py @@ -0,0 +1,98 @@ +from typing import Final, Optional + +from typepy import ( + Bool, + DateTime, + Dictionary, + Infinity, 
+ Integer, + IpAddress, + List, + Nan, + NoneType, + NullString, + RealNumber, + String, + Typecode, +) +from typepy.type import AbstractType + +from ._formatter import Formatter +from ._interface import DataPeropertyInterface + + +class DataPeropertyBase(DataPeropertyInterface): + __slots__ = ( + "_datetime_format_str", + "_decimal_places", + "_east_asian_ambiguous_width", + "_formatter", + "_typecode", + "__format_str", + ) + + __TYPE_CLASS_TABLE: Final[dict[Typecode, type[AbstractType]]] = { + Typecode.BOOL: Bool, + Typecode.DATETIME: DateTime, + Typecode.DICTIONARY: Dictionary, + Typecode.INTEGER: Integer, + Typecode.INFINITY: Infinity, + Typecode.IP_ADDRESS: IpAddress, + Typecode.LIST: List, + Typecode.NAN: Nan, + Typecode.NONE: NoneType, + Typecode.NULL_STRING: NullString, + Typecode.REAL_NUMBER: RealNumber, + Typecode.STRING: String, + } + + @property + def type_class(self) -> type[AbstractType]: + return self.__TYPE_CLASS_TABLE[self.typecode] + + @property + def typecode(self) -> Typecode: + """ + ``typepy.Typecode`` that corresponds to the type of the ``data``. + + :return: + One of the Enum value that are defined ``typepy.Typecode``. 
+ :rtype: typepy.Typecode + """ + + assert self._typecode + + return self._typecode + + @property + def typename(self) -> str: + return self.typecode.name + + def __init__( + self, + format_flags: Optional[int], + is_formatting_float: bool, + datetime_format_str: str, + east_asian_ambiguous_width: int, + ) -> None: + self._decimal_places: Optional[int] = None + self._east_asian_ambiguous_width = east_asian_ambiguous_width + self._typecode: Optional[Typecode] = None + + self._datetime_format_str = datetime_format_str + self.__format_str = "" + + self._formatter = Formatter( + format_flags=format_flags, + datetime_format_str=self._datetime_format_str, + is_formatting_float=is_formatting_float, + ) + + @property + def format_str(self) -> str: + if self.__format_str: + return self.__format_str + + self.__format_str = self._formatter.make_format_str(self.typecode, self.decimal_places) + + return self.__format_str diff --git a/venv/lib/python3.10/site-packages/dataproperty/_column.py b/venv/lib/python3.10/site-packages/dataproperty/_column.py new file mode 100644 index 0000000000000000000000000000000000000000..dff22531cbfa858b3dcdb7911b62b5579e2cb2bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_column.py @@ -0,0 +1,352 @@ +from typing import Any, Optional + +from mbstrdecoder import MultiByteStrDecoder +from typepy import Integer, StrictLevel, Typecode, TypeConversionError + +from ._align import Align +from ._align_getter import align_getter +from ._base import DataPeropertyBase +from ._common import DefaultValue +from ._container import ListContainer, MinMaxContainer +from ._dataproperty import DataProperty +from ._function import calc_ascii_char_width +from .typing import FloatType + + +class ColumnDataProperty(DataPeropertyBase): + __slots__ = ( + "__header_ascii_char_width", + "__body_ascii_char_width", + "__column_index", + "__dp_list", + "__float_type", + "__format_map", + "__is_calculate", + "__max_precision", + "__minmax_integer_digits", + 
"__minmax_decimal_places", + "__minmax_additional_format_len", + "__typecode_bitmap", + ) + + @property + def align(self) -> Align: + return align_getter.get_align_from_typecode(self.typecode) + + @property + def bit_length(self) -> Optional[int]: + if self.typecode != Typecode.INTEGER: + return None + + bit_length = 0 + for value_dp in self.__dp_list: + try: + bit_length = max(bit_length, int.bit_length(value_dp.data)) + except TypeError: + pass + + return bit_length + + @property + def column_index(self) -> int: + return self.__column_index + + @property + def decimal_places(self) -> Optional[int]: + return self._decimal_places + + @property + def ascii_char_width(self) -> int: + return max(self.__header_ascii_char_width, self.__body_ascii_char_width) + + @property + def minmax_integer_digits(self) -> MinMaxContainer: + return self.__minmax_integer_digits + + @property + def minmax_decimal_places(self) -> ListContainer: + return self.__minmax_decimal_places + + @property + def minmax_additional_format_len(self) -> MinMaxContainer: + return self.__minmax_additional_format_len + + def __init__( + self, + column_index: int, + float_type: Optional[FloatType], + min_width: int = 0, + format_flags: Optional[int] = None, + is_formatting_float: bool = True, + datetime_format_str: str = DefaultValue.DATETIME_FORMAT, + east_asian_ambiguous_width: int = 1, + max_precision: int = DefaultValue.MAX_PRECISION, + ) -> None: + super().__init__( + format_flags=format_flags, + is_formatting_float=is_formatting_float, + datetime_format_str=datetime_format_str, + east_asian_ambiguous_width=east_asian_ambiguous_width, + ) + + self.__header_ascii_char_width = 0 + self.__body_ascii_char_width = min_width + self.__column_index = column_index + + self.__float_type = float_type + + self.__is_calculate = True + self.__dp_list: list[DataProperty] = [] + self.__minmax_integer_digits = MinMaxContainer() + self.__minmax_decimal_places = ListContainer() + self.__minmax_additional_format_len = 
MinMaxContainer() + self.__max_precision = max_precision + + self.__typecode_bitmap = Typecode.NONE.value + self.__calc_typecode_from_bitmap() + + self.__format_map: dict[Typecode, str] = self._formatter.make_format_map( + decimal_places=self._decimal_places + ) + + def __repr__(self) -> str: + element_list = [] + + if self.column_index is not None: + element_list.append(f"column={self.column_index}") + + element_list.extend( + [ + f"type={self.typename}", + f"align={self.align.align_string}", + f"ascii_width={self.ascii_char_width}", + ] + ) + + if Integer(self.bit_length).is_type(): + element_list.append(f"bit_len={self.bit_length}") + + if self.minmax_integer_digits.has_value(): + if self.minmax_integer_digits.is_same_value(): + value = f"int_digits={self.minmax_integer_digits.min_value}" + else: + value = f"int_digits=({self.minmax_integer_digits})" + + element_list.append(value) + + if self.minmax_decimal_places.has_value(): + if self.minmax_decimal_places.is_same_value(): + value = f"decimal_places={self.minmax_decimal_places.min_value}" + else: + value = f"decimal_places=({self.minmax_decimal_places})" + + element_list.append(value) + + if not self.minmax_additional_format_len.is_zero(): + if self.minmax_additional_format_len.is_same_value(): + value = f"extra_len={self.minmax_additional_format_len.min_value}" + else: + value = f"extra_len=({self.minmax_additional_format_len})" + + element_list.append(value) + + return ", ".join(element_list) + + def dp_to_str(self, value_dp: DataProperty) -> str: + if value_dp.typecode == Typecode.STRING: + return str(value_dp.data) + + try: + value = self.__preprocess_value_before_tostring(value_dp) + except TypeConversionError: + return self.__format_map.get(value_dp.typecode, "{:s}").format(value_dp.data) + + to_string_format_str = self.__get_tostring_format(value_dp) + + try: + return to_string_format_str.format(value) + except (ValueError, TypeError): + pass + + try: + return MultiByteStrDecoder(value).unicode_str + 
except ValueError: + pass + + return str(value) + + def extend_width(self, ascii_char_width: int) -> None: + self.extend_header_width(ascii_char_width) + self.extend_body_width(ascii_char_width) + + def extend_header_width(self, ascii_char_width: int) -> None: + self.__header_ascii_char_width += ascii_char_width + + def extend_body_width(self, ascii_char_width: int) -> None: + self.__body_ascii_char_width += ascii_char_width + + def update_header(self, header_db: DataProperty) -> None: + self.__header_ascii_char_width = header_db.ascii_char_width + + def update_body(self, value_dp: DataProperty) -> None: + if value_dp.is_include_ansi_escape: + assert value_dp.no_ansi_escape_dp + value_dp = value_dp.no_ansi_escape_dp + + self.__typecode_bitmap |= value_dp.typecode.value + self.__calc_typecode_from_bitmap() + + if value_dp.typecode in (Typecode.REAL_NUMBER, Typecode.INTEGER): + self.__minmax_integer_digits.update(value_dp.integer_digits) + self.__minmax_decimal_places.update(value_dp.decimal_places) + self.__update_decimal_places() + + self.__minmax_additional_format_len.update(value_dp.additional_format_len) + + self.__dp_list.append(value_dp) + self.__update_ascii_char_width() + + def merge(self, column_dp: "ColumnDataProperty") -> None: + self.__typecode_bitmap |= column_dp.typecode.value + self.__calc_typecode_from_bitmap() + + self.__minmax_integer_digits.merge(column_dp.minmax_integer_digits) + self.__minmax_decimal_places.merge(column_dp.minmax_decimal_places) + self.__update_decimal_places() + + self.__minmax_additional_format_len.merge(column_dp.minmax_additional_format_len) + + self.__body_ascii_char_width = max(self.__body_ascii_char_width, column_dp.ascii_char_width) + self.__update_ascii_char_width() + + def begin_update(self) -> None: + self.__is_calculate = False + + def end_update(self) -> None: + self.__is_calculate = True + + self.__calc_typecode_from_bitmap() + self.__update_decimal_places() + self.__update_ascii_char_width() + + def 
__is_not_single_typecode(self, typecode_bitmap: int) -> bool: + return bool( + self.__typecode_bitmap & typecode_bitmap and self.__typecode_bitmap & ~typecode_bitmap + ) + + def __is_float_typecode(self) -> bool: + FLOAT_TYPECODE_BMP = ( + Typecode.REAL_NUMBER.value | Typecode.INFINITY.value | Typecode.NAN.value + ) + NUMBER_TYPECODE_BMP = FLOAT_TYPECODE_BMP | Typecode.INTEGER.value + + if self.__is_not_single_typecode(NUMBER_TYPECODE_BMP | Typecode.NULL_STRING.value): + return False + + if ( + bin(self.__typecode_bitmap & (FLOAT_TYPECODE_BMP | Typecode.NULL_STRING.value)).count( + "1" + ) + >= 2 + ): + return True + + if bin(self.__typecode_bitmap & NUMBER_TYPECODE_BMP).count("1") >= 2: + return True + + return False + + def __calc_body_ascii_char_width(self) -> int: + width_list = [self.__body_ascii_char_width] + + for value_dp in self.__dp_list: + if value_dp.is_include_ansi_escape: + assert value_dp.no_ansi_escape_dp + value_dp = value_dp.no_ansi_escape_dp + + width_list.append( + calc_ascii_char_width(self.dp_to_str(value_dp), self._east_asian_ambiguous_width) + ) + + return max(width_list) + + def __calc_decimal_places(self) -> Optional[int]: + if self.minmax_decimal_places.max_value is None: + return None + + return min(self.__max_precision, int(self.minmax_decimal_places.max_value)) + + def __get_tostring_format(self, value_dp: DataProperty) -> str: + if self.typecode == Typecode.STRING: + return self.__format_map.get(value_dp.typecode, "{:s}") + + return self.__format_map.get(self.typecode, "{:s}") + + def __get_typecode_from_bitmap(self) -> Typecode: + if self.__is_float_typecode(): + return Typecode.REAL_NUMBER + + if any( + [ + self.__is_not_single_typecode(Typecode.BOOL.value), + self.__is_not_single_typecode(Typecode.DATETIME.value), + ] + ): + return Typecode.STRING + + typecode_list = [ + Typecode.STRING, + Typecode.REAL_NUMBER, + Typecode.INTEGER, + Typecode.DATETIME, + Typecode.DICTIONARY, + Typecode.IP_ADDRESS, + Typecode.LIST, + Typecode.BOOL, + 
Typecode.INFINITY, + Typecode.NAN, + Typecode.NULL_STRING, + ] + + for typecode in typecode_list: + if self.__typecode_bitmap & typecode.value: + return typecode + + if self.__typecode_bitmap == Typecode.NONE.value: + return Typecode.NONE + + return Typecode.STRING + + def __update_ascii_char_width(self) -> None: + if not self.__is_calculate: + return + + self.__body_ascii_char_width = self.__calc_body_ascii_char_width() + + def __update_decimal_places(self) -> None: + if not self.__is_calculate: + return + + self._decimal_places = self.__calc_decimal_places() + self.__format_map = self._formatter.make_format_map(decimal_places=self._decimal_places) + + def __calc_typecode_from_bitmap(self) -> None: + if not self.__is_calculate: + return + + self._typecode = self.__get_typecode_from_bitmap() + + def __preprocess_value_before_tostring(self, value_dp: DataProperty) -> Any: + if self.typecode == value_dp.typecode or self.typecode in [ + Typecode.STRING, + Typecode.BOOL, + Typecode.DATETIME, + ]: + return value_dp.data + + return self.type_class( + value_dp.data, + strict_level=StrictLevel.MIN, + float_type=self.__float_type, + strip_ansi_escape=False, + ).convert() diff --git a/venv/lib/python3.10/site-packages/dataproperty/_common.py b/venv/lib/python3.10/site-packages/dataproperty/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..7f0b689b66d220d744b0ffab887c75b707cdcc94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_common.py @@ -0,0 +1,74 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import itertools +from datetime import datetime +from decimal import Decimal +from typing import Final + +from typepy import StrictLevel, Typecode + +from .typing import StrictLevelMap, TypeValueMap + + +NOT_QUOTING_FLAGS: Final = { + Typecode.BOOL: False, + Typecode.DATETIME: False, + Typecode.DICTIONARY: False, + Typecode.INFINITY: False, + Typecode.INTEGER: False, + Typecode.IP_ADDRESS: False, + Typecode.LIST: False, + Typecode.NAN: False, + Typecode.NULL_STRING: False, + Typecode.NONE: False, + Typecode.REAL_NUMBER: False, + Typecode.STRING: False, +} + +MAX_STRICT_LEVEL_MAP: Final[StrictLevelMap] = dict( + itertools.product(list(Typecode), [StrictLevel.MAX]) +) +MIN_STRICT_LEVEL_MAP: Final[StrictLevelMap] = dict( + itertools.product(list(Typecode), [StrictLevel.MIN]) +) + + +class DefaultValue: + DATETIME_FORMAT: Final = "%Y-%m-%dT%H:%M:%S%z" + FLOAT_TYPE: Final = Decimal + INF_VALUE: Final = FLOAT_TYPE("inf") + NAN_VALUE: Final = FLOAT_TYPE("nan") + + QUOTING_FLAGS: Final = copy.deepcopy(NOT_QUOTING_FLAGS) + + STRICT_LEVEL_MAP: Final[StrictLevelMap] = { + "default": StrictLevel.MAX, + Typecode.BOOL: StrictLevel.MAX, + Typecode.DATETIME: StrictLevel.MAX, + Typecode.DICTIONARY: StrictLevel.MAX, + Typecode.REAL_NUMBER: 1, + Typecode.INFINITY: StrictLevel.MIN, + Typecode.INTEGER: 1, + Typecode.IP_ADDRESS: StrictLevel.MAX, + Typecode.LIST: StrictLevel.MAX, + Typecode.NAN: StrictLevel.MIN, + Typecode.NONE: StrictLevel.MAX, + Typecode.NULL_STRING: StrictLevel.MIN, + Typecode.STRING: StrictLevel.MIN, + } + + TYPE_VALUE_MAP: Final[TypeValueMap] = { + Typecode.NONE: None, + Typecode.INFINITY: INF_VALUE, + Typecode.NAN: NAN_VALUE, + } + + MAX_WORKERS: Final = 1 + MAX_PRECISION: Final = 100 + + +def default_datetime_formatter(value: datetime) -> str: + return value.strftime(DefaultValue.DATETIME_FORMAT) diff --git a/venv/lib/python3.10/site-packages/dataproperty/_container.py 
b/venv/lib/python3.10/site-packages/dataproperty/_container.py new file mode 100644 index 0000000000000000000000000000000000000000..543576395cabcc830071aa8d3e9f5c68663aebe5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_container.py @@ -0,0 +1,196 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import abc +from collections.abc import Sequence +from decimal import Decimal +from typing import Any, Final, Optional, Union + +from typepy import RealNumber + + +T = Union[int, float, Decimal] +NAN: Final = Decimal("nan") + + +class AbstractContainer(metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def min_value(self) -> Optional[Decimal]: # pragma: no cover + pass + + @property + @abc.abstractmethod + def max_value(self) -> Optional[Decimal]: # pragma: no cover + pass + + @abc.abstractmethod + def mean(self) -> Decimal: # pragma: no cover + pass + + @abc.abstractmethod + def update(self, value: Optional[T]) -> None: # pragma: no cover + pass + + @abc.abstractmethod + def merge(self, value: "AbstractContainer") -> None: # pragma: no cover + pass + + def __repr__(self) -> str: + if not self.has_value(): + return "None" + + return ", ".join([f"min={self.min_value}", f"max={self.max_value}"]) + + def has_value(self) -> bool: + return self.min_value is not None and self.max_value is not None + + def is_same_value(self) -> bool: + return self.has_value() and self.min_value == self.max_value + + def is_zero(self) -> bool: + return self.has_value() and self.min_value == 0 and self.max_value == 0 + + +class ListContainer(AbstractContainer): + __slots__ = ("__value_list",) + + @property + def min_value(self) -> Optional[Decimal]: + try: + return min(self.__value_list) + except ValueError: + return None + + @property + def max_value(self) -> Optional[Decimal]: + try: + return max(self.__value_list) + except ValueError: + return None + + @property + def value_list(self) -> list[Decimal]: + return self.__value_list + + def __init__(self, value_list: 
Optional[list[Decimal]] = None) -> None: + if value_list is None: + self.__value_list: list[Decimal] = [] + return + + for value in value_list: + self.update(value) + + def mean(self) -> Decimal: + try: + return Decimal(sum(self.__value_list) / len(self.__value_list)) + except ZeroDivisionError: + return NAN + + def update(self, value: Union[int, float, Decimal, None]) -> None: + if value is None: + return + + store_value = RealNumber(value).try_convert() + if store_value is None: + return + + self.__value_list.append(store_value) + + def merge(self, value: "AbstractContainer") -> None: + if not isinstance(value, ListContainer): + return + + for v in value.value_list: + self.update(v) + + +class MinMaxContainer(AbstractContainer): + __slots__ = ("__min_value", "__max_value") + + def __init__(self, value_list: Optional[Sequence[Decimal]] = None) -> None: + self.__min_value: Optional[Decimal] = None + self.__max_value: Optional[Decimal] = None + + if value_list is None: + return + + for value in value_list: + self.update(value) + + @property + def min_value(self) -> Optional[Decimal]: + return self.__min_value + + @property + def max_value(self) -> Optional[Decimal]: + return self.__max_value + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, MinMaxContainer): + return False + + return all([self.min_value == other.min_value, self.max_value == other.max_value]) + + def __ne__(self, other: Any) -> bool: + if not isinstance(other, MinMaxContainer): + return True + + return any([self.min_value != other.min_value, self.max_value != other.max_value]) + + def __contains__(self, x: T) -> bool: + if self.min_value is None: + return False + + if self.max_value is None: + return False + + return self.min_value <= x <= self.max_value + + def diff(self) -> Decimal: + if self.min_value is None: + return NAN + + if self.max_value is None: + return NAN + + try: + return self.max_value - self.min_value + except TypeError: + return NAN + + def mean(self) -> Decimal: 
+ if self.min_value is None: + return NAN + + if self.max_value is None: + return NAN + + try: + return (self.max_value + self.min_value) * Decimal("0.5") + except TypeError: + return NAN + + def update(self, value: Optional[T]) -> None: + if value is None: + return + + decimal_value = Decimal(value) + + if self.__min_value is None: + self.__min_value = decimal_value + else: + self.__min_value = min(self.__min_value, decimal_value) + + if self.__max_value is None: + self.__max_value = decimal_value + else: + self.__max_value = max(self.__max_value, decimal_value) + + def merge(self, value: "AbstractContainer") -> None: + if not isinstance(value, MinMaxContainer): + return + + self.update(value.min_value) + self.update(value.max_value) diff --git a/venv/lib/python3.10/site-packages/dataproperty/_converter.py b/venv/lib/python3.10/site-packages/dataproperty/_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..40757c5abcbf8c6f1b90952694eee5aae4576101 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_converter.py @@ -0,0 +1,90 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import re +from typing import Any, Final, Optional + +from typepy import Typecode, TypeConversionError + +from ._common import MAX_STRICT_LEVEL_MAP, DefaultValue +from ._dataproperty import DataProperty +from ._preprocessor import Preprocessor +from .typing import DateTimeFormatter, FloatType, StrictLevelMap, TypeValueMap + + +class DataPropertyConverter: + __RE_QUOTE_LINE: Final = re.compile(r"^\s*[\"'].*[\"']\s*$") # noqa: w605 + __RE_QUOTE_CHAR: Final = re.compile("[\"']") + + def __init__( + self, + preprocessor: Preprocessor, + datetime_format_str: str, + datetime_formatter: Optional[DateTimeFormatter] = None, + type_value_map: Optional[TypeValueMap] = None, + quoting_flags: Optional[dict[Typecode, bool]] = None, + float_type: Optional[FloatType] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> None: + self.__preprocessor = preprocessor + self.__type_value_map: TypeValueMap = ( + type_value_map if type_value_map else DefaultValue.TYPE_VALUE_MAP + ) + self.__quoting_flags: dict[Typecode, bool] = ( + quoting_flags if quoting_flags else DefaultValue.QUOTING_FLAGS + ) + + self.__datetime_formatter = datetime_formatter + self.__datetime_format_str = datetime_format_str + self.__float_type = float_type + self.__strict_level_map = strict_level_map + + def convert(self, dp_value: DataProperty) -> DataProperty: + try: + return self.__create_dataproperty(self.__convert_value(dp_value)) + except TypeConversionError: + pass + + if not self.__quoting_flags.get(dp_value.typecode): + if self.__preprocessor.is_escape_html_tag: + return self.__create_dataproperty(dp_value.to_str()) + + return dp_value + + return self.__create_dataproperty(self.__apply_quote(dp_value.typecode, dp_value.to_str())) + + def __create_dataproperty(self, value: Any) -> DataProperty: + return DataProperty( + value, + preprocessor=self.__preprocessor, + float_type=self.__float_type, + datetime_format_str=self.__datetime_format_str, + 
strict_level_map=MAX_STRICT_LEVEL_MAP, + ) + + def __apply_quote(self, typecode: Typecode, data: Any) -> Any: + if not self.__quoting_flags.get(typecode): + return data + + try: + if self.__RE_QUOTE_LINE.search(data): + return data + except TypeError: + return data + + return '"{}"'.format(self.__RE_QUOTE_CHAR.sub('\\"', data.replace("\\", "\\\\"))) + + def __convert_value(self, dp_value: DataProperty) -> Any: + if dp_value.typecode in self.__type_value_map: + return self.__apply_quote(dp_value.typecode, self.__type_value_map[dp_value.typecode]) + + if dp_value.typecode == Typecode.DATETIME and self.__datetime_formatter: + try: + return self.__apply_quote( + dp_value.typecode, self.__datetime_formatter(dp_value.data) + ) + except TypeError: + raise TypeConversionError + + raise TypeConversionError("no need to convert") diff --git a/venv/lib/python3.10/site-packages/dataproperty/_dataproperty.py b/venv/lib/python3.10/site-packages/dataproperty/_dataproperty.py new file mode 100644 index 0000000000000000000000000000000000000000..20c9887c300d60cc1fcbb662e1e19f375f284d02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_dataproperty.py @@ -0,0 +1,381 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +from decimal import Decimal +from typing import Any, Final, Optional, cast + +import typepy +from mbstrdecoder import MultiByteStrDecoder +from typepy import ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + Nan, + NoneType, + NullString, + RealNumber, + StrictLevel, + String, + Typecode, + TypeConversionError, +) +from typepy.type import AbstractType + +from ._align import Align +from ._align_getter import align_getter +from ._base import DataPeropertyBase +from ._common import DefaultValue +from ._function import calc_ascii_char_width, get_number_of_digit +from ._preprocessor import Preprocessor +from .typing import FloatType, StrictLevelMap, TypeHint + + +class DataProperty(DataPeropertyBase): + __slots__ = ( + "__data", + "__no_ansi_escape_data", + "__align", + "__integer_digits", + "__additional_format_len", + "__length", + "__ascii_char_width", + ) + + __type_class_list: Final[list[type[AbstractType]]] = [ + NoneType, + Integer, + Infinity, + Nan, + IpAddress, + RealNumber, + Bool, + typepy.List, + Dictionary, + DateTime, + NullString, + String, + ] + + def __init__( + self, + data: Any, + preprocessor: Optional[Preprocessor] = None, + type_hint: TypeHint = None, + float_type: Optional[FloatType] = None, + format_flags: Optional[int] = None, + datetime_format_str: str = DefaultValue.DATETIME_FORMAT, + strict_level_map: Optional[StrictLevelMap] = None, + east_asian_ambiguous_width: int = 1, + ) -> None: + super().__init__( + format_flags=format_flags, + is_formatting_float=True, + datetime_format_str=datetime_format_str, + east_asian_ambiguous_width=east_asian_ambiguous_width, + ) + + self.__additional_format_len: Optional[int] = None + self.__align: Optional[Align] = None + self.__ascii_char_width: Optional[int] = None + self.__integer_digits: Optional[int] = None + self.__length: Optional[int] = None + + if preprocessor is None: + preprocessor = Preprocessor() + + data, no_ansi_escape_data = 
preprocessor.preprocess(data) + + self.__set_data(data, type_hint, float_type, strict_level_map) + + if no_ansi_escape_data is None or len(data) == len(no_ansi_escape_data): + self.__no_ansi_escape_data: Optional[DataProperty] = None + else: + self.__no_ansi_escape_data = DataProperty(no_ansi_escape_data, float_type=float_type) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, DataProperty): + return False + + if self.typecode != other.typecode: + return False + + if self.typecode == Typecode.NAN: + return True + + return self.data == other.data + + def __ne__(self, other: Any) -> bool: + if not isinstance(other, DataProperty): + return True + + if self.typecode != other.typecode: + return True + + if self.typecode == Typecode.NAN: + return False + + return self.data != other.data + + def __repr__(self) -> str: + element_list = [] + + if self.typecode == Typecode.DATETIME: + element_list.append(f"data={str(self.data):s}") + else: + try: + element_list.append("data=" + self.to_str()) + except UnicodeEncodeError: + element_list.append(f"data={MultiByteStrDecoder(self.data).unicode_str}") + + element_list.extend( + [ + f"type={self.typename:s}", + f"align={self.align.align_string}", + f"ascii_width={self.ascii_char_width:d}", + ] + ) + + if Integer(self.length).is_type(): + element_list.append(f"length={self.length}") + + if Integer(self.integer_digits).is_type(): + element_list.append(f"int_digits={self.integer_digits}") + + if Integer(self.decimal_places).is_type(): + element_list.append(f"decimal_places={self.decimal_places}") + + if Integer(self.additional_format_len).is_type(): + element_list.append(f"extra_len={self.additional_format_len}") + + return ", ".join(element_list) + + @property + def align(self) -> Align: + if not self.__align: + if self.is_include_ansi_escape: + assert self.no_ansi_escape_dp + self.__align = self.no_ansi_escape_dp.align + else: + self.__align = align_getter.get_align_from_typecode(self.typecode) + + assert 
self.__align + + return self.__align + + @property + def decimal_places(self) -> Optional[int]: + """ + :return: + Decimal places if the ``data`` type either ``float`` or + ``decimal.Decimal``. Returns ``0`` if the ``data`` type is ``int``. + Otherwise, returns ``float("nan")``. + :rtype: int + """ + + if self._decimal_places is None: + self.__set_digit() + + return self._decimal_places + + @property + def data(self) -> Any: + """ + :return: Original data value. + :rtype: Original data type. + """ + + return self.__data + + @property + def is_include_ansi_escape(self) -> bool: + if self.no_ansi_escape_dp is None: + return False + + return self.length != self.no_ansi_escape_dp.length + + @property + def no_ansi_escape_dp(self) -> Optional["DataProperty"]: + return self.__no_ansi_escape_data + + @property + def length(self) -> Optional[int]: + """ + :return: Length of the ``data``. + :rtype: int + """ + + if self.__length is None: + self.__length = self.__get_length() + + return self.__length + + @property + def ascii_char_width(self) -> int: + if self.__ascii_char_width is None: + self.__ascii_char_width = self.__calc_ascii_char_width() + + return self.__ascii_char_width + + @property + def integer_digits(self) -> Optional[int]: + """ + :return: + Integer digits if the ``data`` type either + ``int``/``float``/``decimal.Decimal``. + Otherwise, returns ``None``. 
+ :rtype: int + """ + + if self.__integer_digits is None: + self.__set_digit() + + return self.__integer_digits + + @property + def additional_format_len(self) -> int: + if self.__additional_format_len is None: + self.__additional_format_len = self.__get_additional_format_len() + + return self.__additional_format_len + + def get_padding_len(self, ascii_char_width: int) -> int: + if self.typecode in (Typecode.LIST, Typecode.DICTIONARY): + unicode_str_len = DataProperty(MultiByteStrDecoder(str(self.data)).unicode_str).length + assert unicode_str_len + return max( + ascii_char_width - (self.ascii_char_width - unicode_str_len), + 0, + ) + + try: + return max(ascii_char_width - (self.ascii_char_width - cast(int, self.length)), 0) + except TypeError: + return ascii_char_width + + def to_str(self) -> str: + return self.format_str.format(self.data) + + def __get_additional_format_len(self) -> int: + if not RealNumber(self.data, strip_ansi_escape=False).is_type(): + return 0 + + format_len = 0 + + if Decimal(self.data) < 0: + # for minus character + format_len += 1 + + return format_len + + def __get_base_float_len(self) -> int: + assert self.integer_digits is not None + assert self.decimal_places is not None + + if any([self.integer_digits < 0, self.decimal_places < 0]): + raise ValueError("integer digits and decimal places must be greater or equals to zero") + + float_len = self.integer_digits + self.decimal_places + if self.decimal_places > 0: + # for dot + float_len += 1 + + return float_len + + def __get_length(self) -> Optional[int]: + if self.typecode in (Typecode.DICTIONARY, Typecode.LIST, Typecode.STRING): + return len(self.data) + + return None + + def __calc_ascii_char_width(self) -> int: + if self.typecode == Typecode.INTEGER: + return cast(int, self.integer_digits) + self.additional_format_len + + if self.typecode == Typecode.REAL_NUMBER: + return self.__get_base_float_len() + self.additional_format_len + + if self.typecode == Typecode.DATETIME: + try: + return 
len(self.to_str()) + except ValueError: + # reach to this line if the year <1900. + # the datetime strftime() methods require year >= 1900. + return len(str(self.data)) + + if self.is_include_ansi_escape: + assert self.no_ansi_escape_dp + return self.no_ansi_escape_dp.ascii_char_width + + try: + unicode_str = MultiByteStrDecoder(self.data).unicode_str + except ValueError: + unicode_str = self.to_str() + + return calc_ascii_char_width(unicode_str, self._east_asian_ambiguous_width) + + def __set_data( + self, + data: Any, + type_hint: TypeHint, + float_type: Optional[FloatType], + strict_level_map: Optional[StrictLevelMap], + ) -> None: + if float_type is None: + float_type = DefaultValue.FLOAT_TYPE + + if strict_level_map is None: + strict_level_map = DefaultValue.STRICT_LEVEL_MAP + + if type_hint: + type_obj = type_hint( + data, strict_level=StrictLevel.MIN, float_type=float_type, strip_ansi_escape=False + ) + self._typecode = type_obj.typecode + self.__data = type_obj.try_convert() + + if type_hint( + self.__data, + strict_level=StrictLevel.MAX, + float_type=float_type, + strip_ansi_escape=False, + ).is_type(): + return + + for type_class in self.__type_class_list: + strict_level = strict_level_map.get( + type_class(None, 0).typecode, strict_level_map.get("default", StrictLevel.MAX) + ) + + if self.__try_convert_type(data, type_class, strict_level, float_type): + return + + raise TypeConversionError( + f"failed to convert: data={data}, strict_level={strict_level_map}" + ) + + def __set_digit(self) -> None: + integer_digits, decimal_places = get_number_of_digit(self.__data) + self.__integer_digits = integer_digits + self._decimal_places = decimal_places + + def __try_convert_type( + self, + data: Any, + type_class: type[AbstractType], + strict_level: int, + float_type: Optional[FloatType], + ) -> bool: + type_obj = type_class(data, strict_level, float_type=float_type, strip_ansi_escape=False) + + try: + self.__data = type_obj.convert() + except TypeConversionError: 
+ return False + + self._typecode = type_obj.typecode + + return True diff --git a/venv/lib/python3.10/site-packages/dataproperty/_extractor.py b/venv/lib/python3.10/site-packages/dataproperty/_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0bc9ea444ab1ac9fa3ceb814d5aa378ecb50b0ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_extractor.py @@ -0,0 +1,817 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import enum +import sys +import typing +from collections import Counter +from collections.abc import Sequence +from decimal import Decimal +from typing import Any, Optional, Union, cast + +import typepy +from typepy import ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + Nan, + NoneType, + NullString, + RealNumber, + StrictLevel, + String, + Typecode, + is_empty_sequence, +) +from typepy.type import AbstractType + +from ._column import ColumnDataProperty +from ._common import MIN_STRICT_LEVEL_MAP, DefaultValue +from ._converter import DataPropertyConverter +from ._dataproperty import DataProperty +from ._formatter import Format +from ._preprocessor import Preprocessor +from .logger import logger # type: ignore +from .typing import ( + DateTimeFormatter, + StrictLevelMap, + TransFunc, + TypeHint, + TypeValueMap, + normalize_type_hint, +) + + +DataPropertyMatrix = list[list[DataProperty]] + + +@enum.unique +class MatrixFormatting(enum.Enum): + # raise exception if the matrix is not properly formatted + EXCEPTION = 1 << 1 + + # trim to the minimum size column + TRIM = 1 << 2 + + # Append None values to columns so that it is the same as the maximum + # column size. + FILL_NONE = 1 << 3 + + HEADER_ALIGNED = 1 << 4 + + +class DataPropertyExtractor: + """ + .. py:attribute:: quoting_flags + + Configurations to add double quote to for each items in a matrix, + where |Typecode| of table-value is |True| in the ``quote_flag_table`` + mapping table. 
``quote_flag_table`` should be a dictionary. + And is ``{ Typecode : bool }``. Defaults to: + + .. code-block:: json + :caption: The default values + + { + Typecode.BOOL: False, + Typecode.DATETIME: False, + Typecode.DICTIONARY: False, + Typecode.INFINITY: False, + Typecode.INTEGER: False, + Typecode.IP_ADDRESS: False, + Typecode.LIST: False, + Typecode.NAN: False, + Typecode.NULL_STRING: False, + Typecode.NONE: False, + Typecode.REAL_NUMBER: False, + Typecode.STRING: False, + } + """ + + def __init__(self, max_precision: Optional[int] = None) -> None: + self.max_workers = DefaultValue.MAX_WORKERS + + if max_precision is None: + self.__max_precision = DefaultValue.MAX_PRECISION + else: + self.__max_precision = max_precision + + self.__headers: Sequence[str] = [] + self.__default_type_hint: TypeHint = None + self.__col_type_hints: list[TypeHint] = [] + + self.__strip_str_header: Optional[str] = None + self.__is_formatting_float = True + self.__min_col_ascii_char_width = 0 + self.__default_format_flags = Format.NONE + self.__format_flags_list: Sequence[int] = [] + self.__float_type: Union[type[float], type[Decimal], None] = None + self.__datetime_format_str = DefaultValue.DATETIME_FORMAT + self.__strict_level_map = copy.deepcopy( + cast(dict[Union[Typecode, str], int], DefaultValue.STRICT_LEVEL_MAP) + ) + self.__east_asian_ambiguous_width = 1 + + self.__preprocessor = Preprocessor() + + self.__type_value_map: TypeValueMap = copy.deepcopy(DefaultValue.TYPE_VALUE_MAP) + + self.__trans_func_list: list[TransFunc] = [] + self.__quoting_flags = copy.deepcopy(DefaultValue.QUOTING_FLAGS) + self.__datetime_formatter: Optional[DateTimeFormatter] = None + self.__matrix_formatting = MatrixFormatting.TRIM + self.__dp_converter: DataPropertyConverter + + self.__clear_cache() + + def __clear_cache(self) -> None: + self.__update_dp_converter() + self.__dp_cache_zero = self.__to_dp_raw(0) + self.__dp_cache_one = self.__to_dp_raw(1) + self.__dp_cache_true = self.__to_dp_raw(True) + 
self.__dp_cache_false = self.__to_dp_raw(False) + self.__dp_cache_map = {None: self.__to_dp_raw(None), "": self.__to_dp_raw("")} + + @property + def headers(self) -> Sequence[str]: + return self.__headers + + @headers.setter + def headers(self, value: Sequence[str]) -> None: + if self.__headers == value: + return + + self.__headers = value + self.__clear_cache() + + @property + def default_type_hint(self) -> TypeHint: + return self.__default_type_hint + + @default_type_hint.setter + def default_type_hint(self, value: TypeHint) -> None: + if self.__default_type_hint == value: + return + + self.__default_type_hint = value + self.__clear_cache() + + @property + def column_type_hints(self) -> list[TypeHint]: + return self.__col_type_hints + + @column_type_hints.setter + def column_type_hints(self, value: Sequence[Union[str, TypeHint]]) -> None: + normalized_type_hints: list[TypeHint] = [] + + for type_hint in value: + type_hint = normalize_type_hint(type_hint) + if type_hint not in ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + typepy.List, + Nan, + NoneType, + RealNumber, + String, + NullString, + None, + ): + raise ValueError(f"invalid type hint: {type(type_hint)}") + + normalized_type_hints.append(type_hint) + + if self.__col_type_hints == normalized_type_hints: + return + + self.__col_type_hints = normalized_type_hints + self.__clear_cache() + + @property + def is_formatting_float(self) -> bool: + return self.__is_formatting_float + + @is_formatting_float.setter + def is_formatting_float(self, value: bool) -> None: + self.__is_formatting_float = value + + @property + def max_precision(self) -> int: + return self.__max_precision + + @max_precision.setter + def max_precision(self, value: int) -> None: + if self.__max_precision == value: + return + + self.__max_precision = value + self.__clear_cache() + + @property + def preprocessor(self) -> Preprocessor: + return self.__preprocessor + + @preprocessor.setter + def preprocessor(self, value: 
Preprocessor) -> None: + if self.preprocessor == value: + return + + self.__preprocessor = value + self.__update_dp_converter() + + @property + def strip_str_header(self) -> Optional[str]: + return self.__strip_str_header + + @strip_str_header.setter + def strip_str_header(self, value: str) -> None: + if self.__strip_str_header == value: + return + + self.__strip_str_header = value + self.__clear_cache() + + @property + def min_column_width(self) -> int: + return self.__min_col_ascii_char_width + + @min_column_width.setter + def min_column_width(self, value: int) -> None: + if self.__min_col_ascii_char_width == value: + return + + self.__min_col_ascii_char_width = value + self.__clear_cache() + + @property + def default_format_flags(self) -> int: + return self.__default_format_flags + + @default_format_flags.setter + def default_format_flags(self, value: int) -> None: + if self.__default_format_flags == value: + return + + self.__default_format_flags = value + self.__clear_cache() + + @property + def format_flags_list(self) -> Sequence[int]: + return self.__format_flags_list + + @format_flags_list.setter + def format_flags_list(self, value: Sequence[int]) -> None: + if self.__format_flags_list == value: + return + + self.__format_flags_list = value + self.__clear_cache() + + @property + def float_type(self) -> Union[type[float], type[Decimal], None]: + return self.__float_type + + @float_type.setter + def float_type(self, value: Union[type[float], type[Decimal]]) -> None: + if self.__float_type == value: + return + + self.__float_type = value + self.__clear_cache() + + @property + def datetime_format_str(self) -> str: + return self.__datetime_format_str + + @datetime_format_str.setter + def datetime_format_str(self, value: str) -> None: + if self.__datetime_format_str == value: + return + + self.__datetime_format_str = value + self.__clear_cache() + + @property + def strict_level_map(self) -> StrictLevelMap: + return self.__strict_level_map + + 
@strict_level_map.setter + def strict_level_map(self, value: StrictLevelMap) -> None: + if self.__strict_level_map == value: + return + + self.__strict_level_map = cast(dict[Union[Typecode, str], int], value) + self.__clear_cache() + + @property + def east_asian_ambiguous_width(self) -> int: + return self.__east_asian_ambiguous_width + + @east_asian_ambiguous_width.setter + def east_asian_ambiguous_width(self, value: int) -> None: + if self.__east_asian_ambiguous_width == value: + return + + self.__east_asian_ambiguous_width = value + self.__clear_cache() + + @property + def type_value_map(self) -> TypeValueMap: + return self.__type_value_map + + @type_value_map.setter + def type_value_map(self, value: TypeValueMap) -> None: + if self.__type_value_map == value: + return + + self.__type_value_map = value + self.__clear_cache() + + def set_type_value(self, key: Typecode, value: Union[float, str, Decimal, None]) -> None: + self.__type_value_map[key] = value + self.__clear_cache() + + def register_trans_func(self, trans_func: TransFunc) -> None: + self.__trans_func_list.insert(0, trans_func) + self.__clear_cache() + + @property + def quoting_flags(self) -> dict[Typecode, bool]: + return self.__quoting_flags + + @quoting_flags.setter + def quoting_flags(self, value: dict[Typecode, bool]) -> None: + if self.__quoting_flags == value: + return + + self.__quoting_flags = value + self.__clear_cache() + + @property + def datetime_formatter(self) -> Optional[DateTimeFormatter]: + return self.__datetime_formatter + + @datetime_formatter.setter + def datetime_formatter(self, value: Optional[DateTimeFormatter]) -> None: + if self.__datetime_formatter == value: + return + + self.__datetime_formatter = value + self.__clear_cache() + + @property + def matrix_formatting(self) -> MatrixFormatting: + return self.__matrix_formatting + + @matrix_formatting.setter + def matrix_formatting(self, value: MatrixFormatting) -> None: + if self.__matrix_formatting == value: + return + + 
self.__matrix_formatting = value + self.__clear_cache() + + @property + def max_workers(self) -> int: + assert self.__max_workers + + return self.__max_workers + + @max_workers.setter + def max_workers(self, value: Optional[int]) -> None: + try: + from _multiprocessing import SemLock, sem_unlink # noqa + except ImportError: + logger.debug("This platform lacks a functioning sem_open implementation") + value = 1 + + if "pytest" in sys.modules and value != 1: + logger.debug("set max_workers to 1 to avoid deadlock when executed from pytest") + value = 1 + + self.__max_workers = value + if not self.__max_workers: + self.__max_workers = DefaultValue.MAX_WORKERS + + def to_dp(self, value: Any) -> DataProperty: + self.__update_dp_converter() + + return self.__to_dp(value) + + def to_dp_list(self, values: Sequence[Any]) -> list[DataProperty]: + if is_empty_sequence(values): + return [] + + self.__update_dp_converter() + + return self._to_dp_list(values) + + def to_column_dp_list( + self, + value_dp_matrix: Any, + previous_column_dp_list: Optional[Sequence[ColumnDataProperty]] = None, + ) -> list[ColumnDataProperty]: + col_dp_list = self.__get_col_dp_list_base() + + logger.debug("converting to column dataproperty:") + + logs = [" params:"] + if self.headers: + logs.append(f" headers={len(self.headers)}") + logs.extend( + [ + " prev_col_count={}".format( + len(previous_column_dp_list) if previous_column_dp_list else None + ), + f" matrix_formatting={self.matrix_formatting}", + ] + ) + if self.column_type_hints: + logs.append( + " column_type_hints=({})".format( + ", ".join( + [ + type_hint.__name__ if type_hint else "none" + for type_hint in self.column_type_hints + ] + ) + ) + ) + else: + logs.append(" column_type_hints=()") + + for log in logs: + logger.debug(log) + + logger.debug(" results:") + for col_idx, value_dp_list in enumerate(zip(*value_dp_matrix)): + try: + col_dp_list[col_idx] + except IndexError: + col_dp_list.append( + ColumnDataProperty( + 
column_index=col_idx, + float_type=self.float_type, + min_width=self.min_column_width, + format_flags=self.__get_format_flags(col_idx), + is_formatting_float=self.is_formatting_float, + datetime_format_str=self.datetime_format_str, + east_asian_ambiguous_width=self.east_asian_ambiguous_width, + max_precision=self.__max_precision, + ) + ) + + col_dp = col_dp_list[col_idx] + col_dp.begin_update() + + try: + col_dp.merge(previous_column_dp_list[col_idx]) # type: ignore + except (TypeError, IndexError): + pass + + for value_dp in value_dp_list: + col_dp.update_body(value_dp) + + col_dp.end_update() + + logger.debug(f" {str(col_dp):s}") + + return col_dp_list + + def to_dp_matrix(self, value_matrix: Sequence[Sequence[Any]]) -> DataPropertyMatrix: + self.__update_dp_converter() + logger.debug(f"max_workers={self.max_workers}, preprocessor={self.__preprocessor}") + + value_matrix = self.__strip_data_matrix(value_matrix) + + if self.__is_dp_matrix(value_matrix): + logger.debug("already a dataproperty matrix") + return value_matrix # type: ignore + + if self.max_workers <= 1: + return self.__to_dp_matrix_st(value_matrix) + + return self.__to_dp_matrix_mt(value_matrix) + + def to_header_dp_list(self) -> list[DataProperty]: + self.__update_dp_converter() + + preprocessor = copy.deepcopy(self.__preprocessor) + preprocessor.strip_str = self.strip_str_header + + return self._to_dp_list( + self.headers, + type_hint=String, + preprocessor=preprocessor, + strict_level_map=MIN_STRICT_LEVEL_MAP, + ) + + def update_preprocessor(self, **kwargs: Any) -> bool: + is_updated = self.__preprocessor.update(**kwargs) + self.__update_dp_converter() + + return is_updated + + def update_strict_level_map(self, value: StrictLevelMap) -> bool: + org = copy.deepcopy(self.__strict_level_map) + self.__strict_level_map.update(value) + + if org == self.__strict_level_map: + return False + + self.__clear_cache() + + return True + + """ + def update_dict(self, lhs: Mapping, rhs: Mapping) -> bool: + 
is_updated = False + + for key, value in rhs.items(): + if key not in lhs: + lhs[] + continue + + if getattr(lhs, key) == value: + continue + + setattr(lhs, key, value) + is_updated = True + + return is_updated + """ + + @staticmethod + def __is_dp_matrix(value: Any) -> bool: + try: + return isinstance(value[0][0], DataProperty) + except (TypeError, IndexError): + return False + + def __get_col_type_hint(self, col_idx: int) -> TypeHint: + try: + return self.column_type_hints[col_idx] + except (TypeError, IndexError): + return self.default_type_hint + + def __get_format_flags(self, col_idx: int) -> int: + try: + return self.format_flags_list[col_idx] + except (TypeError, IndexError): + return self.__default_format_flags + + def __to_dp( + self, + data: Any, + type_hint: TypeHint = None, + preprocessor: Optional[Preprocessor] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> DataProperty: + for trans_func in self.__trans_func_list: + data = trans_func(data) + + if type_hint: + return self.__to_dp_raw( + data, + type_hint=type_hint, + preprocessor=preprocessor, + strict_level_map=strict_level_map, + ) + + try: + if data in self.__dp_cache_map: + return self.__dp_cache_map[data] + except TypeError: + # unhashable type + pass + + if data == 0: + if data is False: + return self.__dp_cache_false + return self.__dp_cache_zero + if data == 1: + if data is True: + return self.__dp_cache_true + return self.__dp_cache_one + + return self.__to_dp_raw( + data, type_hint=type_hint, preprocessor=preprocessor, strict_level_map=strict_level_map + ) + + def __to_dp_raw( + self, + data: Any, + type_hint: TypeHint = None, + preprocessor: Optional[Preprocessor] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> DataProperty: + if preprocessor: + preprocessor = Preprocessor( + dequote=preprocessor.dequote, + line_break_handling=preprocessor.line_break_handling, + line_break_repl=preprocessor.line_break_repl, + strip_str=preprocessor.strip_str, + 
is_escape_formula_injection=preprocessor.is_escape_formula_injection, + ) + else: + preprocessor = Preprocessor( + dequote=self.preprocessor.dequote, + line_break_handling=self.preprocessor.line_break_handling, + line_break_repl=self.preprocessor.line_break_repl, + strip_str=self.preprocessor.strip_str, + is_escape_formula_injection=self.__preprocessor.is_escape_formula_injection, + ) + + value_dp = DataProperty( + data, + preprocessor=preprocessor, + type_hint=(type_hint if type_hint is not None else self.default_type_hint), + float_type=self.float_type, + datetime_format_str=self.datetime_format_str, + strict_level_map=(strict_level_map if type_hint is not None else self.strict_level_map), + east_asian_ambiguous_width=self.east_asian_ambiguous_width, + ) + + return self.__dp_converter.convert(value_dp) + + def __to_dp_matrix_st(self, value_matrix: Sequence[Sequence[Any]]) -> DataPropertyMatrix: + return list( + zip( # type: ignore + *( + _to_dp_list_helper( + self, + col_idx, + values, + self.__get_col_type_hint(col_idx), + self.__preprocessor, + )[1] + for col_idx, values in enumerate(zip(*value_matrix)) + ) + ) + ) + + def __to_dp_matrix_mt(self, value_matrix: Sequence[Sequence[Any]]) -> DataPropertyMatrix: + from concurrent import futures + + col_data_map = {} + + with futures.ProcessPoolExecutor(self.max_workers) as executor: + future_list = [ + executor.submit( + _to_dp_list_helper, + self, + col_idx, + values, + self.__get_col_type_hint(col_idx), + self.__preprocessor, + ) + for col_idx, values in enumerate(zip(*value_matrix)) + ] + + for future in futures.as_completed(future_list): + col_idx, value_dp_list = future.result() + col_data_map[col_idx] = value_dp_list + + return list( + zip(*(col_data_map[col_idx] for col_idx in sorted(col_data_map))) # type: ignore + ) + + def _to_dp_list( + self, + data_list: Sequence[Any], + type_hint: TypeHint = None, + preprocessor: Optional[Preprocessor] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> 
list[DataProperty]: + if is_empty_sequence(data_list): + return [] + + type_counter: typing.Counter[type[AbstractType]] = Counter() + + dp_list = [] + for data in data_list: + expect_type_hint: TypeHint = type_hint + if type_hint is None: + try: + expect_type_hint, _count = type_counter.most_common(1)[0] + if not expect_type_hint( + data, float_type=self.float_type, strict_level=StrictLevel.MAX + ).is_type(): + expect_type_hint = None + except IndexError: + pass + + dataprop = self.__to_dp( + data=data, + type_hint=expect_type_hint, + preprocessor=preprocessor if preprocessor else self.__preprocessor, + strict_level_map=strict_level_map, + ) + type_counter[dataprop.type_class] += 1 + + dp_list.append(dataprop) + + return dp_list + + def __strip_data_matrix(self, data_matrix: Sequence[Sequence[Any]]) -> Sequence[Sequence[Any]]: + header_col_size = len(self.headers) if self.headers else 0 + try: + col_size_list = [len(data_list) for data_list in data_matrix] + except TypeError: + return [] + + if self.headers: + min_col_size = min([header_col_size] + col_size_list) + max_col_size = max([header_col_size] + col_size_list) + elif col_size_list: + min_col_size = min(col_size_list) + max_col_size = max(col_size_list) + else: + min_col_size = 0 + max_col_size = 0 + + if self.matrix_formatting == MatrixFormatting.EXCEPTION: + if min_col_size != max_col_size: + raise ValueError( + "nonuniform column size found: min={}, max={}".format( + min_col_size, max_col_size + ) + ) + + return data_matrix + + if self.matrix_formatting == MatrixFormatting.HEADER_ALIGNED: + if header_col_size > 0: + format_col_size = header_col_size + else: + format_col_size = max_col_size + elif self.matrix_formatting == MatrixFormatting.TRIM: + format_col_size = min_col_size + elif self.matrix_formatting == MatrixFormatting.FILL_NONE: + format_col_size = max_col_size + else: + raise ValueError(f"unknown matrix formatting: {self.matrix_formatting}") + + return [ + 
list(data_matrix[row_idx][:format_col_size]) + [None] * (format_col_size - col_size) + for row_idx, col_size in enumerate(col_size_list) + ] + + def __get_col_dp_list_base(self) -> list[ColumnDataProperty]: + header_dp_list = self.to_header_dp_list() + col_dp_list = [] + + for col_idx, header_dp in enumerate(header_dp_list): + col_dp = ColumnDataProperty( + column_index=col_idx, + float_type=self.float_type, + min_width=self.min_column_width, + format_flags=self.__get_format_flags(col_idx), + is_formatting_float=self.is_formatting_float, + datetime_format_str=self.datetime_format_str, + east_asian_ambiguous_width=self.east_asian_ambiguous_width, + max_precision=self.__max_precision, + ) + col_dp.update_header(header_dp) + col_dp_list.append(col_dp) + + return col_dp_list + + def __update_dp_converter(self) -> None: + preprocessor = Preprocessor( + line_break_handling=self.__preprocessor.line_break_handling, + line_break_repl=self.preprocessor.line_break_repl, + is_escape_html_tag=self.__preprocessor.is_escape_html_tag, + is_escape_formula_injection=self.__preprocessor.is_escape_formula_injection, + ) + self.__dp_converter = DataPropertyConverter( + preprocessor=preprocessor, + type_value_map=self.type_value_map, + quoting_flags=self.quoting_flags, + datetime_formatter=self.datetime_formatter, + datetime_format_str=self.datetime_format_str, + float_type=self.float_type, + strict_level_map=self.strict_level_map, + ) + + +def _to_dp_list_helper( + extractor: DataPropertyExtractor, + col_idx: int, + data_list: Sequence[Any], + type_hint: TypeHint, + preprocessor: Preprocessor, +) -> tuple[int, list[DataProperty]]: + return ( + col_idx, + extractor._to_dp_list(data_list, type_hint=type_hint, preprocessor=preprocessor), + ) diff --git a/venv/lib/python3.10/site-packages/dataproperty/_formatter.py b/venv/lib/python3.10/site-packages/dataproperty/_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..088edded65da9d7a0f04adac7927ebb6cf72504d --- 
# dataproperty/_formatter.py
import copy
from decimal import Decimal
from typing import Final, Optional, Union

from typepy import Nan, Typecode


# Number of decimal places; callers may pass either a float or a Decimal.
DecimalPlaces = Union[float, Decimal]


class Format:
    """Bit flags that tweak numeric formatting."""

    NONE: Final = 0
    THOUSAND_SEPARATOR: Final = 1


class Formatter:
    """Builds ``str.format`` template strings per typepy ``Typecode``.

    Args:
        datetime_format_str: strftime-style pattern used for DATETIME values.
        is_formatting_float: when falsy, real numbers use the plain ``"{}"`` template.
        format_flags: bitwise OR of ``Format`` flags; defaults to ``Format.NONE``.
    """

    __slots__ = ("__is_formatting_float", "__format_flags", "__datetime_format_str")

    # Typecodes that are always rendered with the bare "{}" template.
    _BLANK_CURLY_BRACES_FORMAT_MAP: Final[dict[Typecode, str]] = {
        Typecode.NONE: "{}",
        Typecode.IP_ADDRESS: "{}",
        Typecode.BOOL: "{}",
        Typecode.DICTIONARY: "{}",
        Typecode.LIST: "{}",
    }

    def __init__(
        self,
        datetime_format_str: str,
        is_formatting_float: Optional[bool] = True,
        format_flags: Optional[int] = None,
    ) -> None:
        self.__format_flags = format_flags if format_flags is not None else Format.NONE
        self.__datetime_format_str = datetime_format_str
        self.__is_formatting_float = is_formatting_float

    def make_format_map(
        self, decimal_places: Optional[DecimalPlaces] = None
    ) -> dict[Typecode, str]:
        """Return a typecode -> format-string map covering every supported typecode."""
        format_map = copy.copy(self._BLANK_CURLY_BRACES_FORMAT_MAP)
        format_map.update(
            {
                Typecode.INTEGER: self.make_format_str(Typecode.INTEGER),
                Typecode.REAL_NUMBER: self.make_format_str(Typecode.REAL_NUMBER, decimal_places),
                Typecode.INFINITY: self.make_format_str(Typecode.INFINITY),
                Typecode.NAN: self.make_format_str(Typecode.NAN),
                Typecode.DATETIME: self.make_format_str(Typecode.DATETIME),
            }
        )

        return format_map

    def make_format_str(
        self, typecode: Typecode, decimal_places: Optional[DecimalPlaces] = None
    ) -> str:
        """Return the format string for *typecode*, e.g. ``"{:,.2f}"``."""
        format_str = self._BLANK_CURLY_BRACES_FORMAT_MAP.get(typecode)
        if format_str is not None:
            return format_str

        if typecode == Typecode.INTEGER:
            return self.__get_integer_format()

        if typecode in (Typecode.REAL_NUMBER, Typecode.INFINITY, Typecode.NAN):
            return self.__get_realnumber_format(decimal_places)

        if typecode == Typecode.DATETIME:
            return "{:" + self.__datetime_format_str + "}"

        return "{:s}"

    def __get_base_format_str(self) -> str:
        # "," enables the thousands separator in the format-spec mini-language.
        if self.__format_flags & Format.THOUSAND_SEPARATOR:
            return ","

        return ""

    def __get_integer_format(self) -> str:
        return "{:" + self.__get_base_format_str() + "d}"

    def __get_realnumber_format(self, decimal_places: Optional[DecimalPlaces]) -> str:
        if not self.__is_formatting_float:
            return "{}"

        base_format = self.__get_base_format_str()

        if decimal_places is None or Nan(decimal_places).is_type():
            return "{:" + base_format + "f}"

        try:
            # BUG FIX: decimal_places may be a float or Decimal (see DecimalPlaces),
            # but the "d" presentation type only accepts int, so the previous code
            # raised ValueError and always fell back to the precision-less template
            # for those types. int() truncates to an integral digit count first.
            return "{:" + f"{base_format:s}.{int(decimal_places):d}f" + "}"
        except (ValueError, TypeError, OverflowError):
            # e.g. an infinite Decimal cannot be converted; keep the plain template
            pass

        return "{:" + base_format + "f}"
"""
.. codeauthor:: Tsuyoshi Hombashi
"""

import decimal
import re
import unicodedata
from decimal import Decimal
from typing import Any, Final, Optional, Union

from typepy import Integer, RealNumber, TypeConversionError


# Matches ANSI/CSI escape sequences so display width can be computed on plain text.
_ansi_escape: Final = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]", re.IGNORECASE)


def get_integer_digit(value: Any) -> int:
    """Return the number of digits in the integer part of *value*.

    Raises:
        ValueError: if *value* is not convertible to a number.
    """
    # NOTE: previously named ``float_type`` although it holds a RealNumber checker.
    real_number: Final = RealNumber(value)

    with decimal.localcontext() as ctx:
        # High precision + half-down rounding so huge values don't gain a digit.
        ctx.prec = 60
        ctx.rounding = decimal.ROUND_HALF_DOWN

        try:
            abs_value = abs(real_number.convert())
        except TypeConversionError:
            try:
                abs_value = abs(Integer(value).convert())
            except TypeConversionError:
                raise ValueError(
                    f"the value must be a number: value='{value}' type='{type(value)}'"
                )

            # Integer path: the digit count is the length of its decimal string.
            return len(str(abs_value))

        if abs_value.is_zero():
            return 1

        try:
            # Drop the fraction; the remaining string length is the digit count.
            return len(str(abs_value.quantize(Decimal("1."), rounding=decimal.ROUND_DOWN)))
        except decimal.InvalidOperation:
            return len(str(abs_value))


class DigitCalculator:
    """Counts decimal places of a numeric value (including scientific notation)."""

    REGEXP_COMMON_LOG: Final = re.compile(r"[\d\.]+[eE]\-\d+")
    REGEXP_SPLIT: Final = re.compile(r"[eE]\-")

    def get_decimal_places(self, value: Union[str, float, int, Decimal]) -> int:
        if Integer(value).is_type():
            return 0

        text = str(abs(float(value)))
        if "." in text:
            # e.g. "0.125" -> 3
            return len(text.split(".")[1])
        if self.REGEXP_COMMON_LOG.search(text):
            # e.g. "1e-05" -> 5
            return int(self.REGEXP_SPLIT.split(text)[1])

        return 0


_digit_calculator = DigitCalculator()


def get_number_of_digit(
    value: Any, max_decimal_places: int = 99
) -> tuple[Optional[int], Optional[int]]:
    """Return ``(integer_digits, decimal_places)``; ``(None, None)`` for non-numbers."""
    try:
        integer_digits = get_integer_digit(value)
    except (ValueError, TypeError, OverflowError):
        return (None, None)

    try:
        decimal_places: Optional[int] = min(
            _digit_calculator.get_decimal_places(value), max_decimal_places
        )
    except (ValueError, TypeError):
        decimal_places = None

    return (integer_digits, decimal_places)


def _validate_eaaw(east_asian_ambiguous_width: int) -> None:
    """Raise ValueError unless the ambiguous-width setting is 1 or 2."""
    if east_asian_ambiguous_width in (1, 2):
        return

    raise ValueError(
        "invalid east_asian_ambiguous_width: expected=1 or 2, actual={}".format(
            east_asian_ambiguous_width
        )
    )


def strip_ansi_escape(unicode_str: str) -> str:
    """Return *unicode_str* with ANSI escape sequences removed."""
    return _ansi_escape.sub("", unicode_str)


def calc_ascii_char_width(unicode_str: str, east_asian_ambiguous_width: int = 1) -> int:
    """Return the display-column width of *unicode_str*.

    Wide ("W") and Fullwidth ("F") characters count as two columns;
    East-Asian-Ambiguous ("A") characters count as *east_asian_ambiguous_width*
    (validated to be 1 or 2); everything else counts as one column.
    """
    # IMPROVED: ``import unicodedata`` was previously executed inside this
    # function on every call; it is now a module-level import.
    width = 0
    for char in unicode_str:
        eaw = unicodedata.east_asian_width(char)
        if eaw in ("W", "F"):
            width += 2
        elif eaw == "A":
            _validate_eaaw(east_asian_ambiguous_width)
            width += east_asian_ambiguous_width
        else:
            width += 1

    return width
# dataproperty/_interface.py
"""
.. codeauthor:: Tsuyoshi Hombashi
"""

import abc
from typing import Optional

from typepy import Typecode

from ._align import Align


class DataPeropertyInterface(metaclass=abc.ABCMeta):
    """Read-only accessor interface shared by data-property implementations.

    NOTE(review): the misspelled class name ("Peroperty") is part of the
    public API and is therefore kept as-is.
    """

    __slots__ = ()

    @property
    @abc.abstractmethod
    def align(self) -> Align:  # pragma: no cover
        ...

    @property
    @abc.abstractmethod
    def decimal_places(self) -> Optional[int]:  # pragma: no cover
        ...

    @property
    @abc.abstractmethod
    def typecode(self) -> Typecode:  # pragma: no cover
        ...

    @property
    @abc.abstractmethod
    def typename(self) -> str:  # pragma: no cover
        ...


# dataproperty/_line_break.py
from enum import Enum, unique


@unique
class LineBreakHandling(Enum):
    """How embedded line breaks in cell values are treated."""

    NOP = 0  # leave line breaks untouched
    REPLACE = 1  # substitute each line break with the configured replacement
    ESCAPE = 2  # turn newline/carriage-return into literal "\\n" / "\\r"


# dataproperty/_preprocessor.py (module preamble)
import html
import re
from typing import Any, Final, Optional, Union

from mbstrdecoder import MultiByteStrDecoder

from ._function import strip_ansi_escape
from ._line_break import LineBreakHandling


_RE_LINE_BREAK: Final = re.compile(r"\r\n|\n")
_RE_FORMULA_PREFIX: Final = re.compile(r"^[-\+=@]")


def normalize_lbh(value: Optional[LineBreakHandling]) -> LineBreakHandling:
    """Coerce *value* (None, a member, or a member-name string) to LineBreakHandling."""
    if value is None:
        return LineBreakHandling.NOP

    if isinstance(value, LineBreakHandling):
        return value

    # member-name lookup; case-insensitive so "replace" resolves to REPLACE
    return LineBreakHandling[value.upper()]  # type: ignore
class Preprocessor:
    """Normalizes raw cell values before type detection.

    Each constructor flag is stored as a same-named attribute; ``update()``
    changes any subset of them in bulk and reports whether anything changed.
    """

    def __init__(
        self,
        strip_str: Optional[Union[str, bytes]] = None,
        replace_tabs_with_spaces: bool = True,
        tab_length: int = 2,
        line_break_handling: "Optional[LineBreakHandling]" = None,
        line_break_repl: str = " ",
        dequote: bool = False,
        is_escape_html_tag: bool = False,
        is_escape_formula_injection: bool = False,
    ) -> None:
        self.strip_str = strip_str
        self.replace_tabs_with_spaces = replace_tabs_with_spaces
        self.tab_length = tab_length
        self.line_break_handling = line_break_handling  # property: normalized on set
        self.line_break_repl = line_break_repl
        self.dequote = dequote
        self.is_escape_html_tag = is_escape_html_tag
        self.is_escape_formula_injection = is_escape_formula_injection

    @property
    def line_break_handling(self) -> "Optional[LineBreakHandling]":
        return self.__line_break_handling

    @line_break_handling.setter
    def line_break_handling(self, value: "Optional[LineBreakHandling]") -> None:
        # always store a normalized enum member (accepts None / member / name string)
        self.__line_break_handling = normalize_lbh(value)

    def __repr__(self) -> str:
        attributes = [
            f"strip_str={self.strip_str!r}",
            f"replace_tabs_with_spaces={self.replace_tabs_with_spaces}",
            f"tab_length={self.tab_length}",
            f"line_break_handling={self.line_break_handling}",
            f"line_break_repl={self.line_break_repl}",
            f"escape_html_tag={self.is_escape_html_tag}",
            f"escape_formula_injection={self.is_escape_formula_injection}",
        ]
        return ", ".join(attributes)

    def preprocess(self, data: Any) -> tuple:
        """Return ``(processed_value, ansi_stripped_value_or_None)``."""
        stripped = self.__preprocess_data(data, self.strip_str)
        return self.__preprocess_string(stripped)

    def update(self, **kwargs: Any) -> bool:
        """Assign each existing attribute from *kwargs*; return True if any changed."""
        modified = False

        for name, new_value in kwargs.items():
            if not hasattr(self, name):
                continue

            if getattr(self, name) == new_value:
                continue

            setattr(self, name, new_value)
            modified = True

        return modified

    def __preprocess_string(self, raw_data: Any) -> "tuple[Any, Optional[str]]":
        # non-strings pass through untouched
        if not isinstance(raw_data, str):
            return (raw_data, None)

        data = raw_data

        if self.replace_tabs_with_spaces:
            try:
                data = data.replace("\t", " " * self.tab_length)
            except (TypeError, AttributeError, ValueError):
                pass

        if self.is_escape_html_tag:
            try:
                data = html.escape(data)
            except AttributeError:
                return (data, None)

        data = self.__process_line_break(data)
        data = self.__escape_formula_injection(data)
        data = self.__dequote(data)

        try:
            return (data, strip_ansi_escape(data))
        except TypeError:
            return (data, None)

    @staticmethod
    def __preprocess_data(data: Any, strip_str: Optional[Union[str, bytes]]) -> Any:
        if strip_str is None:
            return data

        try:
            return data.strip(strip_str)
        except AttributeError:
            return data
        except UnicodeDecodeError:
            return MultiByteStrDecoder(data).unicode_str.strip(str(strip_str))
        except TypeError:
            # data and strip_str are a str/bytes mismatch: decode whichever is bytes.
            # NOTE(review): if neither branch matches, this implicitly returns None
            # (kept as in the original).
            if isinstance(data, bytes):
                return MultiByteStrDecoder(data).unicode_str.strip(str(strip_str))
            if isinstance(strip_str, bytes):
                return data.strip(MultiByteStrDecoder(strip_str).unicode_str)

    def __dequote(self, text: str) -> str:
        if not self.dequote or not text:
            return text

        try:
            quoted = (text[0] == text[-1]) and text.startswith(("'", '"'))
            # only strip when the quote character appears exactly twice
            if quoted and text.count(text[0]) == 2:
                return text[1:-1]
        except TypeError:
            pass

        return text

    def __process_line_break(self, data: str) -> str:
        handling = self.line_break_handling

        if handling == LineBreakHandling.NOP:
            return data

        try:
            if handling == LineBreakHandling.REPLACE:
                return _RE_LINE_BREAK.sub(self.line_break_repl, data)

            if handling == LineBreakHandling.ESCAPE:
                return data.replace("\n", "\\n").replace("\r", "\\r")
        except (TypeError, AttributeError):
            return data

        raise ValueError(f"unexpected line_break_handling: {handling}")

    def __escape_formula_injection(self, data: str) -> str:
        if not self.is_escape_formula_injection:
            return data

        try:
            # prefix a quote so spreadsheet apps treat "=...", "+...", etc. as text
            if _RE_FORMULA_PREFIX.search(data):
                return "'" + data
        except (TypeError, AttributeError):
            return data

        return data
diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__init__.py b/venv/lib/python3.10/site-packages/dataproperty/logger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..af2ff6d54dfd57bdea24e51fc39722ae07b14491 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/logger/__init__.py @@ -0,0 +1,7 @@ +from ._logger import logger, set_logger # type: ignore + + +__all__ = ( + "logger", + "set_logger", +) diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bc037f38f1e7b433ff9e945dcb63181b4aab440 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d246f1b5b116742d436db482dd5cae253d455a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c02ff8aec07245700fa19fa22f04d29905e71fd0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/_logger.py b/venv/lib/python3.10/site-packages/dataproperty/logger/_logger.py new file mode 100644 index 
# dataproperty/logger/_logger.py
"""
.. codeauthor:: Tsuyoshi Hombashi
"""

from ._null_logger import NullLogger


MODULE_NAME = "dataproperty"

try:
    from loguru import logger  # type: ignore

    # library logging is opt-in: start disabled until set_logger(True)
    logger.disable(MODULE_NAME)
except ImportError:
    logger = NullLogger()


def set_logger(is_enable: bool, propagation_depth: int = 1) -> None:
    """Enable or disable this library's logging.

    ``propagation_depth`` is accepted for backward compatibility but unused.
    """
    action = logger.enable if is_enable else logger.disable
    action(MODULE_NAME)


# dataproperty/logger/_null_logger.py
class NullLogger:
    """No-op stand-in matching the loguru logger surface, used when loguru is absent."""

    level_name = None

    def remove(self, handler_id=None):  # pragma: no cover
        return None

    def add(self, sink, **kwargs):  # pragma: no cover
        return None

    def disable(self, name):  # pragma: no cover
        return None

    def enable(self, name):  # pragma: no cover
        return None

    def critical(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def debug(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def error(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def exception(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def info(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def log(self, __level, __message, *args, **kwargs):  # pragma: no cover
        return None

    def success(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def trace(self, __message, *args, **kwargs):  # pragma: no cover
        return None

    def warning(self, __message, *args, **kwargs):  # pragma: no cover
        return None
# dataproperty/typing.py
from collections.abc import Mapping
from datetime import datetime
from decimal import Decimal
from typing import Any, Callable, Final, Optional, Union

from typepy import (
    Bool,
    DateTime,
    Dictionary,
    Infinity,
    Integer,
    IpAddress,
    List,
    Nan,
    NoneType,
    NullString,
    RealNumber,
    String,
    Typecode,
)
from typepy.type import AbstractType


TypeHint = Optional[type[AbstractType]]
TransFunc = Callable[[Any], Any]
DateTimeFormatter = Callable[[datetime], str]

FloatType = Union[type[Decimal], type[float]]
StrictLevelMap = Mapping[Union[str, Typecode], int]
TypeValueMap = dict[Typecode, Union[float, str, Decimal, None]]

# Prefix -> type-hint class. Insertion order matters: lookup is prefix-based
# and frequently used prefixes are listed first.
_type_hint_map: Final = {
    "int": Integer,
    "float": RealNumber,
    "realnumber": RealNumber,
    "str": String,
    "bool": Bool,
    "datetime": DateTime,
    "dict": Dictionary,
    "inf": Infinity,
    "ip": IpAddress,
    "list": List,
    "nan": Nan,
    "none": NoneType,
    "nullstr": NullString,
}


def normalize_type_hint(type_hint: Union[str, TypeHint]) -> TypeHint:
    """Resolve a type hint given as a string or class.

    Falsy input yields None; non-string input is returned unchanged;
    an unrecognized string raises ValueError.
    """
    if not type_hint:
        return None

    if not isinstance(type_hint, str):
        return type_hint

    normalized = type_hint.strip().casefold()
    for prefix, hint_class in _type_hint_map.items():
        if normalized.startswith(prefix):
            return hint_class

    raise ValueError(f"unknown typehint: {normalized}")
b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/LICENSE b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/METADATA b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..74ad1dacefb3d189b57a46a0c48733212b3b96a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/METADATA @@ -0,0 +1,209 @@ +Metadata-Version: 2.1 +Name: evaluate +Version: 0.4.5 +Summary: HuggingFace community-driven open-source library of evaluation +Home-page: https://github.com/huggingface/evaluate +Author: HuggingFace Inc. 
+Author-email: leandro@huggingface.co +License: Apache 2.0 +Download-URL: https://github.com/huggingface/evaluate/tags +Keywords: metrics machine learning evaluate evaluation +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: datasets>=2.0.0 +Requires-Dist: numpy>=1.17 +Requires-Dist: dill +Requires-Dist: pandas +Requires-Dist: requests>=2.19.0 +Requires-Dist: tqdm>=4.62.1 +Requires-Dist: xxhash +Requires-Dist: multiprocess +Requires-Dist: fsspec[http]>=2021.05.0 +Requires-Dist: huggingface-hub>=0.7.0 +Requires-Dist: packaging +Requires-Dist: importlib-metadata; python_version < "3.8" +Provides-Extra: dev +Requires-Dist: absl-py; extra == "dev" +Requires-Dist: charcut>=1.1.1; extra == "dev" +Requires-Dist: cer>=1.2.0; extra == "dev" +Requires-Dist: nltk; extra == "dev" +Requires-Dist: pytest; extra == "dev" +Requires-Dist: pytest-datadir; extra == "dev" +Requires-Dist: pytest-xdist; extra == "dev" +Requires-Dist: numpy<2.0.0; extra == "dev" +Requires-Dist: tensorflow!=2.6.0,!=2.6.1,<=2.10,>=2.3; extra == "dev" +Requires-Dist: torch; extra == "dev" +Requires-Dist: accelerate; extra == "dev" +Requires-Dist: bert-score>=0.3.6; extra == "dev" +Requires-Dist: rouge-score>=0.1.2; extra == "dev" +Requires-Dist: sacrebleu; extra == "dev" +Requires-Dist: sacremoses; extra == "dev" +Requires-Dist: scipy>=1.10.0; extra == "dev" 
+Requires-Dist: seqeval; extra == "dev" +Requires-Dist: scikit-learn; extra == "dev" +Requires-Dist: jiwer; extra == "dev" +Requires-Dist: sentencepiece; extra == "dev" +Requires-Dist: transformers; extra == "dev" +Requires-Dist: mauve-text; extra == "dev" +Requires-Dist: trectools; extra == "dev" +Requires-Dist: toml>=0.10.1; extra == "dev" +Requires-Dist: requests-file>=1.5.1; extra == "dev" +Requires-Dist: tldextract>=3.1.0; extra == "dev" +Requires-Dist: texttable>=1.6.3; extra == "dev" +Requires-Dist: unidecode>=1.3.4; extra == "dev" +Requires-Dist: Werkzeug>=1.0.1; extra == "dev" +Requires-Dist: six~=1.15.0; extra == "dev" +Requires-Dist: black~=22.0; extra == "dev" +Requires-Dist: flake8>=3.8.3; extra == "dev" +Requires-Dist: isort>=5.0.0; extra == "dev" +Requires-Dist: pyyaml>=5.3.1; extra == "dev" +Provides-Extra: docs +Requires-Dist: s3fs; extra == "docs" +Provides-Extra: evaluator +Requires-Dist: transformers; extra == "evaluator" +Requires-Dist: scipy>=1.7.1; extra == "evaluator" +Provides-Extra: quality +Requires-Dist: black~=22.0; extra == "quality" +Requires-Dist: flake8>=3.8.3; extra == "quality" +Requires-Dist: isort>=5.0.0; extra == "quality" +Requires-Dist: pyyaml>=5.3.1; extra == "quality" +Provides-Extra: template +Requires-Dist: cookiecutter; extra == "template" +Requires-Dist: gradio>=3.0.0; extra == "template" +Provides-Extra: tensorflow +Requires-Dist: tensorflow!=2.6.0,!=2.6.1,>=2.2.0; extra == "tensorflow" +Provides-Extra: tensorflow_gpu +Requires-Dist: tensorflow-gpu!=2.6.0,!=2.6.1,>=2.2.0; extra == "tensorflow-gpu" +Provides-Extra: tests +Requires-Dist: absl-py; extra == "tests" +Requires-Dist: charcut>=1.1.1; extra == "tests" +Requires-Dist: cer>=1.2.0; extra == "tests" +Requires-Dist: nltk; extra == "tests" +Requires-Dist: pytest; extra == "tests" +Requires-Dist: pytest-datadir; extra == "tests" +Requires-Dist: pytest-xdist; extra == "tests" +Requires-Dist: numpy<2.0.0; extra == "tests" +Requires-Dist: 
tensorflow!=2.6.0,!=2.6.1,<=2.10,>=2.3; extra == "tests" +Requires-Dist: torch; extra == "tests" +Requires-Dist: accelerate; extra == "tests" +Requires-Dist: bert-score>=0.3.6; extra == "tests" +Requires-Dist: rouge-score>=0.1.2; extra == "tests" +Requires-Dist: sacrebleu; extra == "tests" +Requires-Dist: sacremoses; extra == "tests" +Requires-Dist: scipy>=1.10.0; extra == "tests" +Requires-Dist: seqeval; extra == "tests" +Requires-Dist: scikit-learn; extra == "tests" +Requires-Dist: jiwer; extra == "tests" +Requires-Dist: sentencepiece; extra == "tests" +Requires-Dist: transformers; extra == "tests" +Requires-Dist: mauve-text; extra == "tests" +Requires-Dist: trectools; extra == "tests" +Requires-Dist: toml>=0.10.1; extra == "tests" +Requires-Dist: requests-file>=1.5.1; extra == "tests" +Requires-Dist: tldextract>=3.1.0; extra == "tests" +Requires-Dist: texttable>=1.6.3; extra == "tests" +Requires-Dist: unidecode>=1.3.4; extra == "tests" +Requires-Dist: Werkzeug>=1.0.1; extra == "tests" +Requires-Dist: six~=1.15.0; extra == "tests" +Provides-Extra: torch +Requires-Dist: torch; extra == "torch" + +

    +
    + +
    +

    + +

    + + Build + + + GitHub + + + Documentation + + + GitHub release + + + Contributor Covenant + +

    + + + +> **Tip:** For more recent evaluation approaches, for example for evaluating LLMs, we recommend our newer and more actively maintained library [LightEval](https://github.com/huggingface/lighteval). + + + +🤗 Evaluate is a library that makes evaluating and comparing models and reporting their performance easier and more standardized. + +It currently contains: + +- **implementations of dozens of popular metrics**: the existing metrics cover a variety of tasks spanning from NLP to Computer Vision, and include dataset-specific metrics for datasets. With a simple command like `accuracy = load("accuracy")`, get any of these metrics ready to use for evaluating a ML model in any framework (Numpy/Pandas/PyTorch/TensorFlow/JAX). +- **comparisons and measurements**: comparisons are used to measure the difference between models and measurements are tools to evaluate datasets. +- **an easy way of adding new evaluation modules to the 🤗 Hub**: you can create new evaluation modules and push them to a dedicated Space in the 🤗 Hub with `evaluate-cli create [metric name]`, which allows you to see easily compare different metrics and their outputs for the same sets of references and predictions. + +[🎓 **Documentation**](https://huggingface.co/docs/evaluate/) + +🔎 **Find a [metric](https://huggingface.co/evaluate-metric), [comparison](https://huggingface.co/evaluate-comparison), [measurement](https://huggingface.co/evaluate-measurement) on the Hub** + +[🌟 **Add a new evaluation module**](https://huggingface.co/docs/evaluate/) + +🤗 Evaluate also has lots of useful features like: + +- **Type checking**: the input types are checked to make sure that you are using the right input formats for each metric +- **Metric cards**: each metrics comes with a card that describes the values, limitations and their ranges, as well as providing examples of their usage and usefulness. 
+- **Community metrics:** Metrics live on the Hugging Face Hub and you can easily add your own metrics for your project or to collaborate with others. + + +# Installation + +## With pip + +🤗 Evaluate can be installed from PyPi and has to be installed in a virtual environment (venv or conda for instance) + +```bash +pip install evaluate +``` + +# Usage + +🤗 Evaluate's main methods are: + +- `evaluate.list_evaluation_modules()` to list the available metrics, comparisons and measurements +- `evaluate.load(module_name, **kwargs)` to instantiate an evaluation module +- `results = module.compute(*kwargs)` to compute the result of an evaluation module + +# Adding a new evaluation module + +First install the necessary dependencies to create a new metric with the following command: +```bash +pip install evaluate[template] +``` +Then you can get started with the following command which will create a new folder for your metric and display the necessary steps: +```bash +evaluate-cli create "Awesome Metric" +``` +See this [step-by-step guide](https://huggingface.co/docs/evaluate/creating_and_sharing) in the documentation for detailed instructions. + +## Credits + +Thanks to [@marella](https://github.com/marella) for letting us use the `evaluate` namespace on PyPi previously used by his [library](https://github.com/marella/evaluate). 
+ + diff --git a/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/RECORD b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..599587025f77f17cc9a25fe3b1105b32614af9be --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/RECORD @@ -0,0 +1,64 @@ +../../../bin/evaluate-cli,sha256=ODUWPN9FXCyaWRjbll5LNTU3vxDeeuzDO8WUxPaglWY,299 +evaluate-0.4.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +evaluate-0.4.5.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +evaluate-0.4.5.dist-info/METADATA,sha256=czq9HNbPJ-cWEMNUnPgySh5v815I1plnh4rQDhXmXeQ,9521 +evaluate-0.4.5.dist-info/RECORD,, +evaluate-0.4.5.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92 +evaluate-0.4.5.dist-info/entry_points.txt,sha256=m2P3heof0lsg47nq6tYW_yUtxTfimd3RuD26Yk8KMkM,70 +evaluate-0.4.5.dist-info/top_level.txt,sha256=wBEoxird-u8p4OKDwq5z9rlfH-ybeez8rjaKNLNJ3B0,9 +evaluate/__init__.py,sha256=pASJAEDB0v9_quEpedIkfFCVzsyl1igrbcm3Mw0Nk6Y,1754 +evaluate/__pycache__/__init__.cpython-310.pyc,, +evaluate/__pycache__/config.cpython-310.pyc,, +evaluate/__pycache__/hub.cpython-310.pyc,, +evaluate/__pycache__/info.cpython-310.pyc,, +evaluate/__pycache__/inspect.cpython-310.pyc,, +evaluate/__pycache__/loading.cpython-310.pyc,, +evaluate/__pycache__/module.cpython-310.pyc,, +evaluate/__pycache__/naming.cpython-310.pyc,, +evaluate/__pycache__/saving.cpython-310.pyc,, +evaluate/__pycache__/visualization.cpython-310.pyc,, +evaluate/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +evaluate/commands/__pycache__/__init__.cpython-310.pyc,, +evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc,, +evaluate/commands/evaluate_cli.py,sha256=w7GWb48JPjoC0BX7Jn12qtxQUBYOlZNhdg4YegA93Fw,4491 +evaluate/config.py,sha256=g4g-S6hVAw0Ys9As7gKaFP66pZeh8hoJJ5GEXaLSWV8,6648 
+evaluate/evaluation_suite/__init__.py,sha256=TjcFihBDf_ZQAoIjSXPEC0iFBeEC_LFqCfXKbrkyhWs,4941 +evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc,, +evaluate/evaluator/__init__.py,sha256=JoWqRP-qCgNzDre6nO8zpJ2Iyp0eUkN7eDKPOPUXz2g,5788 +evaluate/evaluator/__pycache__/__init__.cpython-310.pyc,, +evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc,, +evaluate/evaluator/__pycache__/base.cpython-310.pyc,, +evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc,, +evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/utils.cpython-310.pyc,, +evaluate/evaluator/audio_classification.py,sha256=v5myOnm0PN8BWVnm4nWCzcyklaLtdnbOS3EJ09TPFhg,5804 +evaluate/evaluator/automatic_speech_recognition.py,sha256=jOveYJXsH-t5SzGe7FzXhnHeDKFhqWZUtK3S1l9XYus,4392 +evaluate/evaluator/base.py,sha256=--M302w8Bea6u6iYCc9dGFZL1wDIRGd7uUorhcmUAus,22881 +evaluate/evaluator/image_classification.py,sha256=RJ7NUS91hjZkr5JqhqtYsr5dxBkChA3Qim6An8fHT50,4751 +evaluate/evaluator/question_answering.py,sha256=ArF5BKfE9J9uC-q1GQwbvkAHw1ThgA997ERKmPS-Z4g,9566 +evaluate/evaluator/text2text_generation.py,sha256=M2itKYfIz9z_9J-Y7sXyx4HKMhQbdYwbv8oThSw8Yzw,9676 +evaluate/evaluator/text_classification.py,sha256=g1MUwa3TCUCUBGvZDmdeJ_l8BAOgbn0Q0y4TDvep8Uk,6676 +evaluate/evaluator/text_generation.py,sha256=4ZnHweTUpvNZhaprewTPms__00I8Tnje586ZDCG_ZlU,2679 +evaluate/evaluator/token_classification.py,sha256=XMzteW1coN2e3KWmpWj-OGafj22pzMa7UiHylooirHk,11546 +evaluate/evaluator/utils.py,sha256=HDKdLWLHtfpP-Hhe9cf1TFVIRsmfNgLHifDcGYujKZs,2451 
+evaluate/hub.py,sha256=ZX6VYZU0EkjTWmABuJ6Zg6oHXIT2dHkHy0u8RgyL9UQ,4550 +evaluate/info.py,sha256=l5gXfqHhj77-XvFhz57Mns-Ev-lNJsLxsyYPHPvSzj0,5490 +evaluate/inspect.py,sha256=vVSCLr7HWLxIpXzwpDPuiE5XwiP5QQ82oGkdok7aO7o,4969 +evaluate/loading.py,sha256=P5MjZvrGHRgOE6jVPnyCNWOpbY-iPz_kLIydZjiNT7Q,35219 +evaluate/module.py,sha256=vMsLOskdsD6c_pU85AVo_kceg_r1RNGMFGAR6oZZuHM,46420 +evaluate/naming.py,sha256=Lpw8JmoJfiWs4xDUMEDzcIKO9Nw9RS2lzjeuUP-9acA,2827 +evaluate/saving.py,sha256=UoixNIHmWEceJREvGZlJNViVjRkgNf3MRflwnnhnNUA,2159 +evaluate/utils/__init__.py,sha256=kdFi2pVFSXm_y4EvvuQNnlPUkOPmGLNtc9YTfxAmdsI,1201 +evaluate/utils/__pycache__/__init__.cpython-310.pyc,, +evaluate/utils/__pycache__/file_utils.cpython-310.pyc,, +evaluate/utils/__pycache__/gradio.cpython-310.pyc,, +evaluate/utils/__pycache__/logging.cpython-310.pyc,, +evaluate/utils/file_utils.py,sha256=4jtbBhFfAjrHOIEwFcXaZ5H1bw-gCfVPTzfW5BE36Rk,22144 +evaluate/utils/gradio.py,sha256=UXGRxiPsJ41Xm5gGF7Jf_1vTOPopE_wDoBIyBS0S8d4,4434 +evaluate/utils/logging.py,sha256=nRy963i3_-H0Qcer6ETgnTFiJoQhojSiapeXQ9-eUyk,6698 +evaluate/visualization.py,sha256=m-mD6vxOIQ-_KXTues2tB4r7c4jdygBybHJeidP-jgw,9293 diff --git a/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/WHEEL b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..79d5c89a71989389294854aa34e329701325f8b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.45.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbb3e69c055695cd6dccee54dec460f319c3fd25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/entry_points.txt @@ 
-0,0 +1,3 @@ +[console_scripts] +evaluate-cli = evaluate.commands.evaluate_cli:main + diff --git a/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6adde7833123ecc7f347ea1f5492568989dfd45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate-0.4.5.dist-info/top_level.txt @@ -0,0 +1 @@ +evaluate diff --git a/venv/lib/python3.10/site-packages/evaluate/__init__.py b/venv/lib/python3.10/site-packages/evaluate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75ff0841fe52f8f59f33ab90fc60bd942b0f892c --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/__init__.py @@ -0,0 +1,51 @@ +# flake8: noqa +# Copyright 2020 The HuggingFace Evaluate Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Lint as: python3 +# pylint: enable=line-too-long +# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position + +__version__ = "0.4.5" + +from packaging import version + + +SCRIPTS_VERSION = "main" if version.parse(__version__).is_devrelease else __version__ + +del version + +from .evaluation_suite import EvaluationSuite +from .evaluator import ( + AudioClassificationEvaluator, + AutomaticSpeechRecognitionEvaluator, + Evaluator, + ImageClassificationEvaluator, + QuestionAnsweringEvaluator, + SummarizationEvaluator, + Text2TextGenerationEvaluator, + TextClassificationEvaluator, + TextGenerationEvaluator, + TokenClassificationEvaluator, + TranslationEvaluator, + evaluator, +) +from .hub import push_to_hub +from .info import ComparisonInfo, EvaluationModuleInfo, MeasurementInfo, MetricInfo +from .inspect import inspect_evaluation_module, list_evaluation_modules +from .loading import load +from .module import CombinedEvaluations, Comparison, EvaluationModule, Measurement, Metric, combine +from .saving import save +from .utils import * +from .utils import gradio, logging diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e145413d266abe7bdcf3517a945cca51501e2385 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9947e0f3457237a673fbdafa181e60f684a9e57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c885773cb86259ef3586ed4501fb54e116f3f70 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68982920a87394167019959740e0f24fb316171e Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8760ddee4b2d68f15b09236303302a877f5ee022 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39584cbd332e749fbcd8c5f4c42e45425f9a6116 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8c2292aa4359ec0341dc566d549a854bbd96301 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef6fce71a129cdc201db60e9d40a0b3e630cca4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b0c090b15155fd15300cd630faa59d4ef74f442 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ff90e5d8050f7f15cc64eb0f5aef00fd233fb80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/commands/__init__.py b/venv/lib/python3.10/site-packages/evaluate/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc38d83df67a7145d17c754f6d4cfe4b40a01ec9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..d4ba8994e8efc8e86c7ba60179dbf5b2c3430f84 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py b/venv/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..80593c4dfa0f96c8d3ea5ff6131c13c0a94181eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py @@ -0,0 +1,137 @@ +import argparse +import os +import subprocess +from pathlib import Path + +from cookiecutter.main import cookiecutter +from huggingface_hub import HfApi, Repository, create_repo + +from evaluate.utils.logging import get_logger + + +logger = get_logger(__name__) + +INSTRUCTIONS = """\ +A new repository for your module "{module_name}" of type "{module_type}" has been created at {output_dir} and pushed to the Hugging Face Hub: {repo_url}. + +Here are the next steps: +- implement the module logic in {module_slug}/{module_slug}.py +- document your module in {module_slug}/README.md +- add test cases for your module in {module_slug}/tests.py +- if your module has any dependencies update them in {module_slug}/requirements.txt + +You can test your module's widget locally by running: + +``` +python {output_dir}/{module_slug}/app.py +``` + +When you are happy with your changes you can push your changes with the following commands to the Hugging Face Hub: + +``` +cd {output_dir}/{module_slug} +git add . 
+git commit -m "Updating module" +git push +``` + +You should then see the update widget on the Hugging Face Hub: {repo_url} +And you can load your module in Python with the following code: + +``` +from evaluate import load +module = load("{namespace}/{module_slug}") +``` +""" + + +def main(): + parser = argparse.ArgumentParser("HuggingFace Evaluate CLI tool", usage="evaluate-cli []") + subparsers = parser.add_subparsers() + parser_create = subparsers.add_parser("create", help="Create new evaluation module.") + parser_create.add_argument( + "module_name", type=str, help='Pretty name of new evaluation module, e.g. "Recall" or "Exact Match".' + ) + parser_create.add_argument( + "--module_type", + default="metric", + type=str, + help="Type of module, has to be one of [metric|comparison|measurement].", + ) + parser_create.add_argument( + "--dataset_name", default="", type=str, help="Name of dataset if evaluation module is dataset specific." + ) + parser_create.add_argument("--module_description", type=str, help="Short description of evaluation module.") + parser_create.add_argument("--output_dir", default=Path.cwd(), type=str, help="Path to output directory.") + parser_create.add_argument( + "--organization", default=None, type=str, help="Organization on the Hub to push evaluation module to." 
+ ) + parser_create.add_argument("--private", action="store_true", help="Sets evaluation module repository to private.") + args = vars(parser.parse_args()) + + if args["module_type"] not in ["metric", "comparison", "measurement"]: + raise ValueError("The module_type needs to be one of metric, comparison, or measurement") + + if "-" in args["module_name"]: + raise ValueError("Hyphens ('-') are not allowed in module names.") + + output_dir = Path(args["output_dir"]) + organization = args["organization"] + module_slug = args["module_name"].lower().replace(" ", "_") + + if organization is None: + hfapi = HfApi() + namespace = hfapi.whoami()["name"] + else: + namespace = organization + args["namespace"] = namespace + repo_url = f"https://huggingface.co/spaces/{namespace}/{module_slug}" + + try: + create_repo(namespace + "/" + module_slug, repo_type="space", space_sdk="gradio", private=args["private"]) + except Exception as exception: + logger.error( + f"Could not create Space for module at hf.co/spaces/{namespace}/{module_slug}. Make sure this space does not exist already." 
+ ) + raise exception + subprocess.run( + f"git clone {repo_url}".split(), + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + check=True, + encoding="utf-8", + cwd=output_dir, + env=os.environ.copy(), + ) + + repo = Repository( + local_dir=output_dir / module_slug, + ) + + cookiecutter( + "https://github.com/huggingface/evaluate/", + directory="templates", + no_input=True, + extra_context=args, + output_dir=output_dir, + overwrite_if_exists=True, + ) + + repo.git_add() + repo.git_commit("add module default template") + repo.git_push() + + print( + INSTRUCTIONS.format( + module_name=args["module_name"], + module_type=args["module_type"], + module_slug=module_slug, + namespace=namespace, + repo_url=repo_url, + output_dir=output_dir, + ) + ) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/evaluate/config.py b/venv/lib/python3.10/site-packages/evaluate/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4909fa251ff82893d7c3c536bb111ae947735a8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/config.py @@ -0,0 +1,192 @@ +import importlib +import os +import platform +from pathlib import Path + +from packaging import version + +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +# Metrics +S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics" +CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric" +REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/metrics/{path}/{name}" +REPO_MEASUREMENTS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/measurements/{path}/{name}" +REPO_COMPARISONS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/comparisons/{path}/{name}" + +# Evaluation module types +EVALUATION_MODULE_TYPES = ["metric", "comparison", "measurement"] + +# Hub +HF_ENDPOINT = os.environ.get("HF_ENDPOINT", 
"https://huggingface.co") +HF_LIST_ENDPOINT = HF_ENDPOINT + "/api/spaces?filter={type}" +HUB_EVALUATE_URL = HF_ENDPOINT + "/spaces/{path}/resolve/{revision}/{name}" +HUB_DEFAULT_VERSION = "main" + +PY_VERSION = version.parse(platform.python_version()) + +if PY_VERSION < version.parse("3.8"): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata + +# General environment variables accepted values for booleans +ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + + +# Imports +PANDAS_VERSION = version.parse(importlib_metadata.version("pandas")) +PYARROW_VERSION = version.parse(importlib_metadata.version("pyarrow")) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_JAX", "AUTO").upper() + +TORCH_VERSION = "N/A" +TORCH_AVAILABLE = False + +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None + if TORCH_AVAILABLE: + try: + TORCH_VERSION = version.parse(importlib_metadata.version("torch")) + logger.info(f"PyTorch version {TORCH_VERSION} available.") + except importlib_metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling PyTorch because USE_TF is set") + +TF_VERSION = "N/A" +TF_AVAILABLE = False + +if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: + TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None + if TF_AVAILABLE: + # For the metadata, we have to look for both tensorflow and tensorflow-cpu + for package in [ + "tensorflow", + "tensorflow-cpu", + "tensorflow-gpu", + "tf-nightly", + "tf-nightly-cpu", + "tf-nightly-gpu", + "intel-tensorflow", + "tensorflow-rocm", + "tensorflow-macos", + ]: + try: + TF_VERSION = version.parse(importlib_metadata.version(package)) + except importlib_metadata.PackageNotFoundError: + continue 
+ else: + break + else: + TF_AVAILABLE = False + if TF_AVAILABLE: + if TF_VERSION.major < 2: + logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.") + TF_AVAILABLE = False + else: + logger.info(f"TensorFlow version {TF_VERSION} available.") +else: + logger.info("Disabling Tensorflow because USE_TORCH is set") + + +JAX_VERSION = "N/A" +JAX_AVAILABLE = False + +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + JAX_AVAILABLE = importlib.util.find_spec("jax") is not None + if JAX_AVAILABLE: + try: + JAX_VERSION = version.parse(importlib_metadata.version("jax")) + logger.info(f"JAX version {JAX_VERSION} available.") + except importlib_metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling JAX because USE_JAX is set to False") + + +# Cache location +DEFAULT_XDG_CACHE_HOME = "~/.cache" +XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME) +DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface") +HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME)) + +DEFAULT_HF_EVALUATE_CACHE = os.path.join(HF_CACHE_HOME, "evaluate") +HF_EVALUATE_CACHE = Path(os.getenv("HF_EVALUATE_CACHE", DEFAULT_HF_EVALUATE_CACHE)) + +DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics") +HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE)) + +DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules") +HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE)) + +DOWNLOADED_DATASETS_DIR = "downloads" +DEFAULT_DOWNLOADED_EVALUATE_PATH = os.path.join(HF_EVALUATE_CACHE, DOWNLOADED_DATASETS_DIR) +DOWNLOADED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_EVALUATE_PATH", DEFAULT_DOWNLOADED_EVALUATE_PATH)) + +EXTRACTED_EVALUATE_DIR = "extracted" +DEFAULT_EXTRACTED_EVALUATE_PATH = os.path.join(DEFAULT_DOWNLOADED_EVALUATE_PATH, EXTRACTED_EVALUATE_DIR) +EXTRACTED_EVALUATE_PATH = 
Path(os.getenv("HF_DATASETS_EXTRACTED_EVALUATE_PATH", DEFAULT_EXTRACTED_EVALUATE_PATH)) + +# Download count for the website +HF_UPDATE_DOWNLOAD_COUNTS = ( + os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES +) + +# Offline mode +HF_EVALUATE_OFFLINE = os.environ.get("HF_EVALUATE_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES + + +# File names +LICENSE_FILENAME = "LICENSE" +METRIC_INFO_FILENAME = "metric_info.json" +DATASETDICT_JSON_FILENAME = "dataset_dict.json" + +MODULE_NAME_FOR_DYNAMIC_MODULES = "evaluate_modules" + +HF_HUB_ALLOWED_TASKS = [ + "image-classification", + "translation", + "image-segmentation", + "fill-mask", + "automatic-speech-recognition", + "token-classification", + "sentence-similarity", + "audio-classification", + "question-answering", + "summarization", + "zero-shot-classification", + "table-to-text", + "feature-extraction", + "other", + "multiple-choice", + "text-classification", + "text-to-image", + "text2text-generation", + "zero-shot-image-classification", + "tabular-classification", + "tabular-regression", + "image-to-image", + "tabular-to-text", + "unconditional-image-generation", + "text-retrieval", + "text-to-speech", + "object-detection", + "audio-to-audio", + "text-generation", + "conversational", + "table-question-answering", + "visual-question-answering", + "image-to-text", + "reinforcement-learning", + "voice-activity-detection", + "time-series-forecasting", + "document-question-answering", +] diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py b/venv/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a306d8068a6d66b60fbcc5420bf0cbb334c36305 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py @@ -0,0 +1,128 @@ +import importlib +import inspect +from dataclasses import dataclass +from pathlib import Path +from typing import 
Callable, Dict, Optional, Union + +from datasets import Dataset, DownloadConfig, DownloadMode, load_dataset +from datasets.utils.version import Version + +from ..evaluator import evaluator +from ..loading import evaluation_module_factory +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +@dataclass +class SubTask: + task_type: str + data: Optional[Union[str, Dataset]] = None + subset: Optional[str] = None + split: Optional[str] = None + data_preprocessor: Optional[Callable] = None + args_for_task: Optional[dict] = None + + def __post_init__(self): + if type(self.task_type) is not str: + raise ValueError(f"'task_type' must be type 'str', got {type(self.task_type)}") + if type(self.data) not in [Dataset, str]: + raise ValueError( + f"'data' must be an already-instantiated Dataset object or type 'str', got {type(self.data)}" + ) + if self.subset and type(self.subset) is not str: + raise ValueError(f"'subset' must be type 'str', got {type(self.subset)}") + if self.split and type(self.split) is not str: + raise ValueError(f"'split' must be type 'str', got {type(self.split)}") + if self.data_preprocessor and not callable(self.data_preprocessor): + raise ValueError(f"'data_preprocessor' must be a Callable', got {self.data_preprocessor}") + if self.args_for_task and type(self.args_for_task) is not dict: + raise ValueError(f"'args_for_task' must be type 'dict', got {type(self.args_for_task)}") + + +def import_main_class(module_path): + """Import a module at module_path and return the EvaluationSuite class""" + module = importlib.import_module(module_path) + + module_main_cls = None + for name, obj in module.__dict__.items(): + if isinstance(obj, type) and obj.__name__ == "Suite": + if inspect.isabstract(obj): + continue + module_main_cls = obj + break + + return module_main_cls + + +class EvaluationSuite: + """ + This class instantiates an evaluation suite made up of multiple tasks, where each task consists of a dataset and + an associated 
metric, and runs evaluation on a model or pipeline. Evaluation suites can be a Python script found + either locally or uploaded as a Space on the Hugging Face Hub. + Usage: + ```python + from evaluate import EvaluationSuite + suite = EvaluationSuite.load("evaluate/evaluation-suite-ci") + results = suite.run("lvwerra/distilbert-imdb") + ``` + """ + + def __init__(self, name): + self.name = name + + @staticmethod + def load( + path: str, + download_mode: Optional[DownloadMode] = None, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + ): + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + evaluation_module = evaluation_module_factory( + path, module_type=None, revision=revision, download_config=download_config, download_mode=download_mode + ) + name = Path(path).stem + evaluation_cls = import_main_class(evaluation_module.module_path) + evaluation_instance = evaluation_cls(name) + + return evaluation_instance + + def __repr__(self): + self.tasks = [str(task) for task in self.suite] + return f'EvaluationSuite name: "{self.name}", ' f"Tasks: {self.tasks})" + + def assert_suite_nonempty(self): + if not self.suite: + raise ValueError( + "No evaluation tasks found. The EvaluationSuite must include at least one SubTask definition." 
+ ) + + def run( + self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] # noqa: F821 + ) -> Dict[str, float]: + + self.assert_suite_nonempty() + + results_all = [] + for task in self.suite: + + task_name = task.data + + if task.data_preprocessor: # task requires extra preprocessing + ds = load_dataset(task.data, name=task.subset, split=task.split) + task.data = ds.map(task.data_preprocessor) + + task_evaluator = evaluator(task.task_type) + args_for_task = task.args_for_task + args_for_task["model_or_pipeline"] = model_or_pipeline + args_for_task["data"] = task.data + args_for_task["subset"] = task.subset + args_for_task["split"] = task.split + results = task_evaluator.compute(**args_for_task) + + results["task_name"] = task_name + "/" + task.subset if task.subset else task_name + results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None + results_all.append(results) + return results_all diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b5ba5a084738f47b01d9876f1cfee909374941 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__init__.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2fe4be8a1332417fb8515f019c1b7e8c41a58bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/__init__.py @@ -0,0 +1,140 @@ +# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +try: + from transformers.pipelines import SUPPORTED_TASKS as SUPPORTED_PIPELINE_TASKS + from transformers.pipelines import TASK_ALIASES + from transformers.pipelines import check_task as check_pipeline_task + + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + +from typing import Dict, List + +from .audio_classification import AudioClassificationEvaluator +from .automatic_speech_recognition import AutomaticSpeechRecognitionEvaluator +from .base import Evaluator +from .image_classification import ImageClassificationEvaluator +from .question_answering import QuestionAnsweringEvaluator +from .text2text_generation import SummarizationEvaluator, Text2TextGenerationEvaluator, TranslationEvaluator +from .text_classification import TextClassificationEvaluator +from .text_generation import TextGenerationEvaluator +from .token_classification import TokenClassificationEvaluator + + +SUPPORTED_EVALUATOR_TASKS = { + "text-classification": { + "implementation": TextClassificationEvaluator, + "default_metric_name": "accuracy", + }, + "image-classification": { + "implementation": ImageClassificationEvaluator, + "default_metric_name": "accuracy", + }, + "question-answering": { + "implementation": QuestionAnsweringEvaluator, + "default_metric_name": "squad", + }, + "token-classification": { + "implementation": TokenClassificationEvaluator, + "default_metric_name": "seqeval", + }, + 
"text-generation": { + "implementation": TextGenerationEvaluator, + "default_metric_name": "word_count", + }, + "text2text-generation": { + "implementation": Text2TextGenerationEvaluator, + "default_metric_name": "bleu", + }, + "summarization": { + "implementation": SummarizationEvaluator, + "default_metric_name": "rouge", + }, + "translation": { + "implementation": TranslationEvaluator, + "default_metric_name": "bleu", + }, + "automatic-speech-recognition": { + "implementation": AutomaticSpeechRecognitionEvaluator, + "default_metric_name": "wer", + }, + "audio-classification": { + "implementation": AudioClassificationEvaluator, + "default_metric_name": "accuracy", + }, +} + + +def get_supported_tasks() -> List[str]: + """ + Returns a list of supported task strings. + """ + return list(SUPPORTED_EVALUATOR_TASKS.keys()) + + +def check_task(task: str) -> Dict: + """ + Checks an incoming task string, to validate it's correct and returns the default Evaluator class and default metric + name. It first performs a check to validata that the string is a valid `Pipeline` task, then it checks if it's a + valid `Evaluator` task. `Evaluator` tasks are a substet of `Pipeline` tasks. + Args: + task (`str`): + The task defining which evaluator will be returned. Currently accepted tasks are: + - `"image-classification"` + - `"question-answering"` + - `"text-classification"` (alias `"sentiment-analysis"` available) + - `"token-classification"` + Returns: + task_defaults: `dict`, contains the implementasion class of a give Evaluator and the default metric name. 
+ """ + if task in TASK_ALIASES: + task = TASK_ALIASES[task] + if not check_pipeline_task(task): + raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.") + if task in SUPPORTED_EVALUATOR_TASKS.keys() and task in SUPPORTED_PIPELINE_TASKS.keys(): + return SUPPORTED_EVALUATOR_TASKS[task] + raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.") + + +def evaluator(task: str = None) -> Evaluator: + """ + Utility factory method to build an [`Evaluator`]. + Evaluators encapsulate a task and a default metric name. They leverage `pipeline` functionality from `transformers` + to simplify the evaluation of multiple combinations of models, datasets and metrics for a given task. + Args: + task (`str`): + The task defining which evaluator will be returned. Currently accepted tasks are: + - `"image-classification"`: will return a [`ImageClassificationEvaluator`]. + - `"question-answering"`: will return a [`QuestionAnsweringEvaluator`]. + - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationEvaluator`]. + - `"token-classification"`: will return a [`TokenClassificationEvaluator`]. + Returns: + [`Evaluator`]: An evaluator suitable for the task. + Examples: + ```python + >>> from evaluate import evaluator + >>> # Sentiment analysis evaluator + >>> evaluator("sentiment-analysis") + ```""" + if not TRANSFORMERS_AVAILABLE: + raise ImportError( + "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[transformers]`." 
+ ) + targeted_task = check_task(task) + evaluator_class = targeted_task["implementation"] + default_metric_name = targeted_task["default_metric_name"] + return evaluator_class(task=task, default_metric_name=default_metric_name) diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c88374bec296240012871ade1cb6c2002d7c47b Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6853a472a53e3d0d6848604dff20e9929abcc623 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a02f37b03d216c122131e90861288e5b2ad81e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da51ce1d61926bf699ebd74af4e5c2ee41635c77 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11896c12be66fec85804550242f9118b27b529c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de047ea3bf0bd1657167320116e915c37c916915 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d97a8c103acd426c3718aecf14e6365cdd39a59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76bfddc4209262be099cadc51d0fb21f2c1a9fe2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b512320d1d007a2c099e154a7b8acd800b250006 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b812d6582827f8110fc35ca43c29ad73edd4b136 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca24f841cd6c9a2d2fea92fc0cd63c5722322ef1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..685fb9fd8515f8506b89e9375948fea181f79a8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py @@ -0,0 +1,151 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from numbers import Number +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + + + + Remember that, in order to process audio files, you need ffmpeg installed (https://ffmpeg.org/download.html) + + + + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + + >>> task_evaluator = evaluator("audio-classification") + >>> data = load_dataset("superb", 'ks', split="test[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline=""superb/wav2vec2-base-superb-ks"", + >>> data=data, + >>> label_column="label", + >>> input_column="file", + >>> metric="accuracy", + >>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"} + >>> ) + ``` + + + + The evaluator supports raw audio data as well, in the form of a numpy array. However, be aware that calling + the audio column automatically decodes and resamples the audio files, which can be slow for large datasets. 
+ + + + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + + >>> task_evaluator = evaluator("audio-classification") + >>> data = load_dataset("superb", 'ks', split="test[:40]") + >>> data = data.map(lambda example: {"audio": example["audio"]["array"]}) + >>> results = task_evaluator.compute( + >>> model_or_pipeline=""superb/wav2vec2-base-superb-ks"", + >>> data=data, + >>> label_column="label", + >>> input_column="audio", + >>> metric="accuracy", + >>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"} + >>> ) + ``` +""" + + +class AudioClassificationEvaluator(Evaluator): + """ + Audio classification evaluator. + This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name + `audio-classification`. + Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`]. + """ + + PIPELINE_KWARGS = {} + + def __init__(self, task="audio-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions] + pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label] + + return {"predictions": pred_label} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", 
"bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "file", + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Tuple[Dict[str, float], Any]: + + """ + input_column (`str`, defaults to `"file"`): + The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`): + We want to map class labels defined by the model in the pipeline to values consistent with those + defined in the `label_column` of the `data` dataset. + """ + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + label_mapping=label_mapping, + ) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..ee423826cdd7bac384080b3db8a369cc59a53283 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py @@ -0,0 +1,112 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("automatic-speech-recognition") + >>> data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="https://huggingface.co/openai/whisper-tiny.en", + >>> data=data, + >>> input_column="path", + >>> label_column="sentence", + >>> metric="wer", + >>> ) + ``` +""" + + +class AutomaticSpeechRecognitionEvaluator(Evaluator): + """ + Automatic speech recognition evaluator. + This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name + `automatic-speech-recognition`. + Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`]. 
+ """ + + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="automatic-speech-recognition", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + return {"predictions": [pred["text"] for pred in predictions]} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "path", + label_column: str = "sentence", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + """ + input_column (`str`, defaults to `"path"`): + the name of the column containing the input audio path in the dataset specified by `data`. + label_column (`str`, defaults to `"sentence"`): + the name of the column containing the labels in the dataset specified by `data`. + generation_kwargs (`Dict`, *optional*, defaults to `None`): + The generation kwargs are passed to the pipeline and set the text generation strategy. 
+ """ + + if generation_kwargs is not None: + self.PIPELINE_KWARGS.update(generation_kwargs) + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + ) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/base.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/base.py new file mode 100644 index 0000000000000000000000000000000000000000..09de31f19dedda5dbe38ce0295d8e5b0d962f389 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/base.py @@ -0,0 +1,544 @@ +# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import ABC, abstractmethod +from numbers import Number +from typing import Any, Callable, Dict, List, Optional, Union + +# Lint as: python3 +from datasets import Dataset, load_dataset + +from evaluate.evaluator.utils import choose_split + + +try: + from scipy.stats import bootstrap + + SCIPY_AVAILABLE = True +except ImportError: + SCIPY_AVAILABLE = False + +try: + import transformers + from transformers import Pipeline, pipeline + + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + +from time import perf_counter + +from typing_extensions import Literal + +from ..loading import load +from ..module import EvaluationModule +from ..utils.logging import get_logger +from .utils import DatasetColumn + + +logger = get_logger(__name__) + + +EVALUTOR_COMPUTE_START_DOCSTRING = r""" + Compute the metric for a given pipeline and dataset combination. + Args: + model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`): + If the argument in not specified, we initialize the default pipeline for the task (in this case + `text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or + is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the + argument specifies a pre-initialized pipeline. + data (`str` or `Dataset`, defaults to `None`): + Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset + name, and load it. Otherwise we assume it represents a pre-loaded dataset. + subset (`str`, defaults to `None`): + Defines which dataset subset to load. If `None` is passed the default subset is loaded. + split (`str`, defaults to `None`): + Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function. + metric (`str` or `EvaluationModule`, defaults to `None`): + Specifies the metric we use in evaluator. 
If it is of type `str`, we treat it as the metric name, and + load it. Otherwise we assume it represents a pre-loaded metric. + tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`): + Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for + which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore + this argument. + strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"): + specifies the evaluation strategy. Possible values are: + - `"simple"` - we evaluate the metric and return the scores. + - `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each + of the returned metric keys, using `scipy`'s `bootstrap` method + https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html. + confidence_level (`float`, defaults to `0.95`): + The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. + n_resamples (`int`, defaults to `9999`): + The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. + device (`int`, defaults to `None`): + Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive + integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and + CUDA:0 used if available, CPU otherwise. + random_state (`int`, *optional*, defaults to `None`): + The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for + debugging. +""" + +EVALUATOR_COMPUTE_RETURN_DOCSTRING = r""" + Return: + A `Dict`. The keys represent metric keys calculated for the `metric` spefied in function arguments. For the + `"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict` + containing the score, the confidence interval and the standard error calculated for each metric key. 
+""" + + +class Evaluator(ABC): + """ + The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across + different evaluators. + Base class implementing evaluator operations. + """ + + PIPELINE_KWARGS = {} + METRIC_KWARGS = {} + + def __init__(self, task: str, default_metric_name: str = None): + if not TRANSFORMERS_AVAILABLE: + raise ImportError( + "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`." + ) + if not SCIPY_AVAILABLE: + raise ImportError( + "If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`." + ) + self.task = task + self.default_metric_name = default_metric_name + + @staticmethod + def _compute_confidence_interval( + metric, + metric_inputs, + metric_keys: List[str], + confidence_level: float = 0.95, + n_resamples: int = 9999, + random_state: Optional[int] = None, + ) -> Dict[str, Any]: + """ + A utility function enabling the confidence interval calculation for metrics computed + by the evaluator based on `scipy`'s `bootstrap` method. 
+ """ + + # bootstrap only works with functions that use args and no kwargs + def build_args_metric(metric, key, **kwargs): + def args_metric(*args): + return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key] + + return args_metric + + bootstrap_dict = {} + for key in metric_keys: + bs = bootstrap( + data=list(metric_inputs.values()), + statistic=build_args_metric(metric, key, **metric_inputs), + paired=True, + vectorized=False, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + bootstrap_dict[key] = { + "confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high), + "standard_error": bs.standard_error, + } + return bootstrap_dict + + @staticmethod + def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]: + """ + A utility function computing time performance metrics: + - `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds, + - `samples_per_second` - pipeline throughput in the number of samples per second. + - `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample, + + """ + latency = end_time - start_time + throughput = num_samples / latency + latency_sample = 1.0 / throughput + + return { + "total_time_in_seconds": latency, + "samples_per_second": throughput, + "latency_in_seconds": latency_sample, + } + + @staticmethod + def _infer_device() -> int: + """Helper function to check if GPU or CPU is available for inference.""" + # try infer with torch first + try: + import torch + + if torch.cuda.is_available(): + device = 0 # first GPU + else: + device = -1 # CPU + except ImportError: + # if not available try TF + try: + import tensorflow as tf + + if len(tf.config.list_physical_devices("GPU")) > 0: + device = 0 # first GPU + else: + device = -1 # CPU + except ImportError: + device = -1 + + if device == -1: + logger.info("No GPU found. 
The default device for pipeline inference is set to CPU.") + else: + logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).") + + return device + + @abstractmethod + def predictions_processor(self, *args, **kwargs): + """ + A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric. + """ + raise NotImplementedError() + + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Dict[str, float]: + + result = {} + + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + # Prepare inputs + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column) + pipe = self.prepare_pipeline( + model_or_pipeline=model_or_pipeline, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + device=device, + ) + metric = self.prepare_metric(metric) + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, pipe_inputs) + predictions = self.predictions_processor(predictions, label_mapping) + + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + 
metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + # TODO: To clarify why `wer` and `cer` return float + # even though metric.compute contract says that it + # returns Optional[dict]. + if type(metric_results) is float: + metric_results = {metric.name: metric_results} + + result.update(metric_results) + result.update(perf_results) + + return result + + @staticmethod + def check_for_mismatch_in_device_setup(device, model_or_pipeline): + if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline): + if model_or_pipeline.device.type == "cpu": + raise ValueError( + "The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an " + "accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during " + "initialization to use an accelerator, or pass `device=None` to `compute`. " + ) + elif device != model_or_pipeline.device.index: + raise ValueError( + f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`." + ) + + def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]): + """ + Ensure the columns required for the evaluation are present in the dataset. + + Args: + data (`str` or [`Dataset`]): + Specifies the dataset we will run evaluation on. + columns_names (`List[str]`): + List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method, + while the values are the column names to check. 
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> from evaluate import evaluator + >>> data = load_dataset("rotten_tomatoes', split="train") + >>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"}) + ``` + """ + for input_name, column_name in columns_names.items(): + if column_name not in data.column_names: + raise ValueError( + f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}." + ) + + @staticmethod + def get_dataset_split(data, subset=None, split=None): + """ + Infers which split to use if `None` is given. + + Args: + data (`str`): + Name of dataset. + subset (`str`): + Name of config for datasets with multiple configurations (e.g. 'glue/cola'). + split (`str`, defaults to `None`): + Split to use. + Returns: + `split`: `str` containing which split to use + + Example: + + ```py + >>> from evaluate import evaluator + >>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes") + WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST + 'test' + ``` + """ + if split is None: + split = choose_split(data, subset) + logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}") + return split + + def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None): + """ + Load dataset with given subset and split. + Args: + data ([`Dataset`] or `str`, defaults to `None`): + Specifies the dataset we will run evaluation on. If it is of + type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset. + subset (`str`, defaults to `None`): + Specifies dataset subset to be passed to `name` in `load_dataset`. To be + used with datasets with several configurations (e.g. glue/sst2). + split (`str`, defaults to `None`): + User-defined dataset split by name (e.g. train, validation, test). 
Supports slice-split (`test[:n]`). + If not defined and data is a `str` type, will automatically select the best one via `choose_split()`. + Returns: + data ([`Dataset`]): Loaded dataset which will be used for evaluation. + + Example: + + ```py + >>> from evaluate import evaluator + >>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train") + Dataset({ + features: ['text', 'label'], + num_rows: 8530 + }) + ``` + """ + if isinstance(data, str): + split = self.get_dataset_split(data, subset, split) + data = load_dataset(data, name=subset, split=split) + return data + elif isinstance(data, Dataset): + if split is not None or subset is not None: + logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.") + return data + else: + raise ValueError( + "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object." + ) + + def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs): + """ + Prepare data. + + Args: + data ([`Dataset`]): + Specifies the dataset we will run evaluation on. + input_column (`str`, defaults to `"text"`): + The name of the column containing the text feature in the dataset specified by `data`. + second_input_column(`str`, *optional*): + The name of the column containing the second text feature if there is one. Otherwise, set to `None`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + Returns: + `dict`: metric inputs. + `list`: pipeline inputs. 
+ + Example: + + ```py + >>> from evaluate import evaluator + >>> from datasets import load_dataset + + >>> ds = load_dataset("rotten_tomatoes", split="train") + >>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label") + ``` + """ + + self.check_required_columns(data, {"input_column": input_column, "label_column": label_column}) + + return {"references": data[label_column]}, DatasetColumn(data, input_column) + + def prepare_pipeline( + self, + model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821 + tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + device: int = None, + ): + """ + Prepare pipeline. + + Args: + model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`): + If the argument in not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or + is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the + argument specifies a pre-initialized pipeline. + preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`): + Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for + which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore + this argument. + Returns: + The initialized pipeline. 
+ + Example: + + ```py + >>> from evaluate import evaluator + >>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased") + ``` + """ + + if device is None: + device = self._infer_device() + + if ( + isinstance(model_or_pipeline, str) + or isinstance(model_or_pipeline, transformers.PreTrainedModel) + or isinstance(model_or_pipeline, transformers.TFPreTrainedModel) + ): + pipe = pipeline( + self.task, + model=model_or_pipeline, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + device=device, + ) + else: + if model_or_pipeline is None: + pipe = pipeline(self.task, device=device) + else: + pipe = model_or_pipeline + if tokenizer is not None and feature_extractor is not None: + logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).") + if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")): + raise ValueError( + f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task." + ) + return pipe + + def prepare_metric(self, metric: Union[str, EvaluationModule]): + """ + Prepare metric. + + Args: + metric (`str` or [`EvaluationModule`], defaults to `None`): + Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and + load it. Otherwise we assume it represents a pre-loaded metric. + + Returns: + The loaded metric. + + Example: + + ```py + >>> from evaluate import evaluator + >>> evaluator("text-classification").prepare_metric("accuracy") + ``` + """ + # Prepare metric. + if metric is None: + if self.default_metric_name is None: + raise ValueError( + "`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument." 
+ ) + metric = load(self.default_metric_name) + elif isinstance(metric, str): + metric = load(metric) + + return metric + + def call_pipeline(self, pipe, *args, **kwargs): + start_time = perf_counter() + pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS) + end_time = perf_counter() + return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output)) + + def compute_metric( + self, + metric: EvaluationModule, + metric_inputs: Dict, + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + random_state: Optional[int] = None, + ): + """Compute and return metrics.""" + result = metric.compute(**metric_inputs, **self.METRIC_KWARGS) + + if strategy == "bootstrap": + metric_keys = result.keys() + bootstrap_dict = self._compute_confidence_interval( + metric, + metric_inputs, + metric_keys, + confidence_level, + n_resamples, + random_state, + ) + for key in metric_keys: + bootstrap_dict[key]["score"] = result[key] + + return bootstrap_dict + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..82831458bb8789ce9c9418d6c19d4af4ba5b35a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py @@ -0,0 +1,119 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from numbers import Number +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("image-classification") + >>> data = load_dataset("beans", split="test[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="nateraw/vit-base-beans", + >>> data=data, + >>> label_column="labels", + >>> metric="accuracy", + >>> label_mapping={'angular_leaf_spot': 0, 'bean_rust': 1, 'healthy': 2}, + >>> strategy="bootstrap" + >>> ) + ``` +""" + + +class ImageClassificationEvaluator(Evaluator): + """ + Image classification evaluator. + This image classification evaluator can currently be loaded from [`evaluator`] using the default task name + `image-classification`. + Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`]. 
+ """ + + PIPELINE_KWARGS = {} + + def __init__(self, task="image-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions] + pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label] + + return {"predictions": pred_label} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "image", + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Tuple[Dict[str, float], Any]: + + """ + input_column (`str`, defaults to `"image"`): + The name of the column containing the images as PIL ImageFile in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`): + We want to map class labels defined by the model in the pipeline to values consistent with those + defined in the `label_column` of the `data` dataset. 
+ """ + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + label_mapping=label_mapping, + ) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..99b4190eebdda4e90617d0979fe23af2965d3204 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py @@ -0,0 +1,239 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +# Lint as: python3 +from datasets import Dataset + + +try: + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from ..utils.logging import get_logger +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator +from .utils import DatasetColumn + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +logger = get_logger(__name__) + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("question-answering") + >>> data = load_dataset("squad", split="validation[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad", + >>> data=data, + >>> metric="squad", + >>> ) + ``` + + + + Datasets where the answer may be missing in the context are supported, for example SQuAD v2 dataset. In this case, it is safer to pass `squad_v2_format=True` to + the compute() call. + + + + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("question-answering") + >>> data = load_dataset("squad_v2", split="validation[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2", + >>> data=data, + >>> metric="squad_v2", + >>> squad_v2_format=True, + >>> ) + ``` +""" + + +class QuestionAnsweringEvaluator(Evaluator): + """ + Question answering evaluator. 
This evaluator handles + [**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering), + where the answer to the question is extracted from a context. + + This question answering evaluator can currently be loaded from [`evaluator`] using the default task name + `question-answering`. + + Methods in this class assume a data format compatible with the + [`~transformers.QuestionAnsweringPipeline`]. + """ + + PIPELINE_KWARGS = {} + + def __init__(self, task="question-answering", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def prepare_data( + self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str + ): + """Prepare data.""" + if data is None: + raise ValueError( + "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object." + ) + self.check_required_columns( + data, + { + "question_column": question_column, + "context_column": context_column, + "id_column": id_column, + "label_column": label_column, + }, + ) + + metric_inputs = dict() + metric_inputs["references"] = [ + {"id": element[id_column], "answers": element[label_column]} for element in data + ] + + return metric_inputs, { + "question": DatasetColumn(data, question_column), + "context": DatasetColumn(data, context_column), + } + + def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"): + """ + Check if the provided dataset follows the squad v2 data schema, namely possible samples where the answer is not in the context. + In this case, the answer text list should be `[]`. 
+ """ + original_num_rows = data.num_rows + nonempty_num_rows = data.filter( + lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False + ).num_rows + if original_num_rows > nonempty_num_rows: + return True + else: + return False + + def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List): + result = [] + for i in range(len(predictions)): + pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]} + if squad_v2_format: + pred["no_answer_probability"] = predictions[i]["score"] + result.append(pred) + return {"predictions": result} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + question_column: str = "question", + context_column: str = "context", + id_column: str = "id", + label_column: str = "answers", + squad_v2_format: Optional[bool] = None, + ) -> Tuple[Dict[str, float], Any]: + """ + question_column (`str`, defaults to `"question"`): + The name of the column containing the question in the dataset specified by `data`. + context_column (`str`, defaults to `"context"`): + The name of the column containing the context in the dataset specified by `data`. + id_column (`str`, defaults to `"id"`): + The name of the column containing the identification field of the question and answer pair in the + dataset specified by `data`. 
+ label_column (`str`, defaults to `"answers"`): + The name of the column containing the answers in the dataset specified by `data`. + squad_v2_format (`bool`, *optional*, defaults to `None`): + Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset + has questions where the answer is not in the context, more specifically when are answers as + `{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter + should be set to `False`. If this parameter is not provided, the format will be automatically inferred. + """ + result = {} + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data( + data=data, + question_column=question_column, + context_column=context_column, + id_column=id_column, + label_column=label_column, + ) + + if squad_v2_format is None: + squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column) + logger.warning( + f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}." + ) + pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device) + + metric = self.prepare_metric(metric) + + if squad_v2_format and metric.name == "squad": + logger.warning( + "The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric." + ) + if not squad_v2_format and metric.name == "squad_v2": + logger.warning( + "The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric." 
+ ) + + if squad_v2_format: + self.PIPELINE_KWARGS["handle_impossible_answer"] = True + else: + self.PIPELINE_KWARGS["handle_impossible_answer"] = False + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs) + predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column]) + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + result.update(metric_results) + result.update(perf_results) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..6dfd2c035695b38c1e4f0d9d4929b12c6be30920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py @@ -0,0 +1,267 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION_KWARGS = r""" + input_column (`str`, defaults to `"text"`): + the name of the column containing the input text in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + the name of the column containing the labels in the dataset specified by `data`. + generation_kwargs (`Dict`, *optional*, defaults to `None`): + The generation kwargs are passed to the pipeline and set the text generation strategy. +""" + +TEXT2TEXT_TASK_DOCSTRING_EXAMPLE = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("text2text-generation") + >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="facebook/bart-large-cnn", + >>> data=data, + >>> input_column="article", + >>> label_column="highlights", + >>> metric="rouge", + >>> ) + ``` +""" + +SUMMARIZATION_TASK_DOCSTRING_EXAMPLE = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("summarization") + >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="facebook/bart-large-cnn", + >>> data=data, + >>> input_column="article", + >>> label_column="highlights", + >>> ) + ``` +""" + + +TRANSLATION_TASK_DOCSTRING_EXAMPLE = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from 
datasets import load_dataset + >>> task_evaluator = evaluator("translation") + >>> data = load_dataset("wmt19", "fr-de", split="validation[:40]") + >>> data = data.map(lambda x: {"text": x["translation"]["de"], "label": x["translation"]["fr"]}) + >>> results = task_evaluator.compute( + >>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr", + >>> data=data, + >>> ) + ``` +""" + + +class Text2TextGenerationEvaluator(Evaluator): + """ + Text2Text generation evaluator. + This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name + `text2text-generation`. + Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`]. + """ + + PREDICTION_PREFIX = "generated" + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="text2text-generation", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]} + + @add_start_docstrings( + EVALUTOR_COMPUTE_START_DOCSTRING, + TASK_DOCUMENTATION_KWARGS, + EVALUATOR_COMPUTE_RETURN_DOCSTRING, + TEXT2TEXT_TASK_DOCSTRING_EXAMPLE, + ) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + if generation_kwargs is not None: + 
self.PIPELINE_KWARGS.update(generation_kwargs) + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + ) + + return result + + +class SummarizationEvaluator(Text2TextGenerationEvaluator): + """ + Text summarization evaluator. + This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name + `summarization`. + Methods in this class assume a data format compatible with the [`SummarizationEvaluator`]. + """ + + PREDICTION_PREFIX = "summary" + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="summarization", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + @add_start_docstrings( + EVALUTOR_COMPUTE_START_DOCSTRING, + TASK_DOCUMENTATION_KWARGS, + EVALUATOR_COMPUTE_RETURN_DOCSTRING, + SUMMARIZATION_TASK_DOCSTRING_EXAMPLE, + ) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + 
confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + generation_kwargs=generation_kwargs, + ) + + return result + + +class TranslationEvaluator(Text2TextGenerationEvaluator): + """ + Translation evaluator. + This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name + `translation`. + Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`]. + """ + + PREDICTION_PREFIX = "translation" + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="translation", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + @add_start_docstrings( + EVALUTOR_COMPUTE_START_DOCSTRING, + TASK_DOCUMENTATION_KWARGS, + EVALUATOR_COMPUTE_RETURN_DOCSTRING, + TRANSLATION_TASK_DOCSTRING_EXAMPLE, + ) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + 
generation_kwargs=generation_kwargs, + ) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..200eb01d70336148db473edebebc96e3137c5799 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py @@ -0,0 +1,160 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from numbers import Number +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset, load_dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator +from .utils import DatasetColumnPair + + +if TYPE_CHECKING: + from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("text-classification") + >>> data = load_dataset("imdb", split="test[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli", + >>> data=data, + >>> metric="accuracy", + >>> label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0}, + >>> strategy="bootstrap", + >>> n_resamples=10, + >>> random_state=0 + >>> ) + ``` +""" + + +class TextClassificationEvaluator(Evaluator): + """ + Text classification evaluator. + This text classification evaluator can currently be loaded from [`evaluator`] using the default task name + `text-classification` or with a `"sentiment-analysis"` alias. + Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual + feature as input and a categorical label as output. 
+ """ + + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="text-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str): + if data is None: + raise ValueError( + "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object." + ) + + self.check_required_columns(data, {"input_column": input_column, "label_column": label_column}) + + if second_input_column is not None: + self.check_required_columns(data, {"second_input_column": second_input_column}) + + data = load_dataset(data) if isinstance(data, str) else data + + return {"references": data[label_column]}, DatasetColumnPair( + data, input_column, second_input_column, "text", "text_pair" + ) + + def predictions_processor(self, predictions, label_mapping): + predictions = [ + label_mapping[element["label"]] if label_mapping is not None else element["label"] + for element in predictions + ] + return {"predictions": predictions} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + second_input_column: Optional[str] = None, + label_column: str = "label", + 
label_mapping: Optional[Dict[str, Number]] = None, + ) -> Tuple[Dict[str, float], Any]: + """ + input_column (`str`, *optional*, defaults to `"text"`): + The name of the column containing the text feature in the dataset specified by `data`. + second_input_column (`str`, *optional*, defaults to `None`): + The name of the second column containing the text features. This may be useful for classification tasks + as MNLI, where two columns are used. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`): + We want to map class labels defined by the model in the pipeline to values consistent with those + defined in the `label_column` of the `data` dataset. + """ + + result = {} + + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + # Prepare inputs + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data( + data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column + ) + pipe = self.prepare_pipeline( + model_or_pipeline=model_or_pipeline, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + device=device, + ) + metric = self.prepare_metric(metric) + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, pipe_inputs) + predictions = self.predictions_processor(predictions, label_mapping) + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + result.update(metric_results) + result.update(perf_results) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py 
b/venv/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..15522e860f7eb6fc693780f637337c0fdb22a21c --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py @@ -0,0 +1,69 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Tuple + +from datasets import Dataset + +from .base import Evaluator +from .utils import DatasetColumn + + +TASK_DOCUMENTATION_KWARGS = r""" + input_column (`str`, defaults to `"text"`): + the name of the column containing the input text in the dataset specified by `data`. + generation_kwargs (`Dict`, *optional*, defaults to `None`): + The generation kwargs are passed to the pipeline and set the text generation strategy. +""" + + +class TextGenerationEvaluator(Evaluator): + """ + Text generation evaluator. + This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name + `text-generation`. + Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`]. + """ + + def predictions_processor(self, predictions, *args, **kwargs): + """ + Args: + predictions: A list of lists of dicts + + Returns: + `dict`: All the generated texts are flattened and stored under the "data" key. 
+ """ + return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]} + + def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"): + super().__init__(task=task, default_metric_name=default_metric_name) + self.predictions_prefix = predictions_prefix + + def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]: + """ + Prepare data. + + Args: + data ([`Dataset`]): + Specifies the dataset we will run evaluation on. + input_column (`str`, defaults to `"text"`): + The name of the column containing the text feature in the dataset specified by `data`. + Returns: + `dict`: metric inputs. + `list`: pipeline inputs. + """ + + self.check_required_columns(data, {"input_column": input_column}) + + return {}, DatasetColumn(data, input_column) diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..ba08ebd58d72417eed4e20c93a46c53adaa49811 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py @@ -0,0 +1,278 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +from datasets import ClassLabel, Dataset, Sequence +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator +from .utils import DatasetColumn + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings, and labels are a list of offset are not supported. + + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("token-classification") + >>> data = load_dataset("conll2003", split="validation[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english", + >>> data=data, + >>> metric="seqeval", + >>> ) + ``` + + + + For example, the following dataset format is accepted by the evaluator: + + ```python + dataset = Dataset.from_dict( + mapping={ + "tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]], + "ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]], + }, + features=Features({ + "tokens": Sequence(feature=Value(dtype="string")), + "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])), + }), + ) + ``` + + + + + + For example, the following dataset format is **not** accepted by the evaluator: + + ```python + dataset = Dataset.from_dict( + mapping={ + "tokens": [["New York is a city and Felix a person."]], + "starts": [[0, 23]], + "ends": [[7, 27]], + "ner_tags": [["LOC", 
"PER"]], + }, + features=Features({ + "tokens": Value(dtype="string"), + "starts": Sequence(feature=Value(dtype="int32")), + "ends": Sequence(feature=Value(dtype="int32")), + "ner_tags": Sequence(feature=Value(dtype="string")), + }), + ) + ``` + + +""" + + +class TokenClassificationEvaluator(Evaluator): + """ + Token classification evaluator. + + This token classification evaluator can currently be loaded from [`evaluator`] using the default task name + `token-classification`. + + Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`]. + """ + + PIPELINE_KWARGS = {"ignore_labels": []} + + def __init__(self, task="token-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str): + """ + Transform the pipeline predictions into a list of predicted labels of the same length as the true labels. + + Args: + predictions (`List[List[Dict]]`): + List of pipeline predictions, where each token has been labeled. + words (`List[List[str]]`): + Original input data to the pipeline, used to build predicted labels of the same length. + join_by (`str`): + String to use to join two words. In English, it will typically be " ". 
+ + Returns: + `dict`: a dictionary holding the predictions + """ + preds = [] + + # iterate over the data rows + for i, prediction in enumerate(predictions): + pred_processed = [] + + # get a list of tuples giving the indexes of the start and end character of each word + words_offsets = self.words_to_offsets(words[i], join_by) + + token_index = 0 + for word_offset in words_offsets: + # for each word, we may keep only the predicted label for the first token, discard the others + while prediction[token_index]["start"] < word_offset[0]: + token_index += 1 + + if prediction[token_index]["start"] > word_offset[0]: # bad indexing + pred_processed.append("O") + elif prediction[token_index]["start"] == word_offset[0]: + pred_processed.append(prediction[token_index]["entity"]) + + preds.append(pred_processed) + + return {"predictions": preds} + + def words_to_offsets(self, words: List[str], join_by: str): + """ + Convert a list of words to a list of offsets, where word are joined by `join_by`. + + Args: + words (`List[str]`): + List of words to get offsets from. + join_by (`str`): + String to insert between words. + + Returns: + `List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words. + """ + offsets = [] + + start = 0 + for word in words: + end = start + len(word) - 1 + offsets.append((start, end)) + start = end + len(join_by) + 1 + + return offsets + + def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str): + super().prepare_data(data, input_column, label_column) + + if not isinstance(data.features[input_column], Sequence) or not isinstance( + data.features[label_column], Sequence + ): + raise ValueError( + "TokenClassificationEvaluator expects the input and label columns to be provided as lists." + ) + + # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. + # Otherwise, we have to get the list of labels manually. 
+ labels_are_int = isinstance(data.features[label_column].feature, ClassLabel) + if labels_are_int: + label_list = data.features[label_column].feature.names # list of string labels + id_to_label = {i: label for i, label in enumerate(label_list)} + references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]] + elif data.features[label_column].feature.dtype.startswith("int"): + raise NotImplementedError( + "References provided as integers, but the reference column is not a Sequence of ClassLabels." + ) + else: + # In the event the labels are not a `Sequence[ClassLabel]`, we have already labels as strings + # An example is labels as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in polyglot_ner dataset + references = data[label_column] + + metric_inputs = {"references": references} + data = data.map(lambda x: {input_column: join_by.join(x[input_column])}) + pipeline_inputs = DatasetColumn(data, input_column) + + return metric_inputs, pipeline_inputs + + def prepare_pipeline( + self, + model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821 + tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + device: int = None, + ): + pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device) + + # check the pipeline outputs start characters in its predictions + dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS) + if dummy_output[0][0]["start"] is None: + raise ValueError( + "TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). " + "Transformers pipelines with a slow tokenizer will raise this error." 
+ ) + + return pipe + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: str = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: Optional[int] = None, + random_state: Optional[int] = None, + input_column: str = "tokens", + label_column: str = "ner_tags", + join_by: Optional[str] = " ", + ) -> Tuple[Dict[str, float], Any]: + """ + input_column (`str`, defaults to `"tokens"`): + The name of the column containing the tokens feature in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + join_by (`str`, *optional*, defaults to `" "`): + This evaluator supports dataset whose input column is a list of words. This parameter specifies how to join + words to generate a string input. This is especially useful for languages that do not separate words by a space. 
+ """ + result = {} + + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + # Prepare inputs + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data( + data=data, input_column=input_column, label_column=label_column, join_by=join_by + ) + pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device) + metric = self.prepare_metric(metric) + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, pipe_inputs) + predictions = self.predictions_processor(predictions, data[input_column], join_by) + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + result.update(metric_results) + result.update(perf_results) + + return result diff --git a/venv/lib/python3.10/site-packages/evaluate/evaluator/utils.py b/venv/lib/python3.10/site-packages/evaluate/evaluator/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e364276d008b689d726b8dbbea1402fa93886d9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/evaluator/utils.py @@ -0,0 +1,84 @@ +from datasets import Dataset, get_dataset_split_names + + +class DatasetColumn(list): + """Helper class to avoid loading a dataset column into memory when accessing it.""" + + def __init__(self, dataset: Dataset, key: str): + self.dataset = dataset + self.key = key + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return self.dataset[i][self.key] + + def __iter__(self): + return (self.dataset[i][self.key] for i in range(len(self))) + + +def choose_split(data, subset=None): + available_splits = get_dataset_split_names(data, subset) + preferred_split_order = [ + "test", + "testing", + "eval", + 
"evaluation",
        "validation",
        "val",
        "valid",
        "dev",
        "train",
        "training",
    ]
    for split in preferred_split_order:
        if split in available_splits:
            return split
    raise ValueError("No dataset split defined! Pass an explicit value to the `split` kwarg.")


class DatasetColumnPair(list):
    """Helper class to avoid loading two dataset columns into memory when accessing them.

    Each row is exposed as a two-key dictionary; rows are fetched lazily from the
    underlying dataset on access. Subclasses `list` only so that list-expecting
    consumers accept it without materializing the columns.
    """

    def __init__(
        self,
        dataset: Dataset,
        first_col: str,
        second_col: str,
        first_key: str,
        second_key: str,
    ):
        """
        Args:
            dataset (Dataset): dataset to build an iterator on
            first_col (str): first column name to use in the dataset
            second_col (str): second column name to use in the dataset; may be falsy,
                in which case the second entry of each returned row dict is None
            first_key (str): key name used for the first column in the returned dictionary
            second_key (str): key name used for the second column in the returned dictionary
        """
        self.dataset = dataset

        self.first_col = first_col
        self.second_col = second_col

        self.first_key = first_key
        self.second_key = second_key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {
            self.first_key: self.dataset[i][self.first_col],
            # second_col may be None/empty: pad the pair with None in that case
            self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
        }

    def __iter__(self):
        return (
            {
                self.first_key: self.dataset[i][self.first_col],
                self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
            }
            for i in range(len(self))
        )
diff --git a/venv/lib/python3.10/site-packages/evaluate/hub.py b/venv/lib/python3.10/site-packages/evaluate/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..86118332c6d2293f475e84e80726364dcc63e292 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/hub.py @@ -0,0 +1,133 @@
from typing import Dict

import requests
from huggingface_hub import dataset_info, model_info
from huggingface_hub.repocard import metadata_update

from .config import HF_HUB_ALLOWED_TASKS
from .utils.logging import
get_logger


logger = get_logger(__name__)


def push_to_hub(
    model_id: str,
    task_type: str,
    dataset_type: str,
    dataset_name: str,
    metric_type: str,
    metric_name: str,
    metric_value: float,
    task_name: str = None,
    dataset_config: str = None,
    dataset_split: str = None,
    dataset_revision: str = None,
    dataset_args: Dict[str, int] = None,
    metric_config: str = None,
    metric_args: Dict[str, int] = None,
    overwrite: bool = False,
):
    r"""
    Pushes the result of a metric to the metadata of a model repository in the Hub.

    Args:
        model_id (`str`):
            Model id from https://hf.co/models.
        task_type (`str`):
            Task id, refer to the [Hub allowed tasks](https://github.com/huggingface/evaluate/blob/main/src/evaluate/config.py#L154) for allowed values.
        dataset_type (`str`):
            Dataset id from https://hf.co/datasets.
        dataset_name (`str`):
            Pretty name for the dataset.
        metric_type (`str`):
            Metric id from https://hf.co/metrics.
        metric_name (`str`):
            Pretty name for the metric.
        metric_value (`float`):
            Computed metric value.
        task_name (`str`, *optional*):
            Pretty name for the task.
        dataset_config (`str`, *optional*):
            Dataset configuration used in [`~datasets.load_dataset`].
            See [`~datasets.load_dataset`] for more info.
        dataset_split (`str`, *optional*):
            Name of split used for metric computation.
        dataset_revision (`str`, *optional*):
            Git hash for the specific version of the dataset.
        dataset_args (`dict[str, int]`, *optional*):
            Additional arguments passed to [`~datasets.load_dataset`].
        metric_config (`str`, *optional*):
            Configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
        metric_args (`dict[str, int]`, *optional*):
            Arguments passed during [`~evaluate.EvaluationModule.compute`].
        overwrite (`bool`, *optional*, defaults to `False`):
            If set to `True` an existing metric field can be overwritten, otherwise
            attempting to overwrite any existing fields will cause an error.

    Example:

    ```python
    >>> push_to_hub(
    ...     model_id="huggingface/gpt2-wikitext2",
    ...     metric_value=0.5,
    ...     metric_type="bleu",
    ...     metric_name="BLEU",
    ...     dataset_name="WikiText",
    ...     dataset_type="wikitext",
    ...     dataset_split="test",
    ...     task_type="text-generation",
    ...     task_name="Text Generation"
    ... )
    ```"""
    if task_type not in HF_HUB_ALLOWED_TASKS:
        raise ValueError(f"Task type not supported. Task has to be one of {HF_HUB_ALLOWED_TASKS}")

    # An unknown dataset only warns (the metadata can still be pushed) ...
    try:
        dataset_info(dataset_type)
    except requests.exceptions.HTTPError:
        logger.warning(f"Dataset {dataset_type} not found on the Hub at hf.co/datasets/{dataset_type}")

    # ... but the target model repository must exist.
    try:
        model_info(model_id)
    except requests.exceptions.HTTPError:
        raise ValueError(f"Model {model_id} not found on the Hub at hf.co/{model_id}")

    # Build one `model-index` result entry; optional fields are added below.
    result = {
        "task": {
            "type": task_type,
        },
        "dataset": {
            "type": dataset_type,
            "name": dataset_name,
        },
        "metrics": [
            {
                "type": metric_type,
                "value": metric_value,
            },
        ],
    }

    if dataset_config is not None:
        result["dataset"]["config"] = dataset_config
    if dataset_split is not None:
        result["dataset"]["split"] = dataset_split
    if dataset_revision is not None:
        result["dataset"]["revision"] = dataset_revision
    if dataset_args is not None:
        result["dataset"]["args"] = dataset_args

    if task_name is not None:
        result["task"]["name"] = task_name

    if metric_name is not None:
        result["metrics"][0]["name"] = metric_name
    if metric_config is not None:
        result["metrics"][0]["config"] = metric_config
    if metric_args is not None:
        result["metrics"][0]["args"] = metric_args

    metadata = {"model-index": [{"results": [result]}]}

    return metadata_update(repo_id=model_id, metadata=metadata, overwrite=overwrite)
diff --git a/venv/lib/python3.10/site-packages/evaluate/info.py b/venv/lib/python3.10/site-packages/evaluate/info.py new file mode 100644 index 0000000000000000000000000000000000000000..cc095784e4f1c1f473dd85955447d97d5fdc4e65
--- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/info.py @@ -0,0 +1,157 @@
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
""" EvaluationModuleInfo records information we know about a dataset and a metric.
"""

import dataclasses
import json
import os
from dataclasses import asdict, dataclass, field
from typing import List, Optional, Union

from datasets.features import Features, Value

from . import config
from .utils.logging import get_logger


logger = get_logger(__name__)


@dataclass
class EvaluationModuleInfo:
    """Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`,
    and `MeasurementInfo`.

    `EvaluationModuleInfo` documents an evaluation, including its name, version, and features.
    See the constructor arguments and properties for a full list.

    Note: Not all fields are known on construction and may be updated later.
    """

    # Set in the dataset scripts
    description: str
    citation: str
    features: Union[Features, List[Features]]
    inputs_description: str = field(default_factory=str)
    homepage: str = field(default_factory=str)
    license: str = field(default_factory=str)
    codebase_urls: List[str] = field(default_factory=list)
    reference_urls: List[str] = field(default_factory=list)
    streamable: bool = False
    format: Optional[str] = None
    module_type: str = "metric"  # deprecate this in the future

    # Set later by the builder
    module_name: Optional[str] = None
    config_name: Optional[str] = None
    experiment_id: Optional[str] = None

    def __post_init__(self):
        # NOTE(review): this validation runs for ANY non-None format, although the
        # error message only mentions the 'numpy' format — confirm that is intended.
        if self.format is not None:
            for key, value in self.features.items():
                if not isinstance(value, Value):
                    raise ValueError(
                        f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
                        f"Here {key} is an instance of {value.__class__.__name__}"
                    )

    def write_to_directory(self, metric_info_dir):
        """Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
        Also save the license separately in LICENSE.

        Args:
            metric_info_dir (`str`):
                The directory to save `metric_info_dir` to.

        Example:

        ```py
        >>> my_metric.info.write_to_directory("/path/to/directory/")
        ```
        """
        with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
            json.dump(asdict(self), f)

        with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
            f.write(self.license)

    @classmethod
    def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo":
        """Create `EvaluationModuleInfo` from the JSON file in `metric_info_dir`.

        Args:
            metric_info_dir (`str`):
                The directory containing the `metric_info` JSON file. This
                should be the root directory of a specific metric version.

        Example:

        ```py
        >>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")
        ```
        """
        logger.info(f"Loading Metric info from {metric_info_dir}")
        if not metric_info_dir:
            raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.")

        with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
            metric_info_dict = json.load(f)
        return cls.from_dict(metric_info_dict)

    @classmethod
    def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo":
        # Silently drop unknown keys so info files written by other versions still load.
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})


@dataclass
class MetricInfo(EvaluationModuleInfo):
    """Information about a metric.

    `EvaluationModuleInfo` documents a metric, including its name, version, and features.
    See the constructor arguments and properties for a full list.

    Note: Not all fields are known on construction and may be updated later.
    """

    module_type: str = "metric"


@dataclass
class ComparisonInfo(EvaluationModuleInfo):
    """Information about a comparison.

    `EvaluationModuleInfo` documents a comparison, including its name, version, and features.
    See the constructor arguments and properties for a full list.

    Note: Not all fields are known on construction and may be updated later.
    """

    module_type: str = "comparison"


@dataclass
class MeasurementInfo(EvaluationModuleInfo):
    """Information about a measurement.

    `EvaluationModuleInfo` documents a measurement, including its name, version, and features.
    See the constructor arguments and properties for a full list.

    Note: Not all fields are known on construction and may be updated later.
    """

    module_type: str = "measurement"
diff --git a/venv/lib/python3.10/site-packages/evaluate/inspect.py b/venv/lib/python3.10/site-packages/evaluate/inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..20e2af28ed4df4e99c6d67cccdd24dda1c8cecf9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/inspect.py @@ -0,0 +1,129 @@
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
""" List and inspect metrics."""

from typing import Optional

import requests
from datasets import DownloadConfig

from .config import EVALUATION_MODULE_TYPES, HF_LIST_ENDPOINT
from .loading import evaluation_module_factory
from .utils.logging import get_logger


logger = get_logger(__name__)


class SplitsNotFoundError(ValueError):
    pass


def list_evaluation_modules(module_type=None, include_community=True, with_details=False):
    """List all evaluation modules available on the Hugging Face Hub.

    Args:
        module_type (`str`, *optional*, defaults to `None`):
            Type of evaluation modules to list. Has to be one of `'metric'`, `'comparison'`, or `'measurement'`. If `None`, all types are listed.
        include_community (`bool`, *optional*, defaults to `True`):
            Include community modules in the list.
        with_details (`bool`, *optional*, defaults to `False`):
            Return the full details on the metrics instead of only the ID.

    Returns:
        `List[Union[str, dict]]`

    Example:

    ```py
    >>> from evaluate import list_evaluation_modules
    >>> list_evaluation_modules(module_type="metric")
    ```
    """

    if module_type is None:
        # No type given: aggregate the listings of every known module type.
        evaluations_list = []
        for module_type in EVALUATION_MODULE_TYPES:
            evaluations_list.extend(
                _list_evaluation_modules_type(
                    module_type, include_community=include_community, with_details=with_details
                )
            )
    else:
        if module_type not in EVALUATION_MODULE_TYPES:
            raise ValueError(f"Invalid module type '{module_type}'. Has to be one of {EVALUATION_MODULE_TYPES}.")
        evaluations_list = _list_evaluation_modules_type(
            module_type, include_community=include_community, with_details=with_details
        )
    return evaluations_list


def _list_evaluation_modules_type(module_type, include_community=True, with_details=False):
    # Query the Hub list endpoint for the given module type.
    r = requests.get(HF_LIST_ENDPOINT.format(type=module_type))
    r.raise_for_status()
    d = r.json()

    if not include_community:
        # Canonical modules live under the "evaluate-<type>" namespace.
        d = [element for element in d if element["id"].split("/")[0] == f"evaluate-{module_type}"]

    # remove namespace for canonical modules and add community tag
    for element in d:
        if element["id"].split("/")[0] == f"evaluate-{module_type}":
            element["id"] = element["id"].split("/")[1]
            element["community"] = False
        else:
            element["community"] = True

    if with_details:
        return [
            {
                "name": element["id"],
                "type": module_type,
                "community": element["community"],
                "likes": element.get("likes", 0),
            }
            for element in d
        ]
    else:
        return [element["id"] for element in d]


def inspect_evaluation_module(
    path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs
):
    r"""
    Allow inspection/modification of an evaluation script by copying it on local drive at local_path.

    Args:
        path (``str``): path to the evaluation script. Can be either:

            - a local path to script or the directory containing the script (if the script has the same name as the directory),
              e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``evaluate.list_evaluation_modules()``)
              e.g. ``'accuracy'``, ``'bleu'`` or ``'word_length'``
        local_path (``str``): path to the local folder to copy the dataset script to.
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
        **download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
    """
    evaluation_module = evaluation_module_factory(
        path, download_config=download_config, force_local_path=local_path, **download_kwargs
    )
    print(
        f"The processing scripts for metric {path} can be inspected at {local_path}. "
        f"The main class is in {evaluation_module.module_path}. "
        f"You can modify this processing scripts and use it with `evaluate.load({local_path})`."
    )
diff --git a/venv/lib/python3.10/site-packages/evaluate/loading.py b/venv/lib/python3.10/site-packages/evaluate/loading.py new file mode 100644 index 0000000000000000000000000000000000000000..a9cbbe60de07fed296cd27fd1954ef14cf0de032 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/loading.py @@ -0,0 +1,771 @@
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Load evaluation modules (adapted from the `datasets` script-loading machinery)."""
import filecmp
import importlib
import inspect
import json
import os
import re
import shutil
import time
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
from urllib.parse import urlparse

from datasets import DownloadConfig, DownloadMode
from datasets.builder import DatasetBuilder
from datasets.packaged_modules import _EXTENSION_TO_MODULE, _hash_python_lines
from datasets.utils.filelock import FileLock
from datasets.utils.version import Version

from . import SCRIPTS_VERSION, config
from .module import EvaluationModule
from .utils.file_utils import (
    cached_path,
    head_hf_s3,
    hf_hub_url,
    init_hf_modules,
    is_relative_path,
    relative_to_absolute_path,
    url_or_path_join,
)
from .utils.logging import get_logger


logger = get_logger(__name__)


ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + ["zip"]


def init_dynamic_modules(
    name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
    """
    Create a module with name `name` in which you can add dynamic modules
    such as metrics or datasets. The module can be imported using its name.
    The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can
    be overridden by specifying a path to another directory in `hf_modules_cache`.
    """
    hf_modules_cache = init_hf_modules(hf_modules_cache)
    dynamic_modules_path = os.path.join(hf_modules_cache, name)
    os.makedirs(dynamic_modules_path, exist_ok=True)
    # An (empty) __init__.py makes the directory importable as a package.
    if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
        with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
            pass
    return dynamic_modules_path


def import_main_class(module_path) -> Optional[Union[Type[DatasetBuilder], Type[EvaluationModule]]]:
    """Import a module at module_path and return its main class, a Metric by default"""
    module = importlib.import_module(module_path)
    main_cls_type = EvaluationModule

    # Find the main class in our imported module
    module_main_cls = None
    for name, obj in module.__dict__.items():
        if isinstance(obj, type) and issubclass(obj, main_cls_type):
            if inspect.isabstract(obj):
                # Skip abstract classes; we want a concrete implementation.
                continue
            module_main_cls = obj
            break

    return module_main_cls


def files_to_hash(file_paths: List[str]) -> str:
    """
    Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
    """
    # List all python files in directories if directories are supplied as part of external imports
    to_use_files: List[Union[Path, str]] = []
    for file_path in file_paths:
        if os.path.isdir(file_path):
            to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
        else:
            to_use_files.append(file_path)

    # Get the code from all these files
    lines = []
    for file_path in to_use_files:
        with open(file_path, encoding="utf-8") as f:
            lines.extend(f.readlines())
    return _hash_python_lines(lines)


def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
    """Convert a link to a file on a github repo in a link to the raw github object."""
    parsed = urlparse(url_path)
    sub_directory = None
    if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
        if "blob" in url_path:
            if not url_path.endswith(".py"):
                raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
            url_path = url_path.replace("blob", "raw")  # Point to the raw file
        else:
            # Parse github url to point to zip
            github_path = parsed.path[1:]
            # NOTE(review): defaults to the "master" branch when no /tree/ segment is present.
            repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
            repo_owner, repo_name = repo_info.split("/")
            url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
            sub_directory = f"{repo_name}-{branch}"
    return url_path, sub_directory


def increase_load_count(name: str, resource_type: str):
    """Update the download count of a dataset or metric."""
    if not config.HF_EVALUATE_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
        try:
            head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
        except Exception:
            # Best-effort download counting only — never fail a load because of it.
            pass


def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
    """Find whether we should import or clone additional files for a given processing script.
    And list the import.

    We allow:
    - library dependencies,
    - local dependencies and
    - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository.
      external dependencies will be downloaded (and extracted if needed in the dataset folder).
      We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.

    Note that only direct import in the dataset processing script will be handled
    We don't recursively explore the additional import to download further files.

    Example::

        import tensorflow
        import .c4_utils
        import .clicr.dataset-code.build_json_dataset  # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
    """
    lines = []
    with open(file_path, encoding="utf-8") as f:
        lines.extend(f.readlines())

    logger.debug(f"Checking {file_path} for additional imports.")
    imports: List[Tuple[str, str, str, Optional[str]]] = []
    is_in_docstring = False
    for line in lines:
        docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)

        if len(docstr_start_match) == 1:
            # flip True <=> False only if docstring
            # starts at line without finishing
            is_in_docstring = not is_in_docstring

        if is_in_docstring:
            # import statements in docstrings should
            # not be added as required dependencies
            continue

        match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
        if match is None:
            match = re.match(
                r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
                line,
                flags=re.MULTILINE,
            )
            if match is None:
                continue
        if match.group(1):
            # The import starts with a '.', we will download the relevant file
            if any(imp[1] == match.group(2) for imp in imports):
                # We already have this import
                continue
            if match.group(3):
                # The import has a comment with 'From:', we'll retrieve it from the given url
                url_path = match.group(3)
                url_path, sub_directory = convert_github_url(url_path)
                imports.append(("external", match.group(2), url_path, sub_directory))
            elif match.group(2):
                # The import should be at the same place as the file
                imports.append(("internal", match.group(2), match.group(2), None))
        else:
            if match.group(3):
                # The import has a comment with `From: git+https:...`, asks user to pip install from git.
                url_path = match.group(3)
                imports.append(("library", match.group(2), url_path, None))
            else:
                imports.append(("library", match.group(2), match.group(2), None))

    return imports


def _download_additional_modules(
    name: str, base_path: str, imports: List[Tuple[str, str, str, Optional[str]]], download_config: Optional[DownloadConfig]
) -> List[Tuple[str, str]]:
    """
    Download additional module for a module ``<name>.py`` at URL (or local path) ``<base_path>/<name>.py``
    The imports must have been parsed first using ``get_imports``.

    If some modules need to be installed with pip, an error is raised showing how to install them.
    This function return the list of downloaded modules as tuples (import_name, module_file_path).

    The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
    """
    local_imports = []
    library_imports = []
    download_config = download_config.copy()
    if download_config.download_desc is None:
        download_config.download_desc = "Downloading extra modules"
    for import_type, import_name, import_path, sub_directory in imports:
        if import_type == "library":
            library_imports.append((import_name, import_path))  # Import from a library
            continue

        if import_name == name:
            raise ValueError(
                f"Error in the {name} script, importing relative {import_name} module "
                f"but {import_name} is the name of the script. "
                f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
                f"comment pointing to the original relative import file path."
            )
        if import_type == "internal":
            url_or_filename = url_or_path_join(base_path, import_path + ".py")
        elif import_type == "external":
            url_or_filename = import_path
        else:
            raise ValueError("Wrong import_type")

        local_import_path = cached_path(
            url_or_filename,
            download_config=download_config,
        )
        if sub_directory is not None:
            local_import_path = os.path.join(local_import_path, sub_directory)
        local_imports.append((import_name, local_import_path))

    # Check library imports
    needs_to_be_installed = set()
    for library_import_name, library_import_path in library_imports:
        try:
            lib = importlib.import_module(library_import_name)  # noqa F841
        except ImportError:
            # `sklearn` is imported as `sklearn` but installed as `scikit-learn`.
            library_import_name = "scikit-learn" if library_import_name == "sklearn" else library_import_name
            needs_to_be_installed.add((library_import_name, library_import_path))
    if needs_to_be_installed:
        # NOTE(review): message lacks a space before the module list and carries a
        # stray trailing quote — runtime string, deliberately left unchanged here.
        raise ImportError(
            f"To be able to use {name}, you need to install the following dependencies"
            f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install "
            f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance'"
        )
    return local_imports


def _copy_script_and_other_resources_in_importable_dir(
    name: str,
    importable_directory_path: str,
    subdirectory_name: str,
    original_local_path: str,
    local_imports: List[Tuple[str, str]],
    additional_files: List[Tuple[str, str]],
    download_mode: Optional[DownloadMode],
) -> str:
    """Copy a script and its required imports to an importable directory

    Args:
        name (str): name of the resource to load
        importable_directory_path (str): path to the loadable folder in the dynamic modules directory
        subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
        original_local_path (str): local path to the resource script
        local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
        additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
        download_mode (Optional[DownloadMode]): download mode

    Return:
        importable_local_file: path to an importable module with importlib.import_module
    """

    # Define a directory with a unique name in our dataset or metric folder
    # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
    # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
    importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
    importable_local_file = os.path.join(importable_subdirectory, name + ".py")

    # Prevent parallel disk operations
    lock_path = importable_directory_path + ".lock"
    with FileLock(lock_path):
        # Create main dataset/metrics folder if needed
        if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
            shutil.rmtree(importable_directory_path)
        os.makedirs(importable_directory_path, exist_ok=True)

        # add an __init__ file to the main dataset folder if needed
        init_file_path = os.path.join(importable_directory_path, "__init__.py")
        if not os.path.exists(init_file_path):
            with open(init_file_path, "w"):
                pass

        # Create hash dataset folder if needed
        os.makedirs(importable_subdirectory, exist_ok=True)
        # add an __init__ file to the hash dataset folder if needed
        init_file_path = os.path.join(importable_subdirectory, "__init__.py")
        if not os.path.exists(init_file_path):
            with open(init_file_path, "w"):
                pass

        # Copy dataset.py file in hash folder if needed
        if not os.path.exists(importable_local_file):
            shutil.copyfile(original_local_path, importable_local_file)

        # Record metadata associating original dataset path with local unique folder
        meta_path = importable_local_file.split(".py")[0] + ".json"
        if not os.path.exists(meta_path):
            meta = {"original file path": original_local_path, "local file path": importable_local_file}
            # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
            with open(meta_path, "w", encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)

        # Copy all the additional imports
        for import_name, import_path in local_imports:
            if os.path.isfile(import_path):
                full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
                if not os.path.exists(full_path_local_import):
                    shutil.copyfile(import_path, full_path_local_import)
            elif os.path.isdir(import_path):
                full_path_local_import = os.path.join(importable_subdirectory, import_name)
                if not os.path.exists(full_path_local_import):
                    shutil.copytree(import_path, full_path_local_import)
            else:
                raise OSError(f"Error with local import at {import_path}")

        # Copy additional files like dataset infos file if needed
        for file_name, original_path in additional_files:
            destination_additional_path = os.path.join(importable_subdirectory, file_name)
            if not os.path.exists(destination_additional_path) or not filecmp.cmp(
                original_path, destination_additional_path
            ):
                shutil.copyfile(original_path, destination_additional_path)
    return importable_local_file


def _create_importable_file(
    local_path: str,
    local_imports: List[Tuple[str, str]],
    additional_files: List[Tuple[str, str]],
    dynamic_modules_path: str,
    module_namespace: str,
    name: str,
    download_mode: DownloadMode,
) -> Tuple[str, str]:
    importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
    Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
    (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
    hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
    importable_local_file = _copy_script_and_other_resources_in_importable_dir(
        name=name.split("/")[-1],
        importable_directory_path=importable_directory_path,
        subdirectory_name=hash,
original_local_path=local_path, + local_imports=local_imports, + additional_files=additional_files, + download_mode=download_mode, + ) + logger.debug(f"Created importable dataset file at {importable_local_file}") + module_path = ".".join( + [os.path.basename(dynamic_modules_path), module_namespace, name.replace("/", "--"), hash, name.split("/")[-1]] + ) + return module_path, hash + + +@dataclass +class ImportableModule: + module_path: str + hash: str + + +class _EvaluationModuleFactory: + def get_module(self) -> ImportableModule: + raise NotImplementedError + + +class LocalEvaluationModuleFactory(_EvaluationModuleFactory): + """Get the module of a local metric. The metric script is loaded from a local script.""" + + def __init__( + self, + path: str, + module_type: str = "metrics", + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.path = path + self.module_type = module_type + self.name = Path(path).stem + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + + def get_module(self) -> ImportableModule: + # get script and other files + imports = get_imports(self.path) + local_imports = _download_additional_modules( + name=self.name, + base_path=str(Path(self.path).parent), + imports=imports, + download_config=self.download_config, + ) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=self.path, + local_imports=local_imports, + additional_files=[], + dynamic_modules_path=dynamic_modules_path, + module_namespace=self.module_type, + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + return 
ImportableModule(module_path, hash) + + +class HubEvaluationModuleFactory(_EvaluationModuleFactory): + """Get the module of a metric from a metric repository on the Hub.""" + + def __init__( + self, + name: str, + module_type: str = "metrics", + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.module_type = module_type + self.revision = revision + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") == 1 + increase_load_count(name, resource_type="metric") + + def download_loading_script(self, revision) -> str: + file_path = hf_hub_url(path=self.name, name=self.name.split("/")[1] + ".py", revision=revision) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading builder script" + return cached_path(file_path, download_config=download_config) + + def get_module(self) -> ImportableModule: + revision = self.revision or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) + + if re.match(r"\d*\.\d*\.\d*", revision): # revision is version number (three digits separated by full stops) + revision = "v" + revision # tagging convention on evaluate repository starts with v + + # get script and other files + try: + local_path = self.download_loading_script(revision) + except FileNotFoundError as err: + # if there is no file found with current revision tag try to load main + if self.revision is None and os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) != "main": + revision = "main" + local_path = self.download_loading_script(revision) + else: + raise err + + imports = get_imports(local_path) + local_imports = _download_additional_modules( + name=self.name, + base_path=hf_hub_url(path=self.name, name="", 
revision=revision), + imports=imports, + download_config=self.download_config, + ) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=local_path, + local_imports=local_imports, + additional_files=[], + dynamic_modules_path=dynamic_modules_path, + module_namespace=self.module_type, + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + return ImportableModule(module_path, hash) + + +class CachedEvaluationModuleFactory(_EvaluationModuleFactory): + """ + Get the module of a metric that has been loaded once already and cached. + The script that is loaded from the cache is the most recent one with a matching name. + """ + + def __init__( + self, + name: str, + module_type: str = "metrics", + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.module_type = module_type + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") == 0 + + def get_module(self) -> ImportableModule: + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + importable_directory_path = os.path.join(dynamic_modules_path, self.module_type, self.name) + hashes = ( + [h for h in os.listdir(importable_directory_path) if len(h) == 64] + if os.path.isdir(importable_directory_path) + else None + ) + if not hashes: + raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}") + # get most recent + + def _get_modification_time(module_hash): + return ( + (Path(importable_directory_path) / module_hash / (self.name.split("--")[-1] + ".py")).stat().st_mtime + ) + + hash = sorted(hashes, key=_get_modification_time)[-1] + logger.warning( + f"Using the latest cached version of the module from 
{os.path.join(importable_directory_path, hash)} " + f"(last modified on {time.ctime(_get_modification_time(hash))}) since it " + f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub." + ) + # make the new module to be noticed by the import system + module_path = ".".join( + [os.path.basename(dynamic_modules_path), self.module_type, self.name, hash, self.name.split("--")[-1]] + ) + importlib.invalidate_caches() + return ImportableModule(module_path, hash) + + +def evaluation_module_factory( + path: str, + module_type: Optional[str] = None, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + force_local_path: Optional[str] = None, + dynamic_modules_path: Optional[str] = None, + **download_kwargs, +) -> ImportableModule: + """ + Download/extract/cache a metric module. + + Metrics codes are cached inside the the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks). + + Args: + + path (str): Path or name of the metric script. + + - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory): + -> load the module from the metric script + e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``. + - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`) + -> load the module from the metric script in the github repository at huggingface/datasets + e.g. ``'accuracy'`` or ``'rouge'``. + + revision (Optional ``Union[str, datasets.Version]``): + If specified, the module will be loaded from the datasets repository at this version. + By default: + - it is set to the local version of the lib. + - it will also try to load it from the master branch if it's not available at the local version of the lib. + Specifying a version that is different from your local version of the lib might cause compatibility issues. 
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters. + download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. + force_local_path (Optional str): Optional path to a local path to download and prepare the script to. + Used to inspect or modify the script folder. + dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules): + Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`. + By default the datasets and metrics are stored inside the `datasets_modules` module. + download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied. + + Returns: + ImportableModule + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + download_config.extract_compressed_file = True + download_config.force_extract = True + + filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1] + if not filename.endswith(".py"): + filename = filename + ".py" + combined_path = os.path.join(path, filename) + # Try locally + if path.endswith(filename): + if os.path.isfile(path): + return LocalEvaluationModuleFactory( + path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + else: + raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}") + elif os.path.isfile(combined_path): + return LocalEvaluationModuleFactory( + combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + elif is_relative_path(path) and path.count("/") <= 1 and not force_local_path: + try: + # load a canonical evaluation module from hub + if path.count("/") == 0: + # 
if no type provided look through all possible modules + if module_type is None: + for current_type in ["metric", "comparison", "measurement"]: + try: + return HubEvaluationModuleFactory( + f"evaluate-{current_type}/{path}", + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + except ConnectionError: + pass + raise FileNotFoundError + # if module_type provided load specific module_type + else: + return HubEvaluationModuleFactory( + f"evaluate-{module_type}/{path}", + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + # load community evaluation module from hub + elif path.count("/") == 1: + return HubEvaluationModuleFactory( + path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + except Exception as e1: # noqa: all the attempts failed, before raising the error we should check if the module is already cached. + # if it's a canonical module we need to check if it's any of the types + if path.count("/") == 0: + for current_type in ["metric", "comparison", "measurement"]: + try: + return CachedEvaluationModuleFactory( + f"evaluate-{current_type}--{path}", dynamic_modules_path=dynamic_modules_path + ).get_module() + except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist. + pass + # if it's a community module we just need to check on path + elif path.count("/") == 1: + try: + return CachedEvaluationModuleFactory( + path.replace("/", "--"), dynamic_modules_path=dynamic_modules_path + ).get_module() + except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist. 
+ pass + if not isinstance(e1, (ConnectionError, FileNotFoundError)): + raise e1 from None + raise FileNotFoundError( + f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}. " + f"Module '{path}' doesn't exist on the Hugging Face Hub either." + ) from None + else: + raise FileNotFoundError(f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}.") + + +def load( + path: str, + config_name: Optional[str] = None, + module_type: Optional[str] = None, + process_id: int = 0, + num_process: int = 1, + cache_dir: Optional[str] = None, + experiment_id: Optional[str] = None, + keep_in_memory: bool = False, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + revision: Optional[Union[str, Version]] = None, + **init_kwargs, +) -> EvaluationModule: + """Load a [`~evaluate.EvaluationModule`]. + + Args: + + path (`str`): + Path to the evaluation processing script with the evaluation builder. Can be either: + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. `'./metrics/rouge'` or `'./metrics/rouge/rouge.py'` + - a evaluation module identifier on the HuggingFace evaluate repo e.g. `'rouge'` or `'bleu'` that are in either `'metrics/'`, + `'comparisons/'`, or `'measurements/'` depending on the provided `module_type` + config_name (`str`, *optional*): + Selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset). + module_type (`str`, default `'metric'`): + Type of evaluation module, can be one of `'metric'`, `'comparison'`, or `'measurement'`. + process_id (`int`, *optional*): + For distributed evaluation: id of the process. + num_process (`int`, *optional*): + For distributed evaluation: total number of processes. + cache_dir (`str`, *optional*): + Path to store the temporary predictions and references (default to `~/.cache/huggingface/evaluate/`). 
+ experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + keep_in_memory (`bool`): + Whether to store the temporary results in memory (defaults to `False`). + download_config ([`~evaluate.DownloadConfig`], *optional*): + Specific download configuration parameters. + download_mode ([`DownloadMode`], defaults to `REUSE_DATASET_IF_EXISTS`): + Download/generate mode. + revision (`Union[str, evaluate.Version]`, *optional*): + If specified, the module will be loaded from the datasets repository + at this version. By default it is set to the local version of the lib. Specifying a version that is different from + your local version of the lib might cause compatibility issues. + + Returns: + [`evaluate.EvaluationModule`] + + Example: + + ```py + >>> from evaluate import load + >>> accuracy = load("accuracy") + ``` + """ + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + evaluation_module = evaluation_module_factory( + path, module_type=module_type, revision=revision, download_config=download_config, download_mode=download_mode + ) + evaluation_cls = import_main_class(evaluation_module.module_path) + evaluation_instance = evaluation_cls( + config_name=config_name, + process_id=process_id, + num_process=num_process, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + experiment_id=experiment_id, + hash=evaluation_module.hash, + **init_kwargs, + ) + + if module_type and module_type != evaluation_instance.module_type: + raise TypeError( + f"No module of module type '{module_type}' not found for '{path}' locally, or on the Hugging Face Hub. Found module of module type '{evaluation_instance.module_type}' instead." 
+ ) + + # Download and prepare resources for the metric + evaluation_instance.download_and_prepare(download_config=download_config) + + return evaluation_instance diff --git a/venv/lib/python3.10/site-packages/evaluate/module.py b/venv/lib/python3.10/site-packages/evaluate/module.py new file mode 100644 index 0000000000000000000000000000000000000000..ca38b9b1584690bfed2658a717eeb972576d0846 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/module.py @@ -0,0 +1,1034 @@ +# Copyright 2020 The HuggingFace Datasets Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" EvaluationModule base class.""" +import collections +import itertools +import os +import types +import uuid +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import pyarrow as pa +from datasets import DatasetInfo, DownloadConfig, DownloadManager +from datasets.arrow_dataset import Dataset +from datasets.arrow_reader import ArrowReader +from datasets.arrow_writer import ArrowWriter +from datasets.features import Features, Sequence, Value +from datasets.features.features import _check_non_null_non_empty_recursive +from datasets.utils.filelock import BaseFileLock, FileLock, Timeout +from datasets.utils.py_utils import copyfunc, temp_seed, zip_dict + +from . 
import config +from .info import EvaluationModuleInfo +from .naming import camelcase_to_snakecase +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +class FileFreeLock(BaseFileLock): + """Thread lock until a file **cannot** be locked""" + + def __init__(self, lock_file, *args, **kwargs): + self.filelock = FileLock(lock_file) + super().__init__(lock_file, *args, **kwargs) + self._lock_file_fd = None + + def _acquire(self): + try: + self.filelock.acquire(timeout=0.01, poll_interval=0.02) # Try to lock once + except Timeout: + # We couldn't acquire the lock, the file is locked! + self._lock_file_fd = self.filelock.lock_file + else: + # We were able to acquire the lock, the file is not yet locked! + self.filelock.release() + self._lock_file_fd = None + + def _release(self): + self._lock_file_fd = None + + @property + def is_locked(self) -> bool: + return self._lock_file_fd is not None + + +# lists - summarize long lists similarly to NumPy +# arrays/tensors - let the frameworks control formatting +def summarize_if_long_list(obj): + if type(obj) is not list or len(obj) <= 6: + return f"{obj}" + + def format_chunk(chunk): + return ", ".join(repr(x) for x in chunk) + + return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]" + + +class EvaluationModuleInfoMixin: + """This base class exposes some attributes of EvaluationModuleInfo + at the base level of the EvaluationModule for easy access. 
+ """ + + def __init__(self, info: EvaluationModuleInfo): + self._module_info = info + + @property + def info(self): + """:class:`evaluate.EvaluationModuleInfo` object containing all the metadata in the evaluation module.""" + return self._module_info + + @property + def name(self) -> str: + return self._module_info.module_name + + @property + def experiment_id(self) -> Optional[str]: + return self._module_info.experiment_id + + @property + def description(self) -> str: + return self._module_info.description + + @property + def citation(self) -> str: + return self._module_info.citation + + @property + def features(self) -> Features: + return self._module_info.features + + @property + def inputs_description(self) -> str: + return self._module_info.inputs_description + + @property + def homepage(self) -> Optional[str]: + return self._module_info.homepage + + @property + def license(self) -> str: + return self._module_info.license + + @property + def codebase_urls(self) -> Optional[List[str]]: + return self._module_info.codebase_urls + + @property + def reference_urls(self) -> Optional[List[str]]: + return self._module_info.reference_urls + + @property + def streamable(self) -> bool: + return self._module_info.streamable + + @property + def format(self) -> Optional[str]: + return self._module_info.format + + @property + def module_type(self) -> str: + return self._module_info.module_type + + +class EvaluationModule(EvaluationModuleInfoMixin): + """A `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements. + + Args: + config_name (`str`): + This is used to define a hash specific to a module computation script and prevents the module's data + to be overridden when the module loading script is modified. + keep_in_memory (`bool`): + Keep all predictions and references in memory. Not possible in distributed settings. + cache_dir (`str`): + Path to a directory in which temporary prediction/references data will be stored. 
+ The data directory should be located on a shared file-system in distributed setups. + num_process (`int`): + Specify the total number of nodes in a distributed settings. + This is useful to compute module in distributed setups (in particular non-additive modules like F1). + process_id (`int`): + Specify the id of the current process in a distributed setup (between 0 and num_process-1) + This is useful to compute module in distributed setups (in particular non-additive metrics like F1). + seed (`int`, optional): + If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run. + experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute module in distributed setups (in particular non-additive metrics like F1). + hash (`str`): + Used to identify the evaluation module according to the hashed file contents. + max_concurrent_cache_files (`int`): + Max number of concurrent module cache files (default `10000`). + timeout (`Union[int, float]`): + Timeout in second for distributed setting synchronization. 
+ """ + + def __init__( + self, + config_name: Optional[str] = None, + keep_in_memory: bool = False, + cache_dir: Optional[str] = None, + num_process: int = 1, + process_id: int = 0, + seed: Optional[int] = None, + experiment_id: Optional[str] = None, + hash: str = None, + max_concurrent_cache_files: int = 10000, + timeout: Union[int, float] = 100, + **kwargs, + ): + # prepare info + self.config_name = config_name or "default" + info = self._info() + info.module_name = camelcase_to_snakecase(self.__class__.__name__) + info.config_name = self.config_name + info.experiment_id = experiment_id or "default_experiment" + EvaluationModuleInfoMixin.__init__(self, info) # For easy access on low level + + # Safety checks on num_process and process_id + if not isinstance(process_id, int) or process_id < 0: + raise ValueError("'process_id' should be a number greater than 0") + if not isinstance(num_process, int) or num_process <= process_id: + raise ValueError("'num_process' should be a number greater than process_id") + if keep_in_memory and num_process != 1: + raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).") + + self.num_process = num_process + self.process_id = process_id + self.max_concurrent_cache_files = max_concurrent_cache_files + + self.keep_in_memory = keep_in_memory + self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE) + self.data_dir = self._build_data_dir() + if seed is None: + _, seed, pos, *_ = np.random.get_state() + self.seed: int = seed[pos] if pos < 624 else seed[0] + else: + self.seed: int = seed + self.timeout: Union[int, float] = timeout + + # Update 'compute' and 'add' docstring + # methods need to be copied otherwise it changes the docstrings of every instance + self.compute = types.MethodType(copyfunc(self.compute), self) + self.add_batch = types.MethodType(copyfunc(self.add_batch), self) + self.add = types.MethodType(copyfunc(self.add), self) + self.compute.__func__.__doc__ 
+= self.info.inputs_description + self.add_batch.__func__.__doc__ += self.info.inputs_description + self.add.__func__.__doc__ += self.info.inputs_description + + # self.arrow_schema = pa.schema(field for field in self.info.features.type) + self.selected_feature_format = None + self.buf_writer = None + self.writer = None + self.writer_batch_size = None + self.data = None + + # This is the cache file we store our predictions/references in + # Keep it None for now so we can (cloud)pickle the object + self.cache_file_name = None + self.filelock = None + self.rendez_vous_lock = None + + # This is all the cache files on which we have a lock when we are in a distributed setting + self.file_paths = None + self.filelocks = None + + # This fingerprints the evaluation module according to the hashed contents of the module code + self._hash = hash + + def __len__(self): + """Return the number of examples (predictions or predictions/references pair) + currently stored in the evaluation module's cache. + """ + return 0 if self.writer is None else len(self.writer) + + def __repr__(self): + return ( + f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", ' + f'features: {self.features}, usage: """{self.inputs_description}""", ' + f"stored examples: {len(self)})" + ) + + def _build_data_dir(self): + """Path of this evaluation module in cache_dir: + Will be: + self._data_dir_root/self.name/self.config_name/self.hash (if not none)/ + If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped. + """ + builder_data_dir = self._data_dir_root + builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name) + os.makedirs(builder_data_dir, exist_ok=True) + return builder_data_dir + + def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]: + """Create a new cache file. 
If the default cache file is used, we generated a new hash.""" + file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow") + filelock = None + for i in range(self.max_concurrent_cache_files): + filelock = FileLock(file_path + ".lock") + try: + filelock.acquire(timeout=timeout) + except Timeout: + # If we have reached the max number of attempts or we are not allow to find a free name (distributed setup) + # We raise an error + if self.num_process != 1: + raise ValueError( + f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. " + f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision " + f"between distributed evaluation module instances." + ) from None + if i == self.max_concurrent_cache_files - 1: + raise ValueError( + f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system." + f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module " + f"(current value is {self.max_concurrent_cache_files})." + ) from None + # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name. + file_uuid = str(uuid.uuid4()) + file_path = os.path.join( + self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow" + ) + else: + break + + return file_path, filelock + + def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]: + """Get a lock on all the cache files in a distributed setup. + We wait for timeout second to let all the distributed node finish their tasks (default is 100 seconds). + """ + if self.num_process == 1: + if self.cache_file_name is None: + raise ValueError( + "Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` " + "at least once before calling `compute`." 
+ ) + file_paths = [self.cache_file_name] + else: + file_paths = [ + os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow") + for process_id in range(self.num_process) + ] + + # Let's acquire a lock on each process files to be sure they are finished writing + filelocks = [] + for process_id, file_path in enumerate(file_paths): + if process_id == 0: # process 0 already has its lock file + filelocks.append(self.filelock) + else: + filelock = FileLock(file_path + ".lock") + try: + filelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Cannot acquire lock on cached file {file_path} for process {process_id}." + ) from None + else: + filelocks.append(filelock) + + return file_paths, filelocks + + def _check_all_processes_locks(self): + expected_lock_file_names = [ + os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock") + for process_id in range(self.num_process) + ] + for expected_lock_file_name in expected_lock_file_names: + nofilelock = FileFreeLock(expected_lock_file_name) + try: + nofilelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." + ) from None + else: + nofilelock.release() + + def _check_rendez_vous(self): + expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock") + nofilelock = FileFreeLock(expected_lock_file_name) + try: + nofilelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." 
+ ) from None + else: + nofilelock.release() + lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") + rendez_vous_lock = FileLock(lock_file_name) + try: + rendez_vous_lock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None + else: + rendez_vous_lock.release() + + def _finalize(self): + """Close all the writing process and load/gather the data + from all the nodes if main node or all_process is True. + """ + if self.writer is not None: + self.writer.finalize() + self.writer = None + # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data + if self.filelock is not None and self.process_id > 0: + self.filelock.release() + + if self.keep_in_memory: + # Read the predictions and references + reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format)) + self.data = Dataset.from_buffer(self.buf_writer.getvalue()) + + elif self.process_id == 0: + # Let's acquire a lock on each node files to be sure they are finished writing + file_paths, filelocks = self._get_all_cache_files() + + # Read the predictions and references + try: + reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format)) + self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths])) + except FileNotFoundError: + raise ValueError( + "Error in finalize: another evaluation module instance is already using the local cache file. " + "Please specify an experiment_id to avoid collision between distributed evaluation module instances." + ) from None + + # Store file paths and locks and we will release/delete them after the computation. + self.file_paths = file_paths + self.filelocks = filelocks + + def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]: + """Compute the evaluation module. 
def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
    """Compute the evaluation module.

    Only keyword arguments are accepted, to prevent argument-order mistakes.

    Args:
        predictions (`list/array/tensor`, *optional*):
            Predictions.
        references (`list/array/tensor`, *optional*):
            References.
        **kwargs (optional):
            Keyword arguments forwarded to the evaluation module
            [`~evaluate.EvaluationModule.compute`] method; any keys matching the
            module's feature names are treated as inputs instead.

    Return:
        `dict` or `None`

        - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
        - `None` if the evaluation module is not run on the main process (`process_id != 0`).

    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    >>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
    ```
    """
    feature_names = self._feature_names()
    all_kwargs = {"predictions": predictions, "references": references, **kwargs}
    if predictions is None and references is None:
        # Nothing passed inline: fill the missing inputs with None and rely on
        # data previously accumulated through add()/add_batch().
        all_kwargs.update({name: None for name in feature_names if name not in all_kwargs})
    else:
        absent = [name for name in feature_names if name not in all_kwargs]
        if absent:
            raise ValueError(
                f"Evaluation module inputs are missing: {absent}. All required inputs are {list(feature_names)}"
            )
    module_inputs = {name: all_kwargs[name] for name in feature_names}
    compute_kwargs = {key: kwargs[key] for key in kwargs if key not in feature_names}

    if any(value is not None for value in module_inputs.values()):
        self.add_batch(**module_inputs)
    self._finalize()

    self.cache_file_name = None
    self.filelock = None
    self.selected_feature_format = None

    if self.process_id != 0:
        return None

    self.data.set_format(type=self.info.format)

    module_inputs = {name: self.data[name] for name in feature_names}
    with temp_seed(self.seed):
        output = self._compute(**module_inputs, **compute_kwargs)

    if self.buf_writer is not None:
        self.buf_writer = None
        del self.data
        self.data = None
    else:
        # Release locks and delete all the cache files. Process 0 is released last.
        for lock, cached_file in reversed(list(zip(self.filelocks, self.file_paths))):
            logger.info(f"Removing {cached_file}")
            del self.data
            self.data = None
            del self.writer
            self.writer = None
            os.remove(cached_file)
            lock.release()

    return output
def add_batch(self, *, predictions=None, references=None, **kwargs):
    """Add a batch of predictions and references to the evaluation module's stack.

    Args:
        predictions (`list/array/tensor`, *optional*):
            Predictions.
        references (`list/array/tensor`, *optional*):
            References.

    Example:

    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
    ...     accuracy.add_batch(references=refs, predictions=preds)
    ```
    """
    feature_names = self._feature_names()
    unexpected = [name for name in kwargs if name not in feature_names]
    if unexpected:
        raise ValueError(
            f"Bad inputs for evaluation module: {unexpected}. All required inputs are {list(feature_names)}"
        )
    batch = {"predictions": predictions, "references": references, **kwargs}
    batch = {name: batch[name] for name in feature_names}
    if self.writer is None:
        # First batch: infer the matching feature schema and open the writer.
        self.selected_feature_format = self._infer_feature_from_batch(batch)
        self._init_writer()
    try:
        for name, column in batch.items():
            if len(column) > 0:
                self._enforce_nested_string_type(self.selected_feature_format[name], column[0])
        batch = self.selected_feature_format.encode_batch(batch)
        self.writer.write_batch(batch)
    except (pa.ArrowInvalid, TypeError):
        # Build the most specific error message possible: column-length mismatch,
        # custom input names, or a plain predictions/references format mismatch.
        if any(len(batch[name]) != len(next(iter(batch.values()))) for name in batch):
            first_col = next(iter(batch))
            mismatched_col = [name for name in batch if len(batch[name]) != len(batch[first_col])][0]
            error_msg = (
                f"Mismatch in the number of {first_col} ({len(batch[first_col])}) and {mismatched_col} ({len(batch[mismatched_col])})"
            )
        elif set(self.selected_feature_format) != {"references", "predictions"}:
            error_msg = (
                f"Module inputs don't match the expected format.\n"
                f"Expected format: {self.selected_feature_format},\n"
            )
            error_msg += ",\n".join(
                f"Input {name}: {summarize_if_long_list(batch[name])}"
                for name in self.selected_feature_format
            )
        else:
            error_msg = (
                f"Predictions and/or references don't match the expected format.\n"
                f"Expected format: {self.selected_feature_format},\n"
                f"Input predictions: {summarize_if_long_list(predictions)},\n"
                f"Input references: {summarize_if_long_list(references)}"
            )
        raise ValueError(error_msg) from None
def add(self, *, prediction=None, reference=None, **kwargs):
    """Add one prediction and reference to the evaluation module's stack.

    Args:
        prediction (`list/array/tensor`, *optional*):
            Prediction.
        reference (`list/array/tensor`, *optional*):
            Reference.

    Example:

    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    >>> accuracy.add(references=[0,1], predictions=[1,0])
    ```
    """
    feature_names = self._feature_names()
    unexpected = [name for name in kwargs if name not in feature_names]
    if unexpected:
        raise ValueError(
            f"Bad inputs for evaluation module: {unexpected}. All required inputs are {list(feature_names)}"
        )
    example = {"predictions": prediction, "references": reference, **kwargs}
    example = {name: example[name] for name in feature_names}
    if self.writer is None:
        # First example: infer the matching feature schema and open the writer.
        self.selected_feature_format = self._infer_feature_from_example(example)
        self._init_writer()
    try:
        self._enforce_nested_string_type(self.selected_feature_format, example)
        example = self.selected_feature_format.encode_example(example)
        self.writer.write(example)
    except (pa.ArrowInvalid, TypeError):
        error_msg = (
            f"Evaluation module inputs don't match the expected format.\n"
            f"Expected format: {self.selected_feature_format},\n"
        )
        error_msg += ",\n".join(
            f"Input {name}: {summarize_if_long_list(example[name])}"
            for name in self.selected_feature_format
        )
        raise ValueError(error_msg) from None
format.\n" + f"Expected format:\n{feature_strings},\n" + f"Input predictions: {summarize_if_long_list(example['predictions'])},\n" + f"Input references: {summarize_if_long_list(example['references'])}" + ) + raise ValueError(error_msg) from None + + def _feature_names(self): + if isinstance(self.features, list): + feature_names = list(self.features[0].keys()) + else: + feature_names = list(self.features.keys()) + return feature_names + + def _init_writer(self, timeout=1): + if self.num_process > 1: + if self.process_id == 0: + file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") + self.rendez_vous_lock = FileLock(file_path) + try: + self.rendez_vous_lock.acquire(timeout=timeout) + except TimeoutError: + raise ValueError( + f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. " + f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision " + f"between distributed evaluation module instances." 
def _init_writer(self, timeout=1):
    """Open the Arrow writer (in-memory buffer or locked cache file).

    In a multi-process setup, process 0 first takes the rendez-vous lock; once
    every process has locked its own cache file, process 0 releases the
    rendez-vous lock to let everyone proceed.

    Args:
        timeout (`int`, defaults to 1): seconds to wait for the rendez-vous lock.

    Raises:
        ValueError: if the rendez-vous lock cannot be acquired (another instance
            is using the same cache files).
    """
    if self.num_process > 1:
        if self.process_id == 0:
            file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
            self.rendez_vous_lock = FileLock(file_path)
            try:
                self.rendez_vous_lock.acquire(timeout=timeout)
            except TimeoutError:
                # Fix: error message previously misspelled "evalution".
                raise ValueError(
                    f"Error in _init_writer: another evaluation module instance is already using the local cache file at {file_path}. "
                    f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
                    f"between distributed evaluation module instances."
                ) from None

    if self.keep_in_memory:
        self.buf_writer = pa.BufferOutputStream()
        self.writer = ArrowWriter(
            features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
        )
    else:
        self.buf_writer = None

        # Get a cache file name and lock it for the lifetime of this writer.
        if self.cache_file_name is None or self.filelock is None:
            cache_file_name, filelock = self._create_cache_file()  # get ready
            self.cache_file_name = cache_file_name
            self.filelock = filelock

        self.writer = ArrowWriter(
            features=self.selected_feature_format,
            path=self.cache_file_name,
            writer_batch_size=self.writer_batch_size,
        )
    # Rendez-vous: wait for all processes to have locked their cache file.
    if self.num_process > 1:
        if self.process_id == 0:
            self._check_all_processes_locks()  # wait for everyone to be ready
            self.rendez_vous_lock.release()  # let everyone go
        else:
            self._check_rendez_vous()  # wait for master to be ready and to let everyone go
def download_and_prepare(
    self,
    download_config: Optional[DownloadConfig] = None,
    dl_manager: Optional[DownloadManager] = None,
):
    """Download and prepare the evaluation module for reading.

    Args:
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        dl_manager ([`DownloadManager`], *optional*):
            Specific download manager to use; when omitted, one is built from
            `download_config`.

    Example:

    ```py
    >>> import evaluate
    ```
    """
    if dl_manager is not None:
        self._download_and_prepare(dl_manager)
        return

    if download_config is None:
        # Default configuration: cache under the module's data dir, never force re-download.
        download_config = DownloadConfig()
        download_config.cache_dir = os.path.join(self.data_dir, "downloads")
        download_config.force_download = False

    dl_manager = DownloadManager(
        dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
    )
    self._download_and_prepare(dl_manager)
+ """ + # Nested structures: we allow dict, list, tuples, sequences + if isinstance(schema, dict): + return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)] + + elif isinstance(schema, (list, tuple)): + sub_schema = schema[0] + return [self._enforce_nested_string_type(sub_schema, o) for o in obj] + elif isinstance(schema, Sequence): + # We allow to reverse list of dict => dict of list for compatiblity with tfds + if isinstance(schema.feature, dict): + if isinstance(obj, (list, tuple)): + # obj is a list of dict + for k, dict_tuples in zip_dict(schema.feature, *obj): + for sub_obj in dict_tuples[1:]: + if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]): + self._enforce_nested_string_type(dict_tuples[0], sub_obj) + break + return None + else: + # obj is a single dict + for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj): + for sub_obj in sub_objs: + if _check_non_null_non_empty_recursive(sub_obj, sub_schema): + self._enforce_nested_string_type(sub_schema, sub_obj) + break + return None + # schema.feature is not a dict + if isinstance(obj, str): # don't interpret a string as a list + raise ValueError(f"Got a string but expected a list instead: '{obj}'") + if obj is None: + return None + else: + if len(obj) > 0: + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, schema.feature): + break + if not isinstance(first_elmt, list): + return self._enforce_nested_string_type(schema.feature, first_elmt) + + elif isinstance(schema, Value): + if pa.types.is_string(schema.pa_type) and not isinstance(obj, str): + raise TypeError(f"Expected type str but got {type(obj)}.") + + +class Metric(EvaluationModule): + """A Metric is the base class and common API for all metrics. + + Args: + config_name (`str`): + This is used to define a hash specific to a metric computation script and prevents the metric's data + to be overridden when the metric loading script is modified. 
class Comparison(EvaluationModule):
    """Base class and common API for all comparisons.

    Args:
        config_name (`str`):
            Defines a hash specific to a comparison computation script and prevents the
            comparison's data from being overridden when the comparison loading script is modified.
        keep_in_memory (`bool`):
            Keep all predictions and references in memory; not possible in distributed settings.
        cache_dir (`str`):
            Path to a directory where temporary prediction/references data will be stored.
            In distributed setups this directory should live on a shared file-system.
        num_process (`int`):
            Total number of nodes in a distributed setting; useful for computing comparisons
            in distributed setups (in particular non-additive comparisons).
        process_id (`int`):
            Id of the current process in a distributed setup (between 0 and num_process-1);
            useful for computing comparisons in distributed setups (in particular non-additive comparisons).
        seed (`int`, *optional*):
            If specified, temporarily sets numpy's random seed while [`~evaluate.Comparison.compute`] runs.
        experiment_id (`str`):
            A specific experiment id, used when several distributed evaluations share the same
            file system; useful for computing comparisons in distributed setups
            (in particular non-additive comparisons).
        max_concurrent_cache_files (`int`):
            Max number of concurrent comparison cache files (default `10000`).
        timeout (`Union[int, float]`):
            Timeout in seconds for distributed-setting synchronization.
    """
class CombinedEvaluations:
    """Bundle several evaluation modules behind the single-module API.

    `add`, `add_batch` and `compute` are forwarded to every wrapped module, and the
    per-module results are merged into one dictionary; scores are prefixed with the
    module name when keys collide or when `force_prefix=True`.

    Args:
        evaluation_modules (`Union[list, dict]`):
            A list or dict of modules, each given either as a loaded `EvaluationModule`
            or as a name to pass to `evaluate.load`. With a dict, the keys are used as
            the module names for prefixing.
        force_prefix (`bool`, *optional*, defaults to `False`):
            If `True`, all result keys are prefixed with their module's name.
    """

    def __init__(self, evaluation_modules, force_prefix=False):
        from .loading import load  # avoid circular imports

        self.evaluation_module_names = None
        if isinstance(evaluation_modules, list):
            self.evaluation_modules = evaluation_modules
        elif isinstance(evaluation_modules, dict):
            self.evaluation_modules = list(evaluation_modules.values())
            self.evaluation_module_names = list(evaluation_modules.keys())
        else:
            # Fail fast with a clear message instead of an AttributeError further down.
            raise TypeError(
                f"evaluation_modules should be a list or a dict, but got {type(evaluation_modules)}."
            )

        # Replace any module passed by name with the loaded module.
        self.evaluation_modules = [
            load(module) if isinstance(module, str) else module for module in self.evaluation_modules
        ]

        if self.evaluation_module_names is None:
            self.evaluation_module_names = [module.name for module in self.evaluation_modules]

        self.force_prefix = force_prefix

    def add(self, prediction=None, reference=None, **kwargs):
        """Add one prediction and reference to each wrapped module's stack.

        Args:
            prediction (`list/array/tensor`, *optional*):
                Prediction.
            reference (`list/array/tensor`, *optional*):
                Reference.

        Example:

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> f1 = evaluate.load("f1")
        >>> clf_metrics = combine(["accuracy", "f1"])
        >>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
        ...     clf_metrics.add(references=ref, predictions=pred)
        ```
        """
        all_inputs = {"predictions": prediction, "references": reference, **kwargs}
        for evaluation_module in self.evaluation_modules:
            # Each module only receives the inputs it declares.
            selected = {name: all_inputs[name] for name in evaluation_module._feature_names()}
            evaluation_module.add(**selected)

    def add_batch(self, predictions=None, references=None, **kwargs):
        """Add a batch of predictions and references to each wrapped module's stack.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.

        Example:
        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> f1 = evaluate.load("f1")
        >>> clf_metrics = combine(["accuracy", "f1"])
        >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
        ...     clf_metrics.add_batch(references=refs, predictions=preds)
        ```
        """
        all_inputs = {"predictions": predictions, "references": references, **kwargs}
        for evaluation_module in self.evaluation_modules:
            selected = {name: all_inputs[name] for name in evaluation_module._feature_names()}
            evaluation_module.add_batch(**selected)

    def compute(self, predictions=None, references=None, **kwargs):
        """Compute each wrapped evaluation module and merge the results.

        Prefer keyword arguments to avoid prediction/reference ordering mistakes.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.
            **kwargs (*optional*):
                Keyword arguments forwarded to each module's
                [`~evaluate.EvaluationModule.compute`] method.

        Return:
            `dict` or `None`

            - Merged dictionary with the results if run on the main process (`process_id == 0`).
            - `None` if not run on the main process (`process_id != 0`).

        Example:

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> f1 = evaluate.load("f1")
        >>> clf_metrics = combine(["accuracy", "f1"])
        >>> clf_metrics.compute(predictions=[0,1], references=[1,1])
        {'accuracy': 0.5, 'f1': 0.6666666666666666}
        ```
        """
        results = [
            evaluation_module.compute(predictions=predictions, references=references, **kwargs)
            for evaluation_module in self.evaluation_modules
        ]
        return self._merge_results(results)

    def _merge_results(self, results):
        """Merge per-module result dicts, prefixing keys that would otherwise collide."""
        merged_results = {}
        results_keys = list(itertools.chain.from_iterable([r.keys() for r in results]))
        duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1}

        duplicate_names = [
            item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1
        ]
        # Modules sharing a name additionally get a running integer id in the prefix.
        duplicate_counter = {name: 0 for name in duplicate_names}

        for module_name, result in zip(self.evaluation_module_names, results):
            for k, v in result.items():
                if k not in duplicate_keys and not self.force_prefix:
                    merged_results[f"{k}"] = v
                elif module_name in duplicate_counter:
                    merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v
                else:
                    merged_results[f"{module_name}_{k}"] = v

            if module_name in duplicate_counter:
                duplicate_counter[module_name] += 1

        return merged_results
def combine(evaluations, force_prefix=False):
    """Combine several metrics, comparisons, or measurements into one evaluation object.

    The returned `CombinedEvaluations` behaves like a single evaluation module.
    Scores that share a name across modules are prefixed with their module's name;
    modules that share a name additionally get an integer id appended — pass a
    dictionary to give them distinct names instead.

    Args:
        evaluations (`Union[list, dict]`):
            A list or dictionary of evaluation modules, given either as strings to load
            or as loaded `EvaluationModule`s. With a dictionary, the keys are used as
            the module names and the values are the modules; the names serve as
            prefixes when result keys overlap or when `force_prefix=True`.
        force_prefix (`bool`, *optional*, defaults to `False`):
            If `True`, every score is prefixed with its module's name (the dictionary
            key when a dictionary was passed, otherwise the module's own name).

    Examples:

    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    >>> f1 = evaluate.load("f1")
    >>> clf_metrics = combine(["accuracy", "f1"])
    ```
    """
    return CombinedEvaluations(evaluations, force_prefix=force_prefix)
+ +# Lint as: python3 +"""Utilities for file names.""" + +import itertools +import os +import re + + +_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])") +_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])") + +_single_underscore_re = re.compile(r"(?>> import evaluate + >>> result = {"bleu": 0.7} + >>> params = {"model": "gpt-2"} + >>> evaluate.save("./results/", **result, **params) + ``` + """ + current_time = datetime.now() + + file_path = _setup_path(path_or_file, current_time) + + data["_timestamp"] = current_time.isoformat() + data["_git_commit_hash"] = _git_commit_hash() + data["_evaluate_version"] = __version__ + data["_python_version"] = sys.version + data["_interpreter_path"] = sys.executable + + with FileLock(str(file_path) + ".lock"): + with open(file_path, "w") as f: + json.dump(data, f) + + # cleanup lock file + try: + os.remove(str(file_path) + ".lock") + except FileNotFoundError: + pass + + return file_path + + +def _setup_path(path_or_file, current_time): + path_or_file = Path(path_or_file) + is_file = len(path_or_file.suffix) > 0 + if is_file: + folder = path_or_file.parent + file_name = path_or_file.name + else: + folder = path_or_file + file_name = "result-" + current_time.strftime("%Y_%m_%d-%H_%M_%S") + ".json" + folder.mkdir(parents=True, exist_ok=True) + return folder / file_name + + +def _git_commit_hash(): + res = subprocess.run("git rev-parse --is-inside-work-tree".split(), cwd="./", stdout=subprocess.PIPE) + if res.stdout.decode().strip() == "true": + res = subprocess.run("git rev-parse HEAD".split(), cwd=os.getcwd(), stdout=subprocess.PIPE) + return res.stdout.decode().strip() + else: + return None diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/__init__.py b/venv/lib/python3.10/site-packages/evaluate/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fac692eeb80def645bce9901324902ac9e1899f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/utils/__init__.py @@ 
-0,0 +1,39 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +# Lint as: python3 +"""Util import.""" + +__all__ = [ + "disable_progress_bar", + "enable_progress_bar", + "is_progress_bar_enabled", + "infer_gradio_input_types", + "json_to_string_type", + "parse_readme", + "parse_gradio_data", + "parse_test_cases", + "launch_gradio_widget", +] + +from .gradio import ( + infer_gradio_input_types, + json_to_string_type, + launch_gradio_widget, + parse_gradio_data, + parse_readme, + parse_test_cases, +) +from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..460ca3276e3848ecff7be62441772a9c30ea1738 Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a3216ce4693e041d08e35c823dc01f07102028b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90550bc68d82ce45628fac641c0a19a1aefc90ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed616a53197e3f5c792354b9c36661320bc5519e Binary files /dev/null and b/venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/file_utils.py b/venv/lib/python3.10/site-packages/evaluate/utils/file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..53bbf1c5f10c701c9443267d051ca822358a93b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/utils/file_utils.py @@ -0,0 +1,611 @@ +""" +Utilities for working with the local dataset cache. +This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp +Copyright by the AllenNLP authors. 
+""" + +import copy +import io +import json +import os +import posixpath +import re +import shutil +import sys +import tempfile +import time +import urllib +from contextlib import closing, contextmanager +from functools import partial +from hashlib import sha256 +from pathlib import Path +from typing import List, Optional, Type, TypeVar, Union +from urllib.parse import urlparse + +import requests +from datasets import DownloadConfig +from datasets.utils.extract import ExtractManager +from datasets.utils.filelock import FileLock + +from .. import __version__, config +from . import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +INCOMPLETE_SUFFIX = ".incomplete" + +T = TypeVar("T", str, Path) + + +def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str: + """ + Add hf_modules_cache to the python path. + By default hf_modules_cache='~/.cache/huggingface/modules'. + It can also be set with the environment variable HF_MODULES_CACHE. + This is used to add modules such as `datasets_modules` + """ + hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE + hf_modules_cache = str(hf_modules_cache) + if hf_modules_cache not in sys.path: + sys.path.append(hf_modules_cache) + + os.makedirs(hf_modules_cache, exist_ok=True) + if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): + with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): + pass + return hf_modules_cache + + +def is_remote_url(url_or_filename: str) -> bool: + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https", "s3", "gs", "hdfs", "ftp") + + +def is_local_path(url_or_filename: str) -> bool: + # On unix the scheme of a local path is empty (for both absolute and relative), + # while on windows the scheme is the drive name (ex: "c") for absolute paths. 
def is_relative_path(url_or_filename: str) -> bool:
    """Return True for scheme-less paths that are not absolute on this OS."""
    has_scheme = urlparse(url_or_filename).scheme != ""
    return not has_scheme and not os.path.isabs(url_or_filename)
a repeatable way.
+    If `etag` is specified, append its hash to the url's, delimited
+    by a period.
+    If the url ends with .py, '.py' is appended to the hashed name
+    so the cached file keeps its Python module extension.
+    """
+    url_bytes = url.encode("utf-8")
+    url_hash = sha256(url_bytes)
+    filename = url_hash.hexdigest()
+
+    if etag:
+        etag_bytes = etag.encode("utf-8")
+        etag_hash = sha256(etag_bytes)
+        filename += "." + etag_hash.hexdigest()
+
+    if url.endswith(".py"):
+        filename += ".py"
+
+    return filename
+
+
+def cached_path(
+    url_or_filename,
+    download_config=None,
+    **download_kwargs,
+) -> str:
+    """
+    Given something that might be a URL (or might be a local path),
+    determine which. If it's a URL, download the file and cache it, and
+    return the path to the cached file. If it's already a local path,
+    make sure the file exists and then return the path.
+ + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + ValueError: if it couldn't parse the url or filename correctly + requests.exceptions.ConnectionError: in case of internet connection issue + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + + cache_dir = download_config.cache_dir or config.DOWNLOADED_EVALUATE_PATH + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + if isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + + if is_remote_url(url_or_filename): + # URL, so get it from the cache (downloading if necessary) + output_path = get_from_cache( + url_or_filename, + cache_dir=cache_dir, + force_download=download_config.force_download, + proxies=download_config.proxies, + resume_download=download_config.resume_download, + user_agent=download_config.user_agent, + local_files_only=download_config.local_files_only, + use_etag=download_config.use_etag, + max_retries=download_config.max_retries, + token=download_config.token, + download_desc=download_config.download_desc, + ) + elif os.path.exists(url_or_filename): + # File, and it exists. + output_path = url_or_filename + elif is_local_path(url_or_filename): + # File, but it doesn't exist. 
+ raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") + else: + # Something unknown + raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") + + if output_path is None: + return output_path + + if download_config.extract_compressed_file: + output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( + output_path, force_extract=download_config.force_extract + ) + + return output_path + + +def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: + ua = f"datasets/{__version__}; python/{config.PY_VERSION}" + ua += f"; pyarrow/{config.PYARROW_VERSION}" + if config.TORCH_AVAILABLE: + ua += f"; torch/{config.TORCH_VERSION}" + if config.TF_AVAILABLE: + ua += f"; tensorflow/{config.TF_VERSION}" + if config.JAX_AVAILABLE: + ua += f"; jax/{config.JAX_VERSION}" + if isinstance(user_agent, dict): + ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def get_authentication_headers_for_url(url: str, token: Optional[Union[str, bool]] = None) -> dict: + """Handle the HF authentication""" + headers = {} + if url.startswith(config.HF_ENDPOINT): + if token is False: + token = None + elif isinstance(token, str): + token = token + else: + from huggingface_hub import hf_api + + token = hf_api.HfFolder.get_token() + if token: + headers["authorization"] = f"Bearer {token}" + return headers + + +class OfflineModeIsEnabled(ConnectionError): + pass + + +def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): + """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_EVALUATE_OFFLINE is True.""" + if config.HF_EVALUATE_OFFLINE: + raise OfflineModeIsEnabled( + "Offline mode is enabled." if msg is None else "Offline mode is enabled. 
" + str(msg) + ) + + +def _retry( + func, + func_args: Optional[tuple] = None, + func_kwargs: Optional[dict] = None, + exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException, + status_codes: Optional[List[int]] = None, + max_retries: int = 0, + base_wait_time: float = 0.5, + max_wait_time: float = 2, +): + func_args = func_args or () + func_kwargs = func_kwargs or {} + retry = 0 + while True: + try: + return func(*func_args, **func_kwargs) + except exceptions as err: + if retry >= max_retries or (status_codes and err.response.status_code not in status_codes): + raise err + else: + sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff + logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry/max_retries}]") + time.sleep(sleep_time) + retry += 1 + + +def _request_with_retry( + method: str, + url: str, + max_retries: int = 0, + base_wait_time: float = 0.5, + max_wait_time: float = 2, + timeout: float = 10.0, + **params, +) -> requests.Response: + """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff. + + Note that if the environment variable HF_EVALUATE_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised. + + Args: + method (str): HTTP method, such as 'GET' or 'HEAD'. + url (str): The URL of the resource to fetch. + max_retries (int): Maximum number of retries, defaults to 0 (no retries). + base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between + retries then grows exponentially, capped by max_wait_time. + max_wait_time (float): Maximum amount of time between two retries, in seconds. + **params: Params to pass to :obj:`requests.request`. 
+ """ + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + tries, success = 0, False + while not success: + tries += 1 + try: + response = requests.request(method=method.upper(), url=url, timeout=timeout, **params) + success = True + except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err: + if tries > max_retries: + raise err + else: + logger.info(f"{method} request to {url} timed out, retrying... [{tries/max_retries}]") + sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff + time.sleep(sleep_time) + return response + + +def ftp_head(url, timeout=10.0): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + try: + with closing(urllib.request.urlopen(url, timeout=timeout)) as r: + r.read(1) + except Exception: + return False + return True + + +def ftp_get(url, temp_file, timeout=10.0): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + try: + logger.info(f"Getting through FTP {url} into {temp_file.name}") + with closing(urllib.request.urlopen(url, timeout=timeout)) as r: + shutil.copyfileobj(r, temp_file) + except urllib.error.URLError as e: + raise ConnectionError(e) from None + + +def http_get( + url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None +): + headers = copy.deepcopy(headers) or {} + headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) + if resume_size > 0: + headers["Range"] = f"bytes={resume_size:d}-" + response = _request_with_retry( + method="GET", + url=url, + stream=True, + proxies=proxies, + headers=headers, + cookies=cookies, + max_retries=max_retries, + timeout=timeout, + ) + if response.status_code == 416: # Range not satisfiable + return + content_length = response.headers.get("Content-Length") + total = resume_size + int(content_length) if content_length is not None else None + with logging.tqdm( + unit="B", + unit_scale=True, + total=total, + 
initial=resume_size, + desc=desc or "Downloading", + disable=not logging.is_progress_bar_enabled(), + ) as progress: + for chunk in response.iter_content(chunk_size=1024): + progress.update(len(chunk)) + temp_file.write(chunk) + + +def http_head( + url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0 +) -> requests.Response: + headers = copy.deepcopy(headers) or {} + headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) + response = _request_with_retry( + method="HEAD", + url=url, + proxies=proxies, + headers=headers, + cookies=cookies, + allow_redirects=allow_redirects, + timeout=timeout, + max_retries=max_retries, + ) + return response + + +def request_etag(url: str, token: Optional[Union[str, bool]] = None) -> Optional[str]: + headers = get_authentication_headers_for_url(url, token=token) + response = http_head(url, headers=headers, max_retries=3) + response.raise_for_status() + etag = response.headers.get("ETag") if response.ok else None + return etag + + +def get_from_cache( + url, + cache_dir=None, + force_download=False, + proxies=None, + etag_timeout=100, + resume_download=False, + user_agent=None, + local_files_only=False, + use_etag=True, + max_retries=0, + token=None, + download_desc=None, +) -> str: + """ + Given a URL, look for the corresponding file in the local cache. + If it's not there, download it. Then return the path to the cached file. 
+ + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + """ + if cache_dir is None: + cache_dir = config.HF_EVALUATE_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + os.makedirs(cache_dir, exist_ok=True) + + connected = False + response = None + cookies = None + etag = None + head_error = None + + # Try a first time to file the file on the local file system without eTag (None) + # if we don't ask for 'force_download' then we spare a request + filename = hash_url_to_filename(url, etag=None) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download and not use_etag: + return cache_path + + # Prepare headers for authentication + headers = get_authentication_headers_for_url(url, token=token) + if user_agent is not None: + headers["user-agent"] = user_agent + + # We don't have the file locally or we need an eTag + if not local_files_only: + if url.startswith("ftp://"): + connected = ftp_head(url) + try: + response = http_head( + url, + allow_redirects=True, + proxies=proxies, + timeout=etag_timeout, + max_retries=max_retries, + headers=headers, + ) + if response.status_code == 200: # ok + etag = response.headers.get("ETag") if use_etag else None + for k, v in response.cookies.items(): + # In some edge cases, we need to get a confirmation token + if k.startswith("download_warning") and "drive.google.com" in url: + url += "&confirm=" + v + cookies = response.cookies + connected = True + # Fix Google Drive URL to avoid Virus scan warning + if "drive.google.com" in url and "confirm=" not in url: + url += "&confirm=t" + # In some edge cases, head request returns 400 but the connection is actually ok + elif ( + (response.status_code == 400 and "firebasestorage.googleapis.com" in url) + or (response.status_code == 405 and "drive.google.com" in url) + or ( + 
response.status_code == 403 + and ( + re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url) + or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url) + ) + ) + or (response.status_code == 403 and "ndownloader.figstatic.com" in url) + ): + connected = True + logger.info(f"Couldn't get ETag version for url {url}") + elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None: + raise ConnectionError( + f"Unauthorized for URL {url}. Please use the parameter ``token=True`` after logging in with ``huggingface-cli login``" + ) + except (OSError, requests.exceptions.Timeout) as e: + # not connected + head_error = e + pass + + # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. + # try to get the last downloaded one + if not connected: + if os.path.exists(cache_path) and not force_download: + return cache_path + if local_files_only: + raise FileNotFoundError( + f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" + " disabled. To enable file online look-ups, set 'local_files_only' to False." + ) + elif response is not None and response.status_code == 404: + raise FileNotFoundError(f"Couldn't find file at {url}") + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + if head_error is not None: + raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") + elif response is not None: + raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") + else: + raise ConnectionError(f"Couldn't reach {url}") + + # Try a second time + filename = hash_url_to_filename(url, etag) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download: + return cache_path + + # From now on, connected is True. + # Prevent parallel downloads of the same file with a lock. 
+ lock_path = cache_path + ".lock" + with FileLock(lock_path): + + if resume_download: + incomplete_path = cache_path + ".incomplete" + + @contextmanager + def _resumable_file_manager(): + with open(incomplete_path, "a+b") as f: + yield f + + temp_file_manager = _resumable_file_manager + if os.path.exists(incomplete_path): + resume_size = os.stat(incomplete_path).st_size + else: + resume_size = 0 + else: + temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) + resume_size = 0 + + # Download to temporary file, then copy to cache dir once finished. + # Otherwise you get corrupt cache entries if the download gets interrupted. + with temp_file_manager() as temp_file: + logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") + + # GET file object + if url.startswith("ftp://"): + ftp_get(url, temp_file) + else: + http_get( + url, + temp_file, + proxies=proxies, + resume_size=resume_size, + headers=headers, + cookies=cookies, + max_retries=max_retries, + desc=download_desc, + ) + + logger.info(f"storing {url} in cache at {cache_path}") + shutil.move(temp_file.name, cache_path) + + logger.info(f"creating metadata file for {cache_path}") + meta = {"url": url, "etag": etag} + meta_path = cache_path + ".json" + with open(meta_path, "w", encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + return cache_path + + +def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") + return fn + + return docstring_decorator + + +def add_end_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) + return fn + + return docstring_decorator + + +def estimate_dataset_size(paths): + return sum(path.stat().st_size for path in paths) + + +def readline(f: io.RawIOBase): + # From: 
https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
+    res = bytearray()
+    while True:
+        b = f.read(1)
+        if not b:
+            break
+        res += b
+        if res.endswith(b"\n"):
+            break
+    return bytes(res)
diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/gradio.py b/venv/lib/python3.10/site-packages/evaluate/utils/gradio.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b73d9c67e711caad66edaf0f66808fff296aabd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/evaluate/utils/gradio.py
@@ -0,0 +1,131 @@
+import json
+import os
+import re
+import sys
+from pathlib import Path
+
+import numpy as np
+from datasets import Value
+
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+REGEX_YAML_BLOCK = re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]")
+
+
+def infer_gradio_input_types(feature_types):
+    """
+    Maps metric feature types to input types for gradio Dataframes:
+    - float/int -> numbers
+    - string -> strings
+    - any other -> json
+    Note that json is not a native gradio type but will be treated as string that
+    is then parsed as a json.
+    """
+    input_types = []
+    for feature_type in feature_types:
+        input_type = "json"
+        if isinstance(feature_type, Value):
+            if feature_type.dtype.startswith("int") or feature_type.dtype.startswith("float"):
+                input_type = "number"
+            elif feature_type.dtype == "string":
+                input_type = "str"
+        input_types.append(input_type)
+    return input_types
+
+
+def json_to_string_type(input_types):
+    """Maps json input type to str."""
+    return ["str" if i == "json" else i for i in input_types]
+
+
+def parse_readme(filepath):
+    """Parses a repository's README and removes the YAML front matter block."""
+    if not os.path.exists(filepath):
+        return "No README.md found."
+ with open(filepath, "r") as f: + text = f.read() + match = REGEX_YAML_BLOCK.search(text) + if match: + text = text[match.end() :] + return text + + +def parse_gradio_data(data, input_types): + """Parses data from gradio Dataframe for use in metric.""" + metric_inputs = {} + data.replace("", np.nan, inplace=True) + data.dropna(inplace=True) + for feature_name, input_type in zip(data, input_types): + if input_type == "json": + metric_inputs[feature_name] = [json.loads(d) for d in data[feature_name].to_list()] + elif input_type == "str": + metric_inputs[feature_name] = [d.strip('"') for d in data[feature_name].to_list()] + else: + metric_inputs[feature_name] = data[feature_name] + return metric_inputs + + +def parse_test_cases(test_cases, feature_names, input_types): + """ + Parses test cases to be used in gradio Dataframe. Note that an apostrophe is added + to strings to follow the format in json. + """ + if len(test_cases) == 0: + return None + examples = [] + for test_case in test_cases: + parsed_cases = [] + for feat, input_type in zip(feature_names, input_types): + if input_type == "json": + parsed_cases.append([str(element) for element in test_case[feat]]) + elif input_type == "str": + parsed_cases.append(['"' + element + '"' for element in test_case[feat]]) + else: + parsed_cases.append(test_case[feat]) + examples.append([list(i) for i in zip(*parsed_cases)]) + return examples + + +def launch_gradio_widget(metric): + """Launches `metric` widget with Gradio.""" + + try: + import gradio as gr + except ImportError as error: + logger.error("To create a metric widget with Gradio make sure gradio is installed.") + raise error + + local_path = Path(sys.path[0]) + # if there are several input types, use first as default. 
+ if isinstance(metric.features, list): + (feature_names, feature_types) = zip(*metric.features[0].items()) + else: + (feature_names, feature_types) = zip(*metric.features.items()) + gradio_input_types = infer_gradio_input_types(feature_types) + + def compute(data): + return metric.compute(**parse_gradio_data(data, gradio_input_types)) + + iface = gr.Interface( + fn=compute, + inputs=gr.inputs.Dataframe( + headers=feature_names, + col_count=len(feature_names), + row_count=1, + datatype=json_to_string_type(gradio_input_types), + ), + outputs=gr.outputs.Textbox(label=metric.name), + description=( + metric.info.description + "\nIf this is a text-based metric, make sure to wrap you input in double quotes." + " Alternatively you can use a JSON-formatted list as input." + ), + title=f"Metric: {metric.name}", + article=parse_readme(local_path / "README.md"), + # TODO: load test cases and use them to populate examples + # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)] + ) + + iface.launch() diff --git a/venv/lib/python3.10/site-packages/evaluate/utils/logging.py b/venv/lib/python3.10/site-packages/evaluate/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..8df58d3dcfb4c8b903b78c244387561dc659e423 --- /dev/null +++ b/venv/lib/python3.10/site-packages/evaluate/utils/logging.py @@ -0,0 +1,234 @@ +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Logging utilities. 
""" + +import logging +import os +from logging import CRITICAL # NOQA +from logging import DEBUG # NOQA +from logging import ERROR # NOQA +from logging import FATAL # NOQA +from logging import INFO # NOQA +from logging import NOTSET # NOQA +from logging import WARN # NOQA +from logging import WARNING # NOQA +from typing import Optional + +from tqdm import auto as tqdm_lib + + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_default_logging_level(): + """ + If EVALUATE_VERBOSITY env var is set to one of the valid choices return that as the new default level. + If it is not - fall back to ``_default_log_level`` + """ + env_level_str = os.getenv("EVALUATE_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option EVALUATE_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + # Apply our default configuration to the library root logger. + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(_get_default_logging_level()) + + +def _reset_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(logging.NOTSET) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """Return a logger with the specified name.""" + if name is None: + name = _get_library_name() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the Hugging Face Evaluate library's root logger. 
+ Returns: + Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`. + + + + Hugging Face Evaluate library has following logging levels: + - `evaluate.logging.CRITICAL`, `evaluate.logging.FATAL` + - `evaluate.logging.ERROR` + - `evaluate.logging.WARNING`, `evaluate.logging.WARN` + - `evaluate.logging.INFO` + - `evaluate.logging.DEBUG` + + + """ + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """Set the level for the Hugging Face Evaluate library's root logger. + Args: + verbosity: + Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`. + """ + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """Set the level for the Hugging Face Evaluate library's root logger to `INFO`. + + This will display most of the logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.INFO)`. + """ + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """Set the level for the Hugging Face Evaluate library's root logger to `WARNING`. + + This will display only the warning and errors logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.WARNING)`. + """ + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """Set the level for the Hugging Face Evaluate library's root logger to `DEBUG`. + + This will display all the logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.DEBUG)`. + """ + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """Set the level for the Hugging Face Evaluate library's root logger to `ERROR`. + + This will display only the errors logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.ERROR)`. + """ + return set_verbosity(ERROR) + + +def disable_propagation() -> None: + """Disable propagation of the library log outputs. 
+    Note that log propagation is disabled by default.
+    """
+    _get_library_root_logger().propagate = False
+
+
+def enable_propagation() -> None:
+    """Enable propagation of the library log outputs.
+    Please disable the Hugging Face Evaluate library's default handler to prevent double logging if the root logger has
+    been configured.
+    """
+    _get_library_root_logger().propagate = True
+
+
+# Configure the library root logger at the module level (singleton-like)
+_configure_library_root_logger()
+
+
+class EmptyTqdm:
+    """Dummy tqdm which doesn't do anything."""
+
+    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
+        self._iterator = args[0] if args else None
+
+    def __iter__(self):
+        return iter(self._iterator)
+
+    def __getattr__(self, _):
+        """Return empty function."""
+
+        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
+            return
+
+        return empty_fn
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type_, value, traceback):
+        return
+
+
+_tqdm_active = True
+
+
+class _tqdm_cls:
+    def __call__(self, *args, **kwargs):
+        if _tqdm_active:
+            return tqdm_lib.tqdm(*args, **kwargs)
+        else:
+            return EmptyTqdm(*args, **kwargs)
+
+    def set_lock(self, *args, **kwargs):
+        self._lock = None
+        if _tqdm_active:
+            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
+
+    def get_lock(self):
+        if _tqdm_active:
+            return tqdm_lib.tqdm.get_lock()
+
+
+tqdm = _tqdm_cls()
+
+
+def is_progress_bar_enabled() -> bool:
+    """Return a boolean indicating whether tqdm progress bars are enabled."""
+    global _tqdm_active
+    return bool(_tqdm_active)
+
+
+def enable_progress_bar():
+    """Enable tqdm progress bar."""
+    global _tqdm_active
+    _tqdm_active = True
+
+
+def disable_progress_bar():
+    """Disable tqdm progress bar."""
+    global _tqdm_active
+    _tqdm_active = False
diff --git a/venv/lib/python3.10/site-packages/evaluate/visualization.py b/venv/lib/python3.10/site-packages/evaluate/visualization.py
new file mode 100644
index
0000000000000000000000000000000000000000..b8be8605805e4d11ef93a2911ab32afe934a78bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/evaluate/visualization.py
@@ -0,0 +1,230 @@
+import textwrap
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+
+class ComplexRadar:
+    """Create a complex radar chart with different scales for each variable
+    Args:
+        fig (`matplotlib.figure`) : A matplotlib figure object to add the axes on.
+        variables (`list`) : a list of variables to plot.
+        ranges (`list` of `tuples`): A list of ranges (min, max) for each variable
+        n_ring_levels (`int`): Number of ordinate or ring levels to draw.
+            Default: 5.
+        show_scales (`bool`): Indicates if the ranges for each variable are plotted.
+            Default: True.
+        format_cfg (`dict`): A dictionary with formatting configurations.
+            Default: None.
+    Returns:
+        `matplotlib.figure.Figure`: a radar plot.
+    """
+
+    def __init__(self, fig, variables, ranges, n_ring_levels=5, show_scales=True, format_cfg=None):
+
+        self.format_cfg = format_cfg
+
+        # Calculate angles and create for each variable an axes
+        # Consider here the trick with having the first axes element twice (len+1)
+        angles = np.arange(0, 360, 360.0 / len(variables))
+        axes = [
+            fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True, label="axes{}".format(i), **self.format_cfg["axes_args"])
+            for i in range(len(variables) + 1)
+        ]
+
+        # Ensure clockwise rotation (first variable at the top N)
+        for ax in axes:
+            ax.set_theta_zero_location("N")
+            ax.set_theta_direction(-1)
+            ax.set_axisbelow(True)
+
+        # Writing the ranges on each axes
+        for i, ax in enumerate(axes):
+
+            # Here we do the trick by repeating the first iteration
+            j = 0 if (i == 0 or i == 1) else i - 1
+            ax.set_ylim(*ranges[j])
+            # Set endpoint to True if you like to have values right before the last circle
+            grid = np.linspace(*ranges[j], num=n_ring_levels, endpoint=self.format_cfg["incl_endpoint"])
+            gridlabel = ["{}".format(round(x, 2)) for x in grid]
+
gridlabel[0] = "" # remove values from the center + lines, labels = ax.set_rgrids( + grid, labels=gridlabel, angle=angles[j], **self.format_cfg["rgrid_tick_lbls_args"] + ) + + ax.set_ylim(*ranges[j]) + ax.spines["polar"].set_visible(False) + ax.grid(visible=False) + + if show_scales is False: + ax.set_yticklabels([]) + + # Set all axes except the first one unvisible + for ax in axes[1:]: + ax.patch.set_visible(False) + ax.xaxis.set_visible(False) + + # Setting the attributes + self.angle = np.deg2rad(np.r_[angles, angles[0]]) + self.ranges = ranges + self.ax = axes[0] + self.ax1 = axes[1] + self.plot_counter = 0 + + # Draw (inner) circles and lines + self.ax.yaxis.grid(**self.format_cfg["rad_ln_args"]) + # Draw outer circle + self.ax.spines["polar"].set(**self.format_cfg["outer_ring"]) + # Draw angle lines + self.ax.xaxis.grid(**self.format_cfg["angle_ln_args"]) + + # ax1 is the duplicate of axes[0] (self.ax) + # Remove everything from ax1 except the plot itself + self.ax1.axis("off") + self.ax1.set_zorder(9) + + # Create the outer labels for each variable + l, text = self.ax.set_thetagrids(angles, labels=variables) + + # Beautify them + labels = [t.get_text() for t in self.ax.get_xticklabels()] + labels = [ + "\n".join( + textwrap.wrap( + label, + self.format_cfg["theta_tick_lbls_txt_wrap"], + break_long_words=self.format_cfg["theta_tick_lbls_brk_lng_wrds"], + ) + ) + for label in labels + ] + self.ax.set_xticklabels(labels, **self.format_cfg["theta_tick_lbls"]) + + for t, a in zip(self.ax.get_xticklabels(), angles): + if a == 0: + t.set_ha("center") + elif a > 0 and a < 180: + t.set_ha("left") + elif a == 180: + t.set_ha("center") + else: + t.set_ha("right") + + self.ax.tick_params(axis="both", pad=self.format_cfg["theta_tick_lbls_pad"]) + + def _scale_data(self, data, ranges): + """Scales data[1:] to ranges[0]""" + for d, (y1, y2) in zip(data[1:], ranges[1:]): + assert (y1 <= d <= y2) or (y2 <= d <= y1) + x1, x2 = ranges[0] + d = data[0] + sdata = [d] + for d, 
(y1, y2) in zip(data[1:], ranges[1:]): + sdata.append((d - y1) / (y2 - y1) * (x2 - x1) + x1) + return sdata + + def plot(self, data, *args, **kwargs): + """Plots a line""" + sdata = self._scale_data(data, self.ranges) + self.ax1.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kwargs) + self.plot_counter = self.plot_counter + 1 + + def use_legend(self, *args, **kwargs): + """Shows a legend""" + self.ax1.legend(*args, **kwargs) + + +def radar_plot(data, model_names, invert_range=[], config=None, fig=None): + """Create a complex radar chart with different scales for each variable + Source: https://towardsdatascience.com/how-to-create-and-visualize-complex-radar-charts-f7764d0f3652 + + Args: + data (`List[dict]`): the results (list of metric + value pairs). + E.g. data = [{"accuracy": 0.9, "precision":0.8},{"accuracy": 0.7, "precision":0.6}] + names (`List[dict]`): model names. + E.g. names = ["model1", "model 2", ...] + invert_range (`List[dict]`, optional): the metrics to invert (in cases when smaller is better, e.g. speed) + E.g. invert_range=["latency_in_seconds"] + config (`dict`, optional) : a specification of the formatting configurations, namely: + + - rad_ln_args (`dict`, default `{"visible": True}`): The visibility of the radial (circle) lines. + + - outer_ring (`dict`, default `{"visible": True}`): The visibility of the outer ring. + + - angle_ln_args (`dict`, default `{"visible": True}`): The visibility of the angle lines. + + - rgrid_tick_lbls_args (`dict`, default `{"fontsize": 12}`): The font size of the tick labels on the scales. + + - theta_tick_lbls (`dict`, default `{"fontsize": 12}`): The font size of the variable labels on the plot. + + - theta_tick_lbls_pad (`int`, default `3`): The padding of the variable labels on the plot. + + - theta_tick_lbls_brk_lng_wrds (`bool`, default `True` ): Whether long words in the label are broken up or not. 
def radar_plot(data, model_names, invert_range=None, config=None, fig=None):
    """Create a complex radar chart with different scales for each variable.

    Source: https://towardsdatascience.com/how-to-create-and-visualize-complex-radar-charts-f7764d0f3652

    Args:
        data (`List[dict]`): the results (list of metric + value pairs).
            E.g. data = [{"accuracy": 0.9, "precision": 0.8}, {"accuracy": 0.7, "precision": 0.6}]
        model_names (`List[str]`): model names.
            E.g. model_names = ["model1", "model 2", ...]
        invert_range (`List[str]`, optional): the metrics to invert (in cases when smaller is better, e.g. speed)
            E.g. invert_range = ["latency_in_seconds"]
        config (`dict`, optional): a specification of the formatting configurations, namely:

            - rad_ln_args (`dict`, default `{"visible": True}`): The visibility of the radial (circle) lines.

            - outer_ring (`dict`, default `{"visible": True}`): The visibility of the outer ring.

            - angle_ln_args (`dict`, default `{"visible": True}`): The visibility of the angle lines.

            - rgrid_tick_lbls_args (`dict`, default `{"fontsize": 12}`): The font size of the tick labels on the scales.

            - theta_tick_lbls (`dict`, default `{"fontsize": 12}`): The font size of the variable labels on the plot.

            - theta_tick_lbls_pad (`int`, default `3`): The padding of the variable labels on the plot.

            - theta_tick_lbls_brk_lng_wrds (`bool`, default `True`): Whether long words in the label are broken up or not.

            - theta_tick_lbls_txt_wrap (`int`, default `15`): Text wrap for tick labels.

            - incl_endpoint (`bool`, default `False`): Include value endpoints on scale.

            - marker (`str`, default `"o"`): the shape of the marker used in the radar plot.

            - markersize (`int`, default `3`): the size of the marker used in the radar plot.

            - legend_loc (`str`, default `"upper right"`): the location of the legend in the radar plot. Must be one
              of: 'upper left', 'upper right', 'lower left', 'lower right'.

            - bbox_to_anchor (`tuple`, default `(2, 1)`): anchor for the legend.
        fig (`matplotlib.figure.Figure`, optional): figure used to plot the radar plot.

    Returns:
        `matplotlib.figure.Figure`

    Raises:
        ValueError: if a metric in `invert_range` is not present in `data`.
    """
    # `None` default avoids the shared-mutable-default-argument pitfall; the old
    # `invert_range=[]` default is still accepted by callers unchanged.
    if invert_range is None:
        invert_range = []
    data = pd.DataFrame(data)
    data.index = model_names
    variables = data.keys()
    if not all(x in variables for x in invert_range):
        raise ValueError("All of the metrics in `invert_range` should be in the data provided.")
    # Pad each variable's observed range by 10% so points don't sit on the border.
    # NOTE: the "max" padding intentionally reuses the already-shifted "min"
    # column, matching the original behavior.
    min_max_per_variable = data.describe().T[["min", "max"]]
    min_max_per_variable["min"] = min_max_per_variable["min"] - 0.1 * (
        min_max_per_variable["max"] - min_max_per_variable["min"]
    )
    min_max_per_variable["max"] = min_max_per_variable["max"] + 0.1 * (
        min_max_per_variable["max"] - min_max_per_variable["min"]
    )

    ranges = list(min_max_per_variable.itertuples(index=False, name=None))
    # Inverted metrics get a reversed (max, min) range so "smaller is better" points outward.
    ranges = [
        (max_value, min_value) if var in invert_range else (min_value, max_value)
        for var, (min_value, max_value) in zip(variables, ranges)
    ]
    format_cfg = {
        "axes_args": {},
        "rad_ln_args": {"visible": True},
        "outer_ring": {"visible": True},
        "angle_ln_args": {"visible": True},
        "rgrid_tick_lbls_args": {"fontsize": 12},
        "theta_tick_lbls": {"fontsize": 12},
        "theta_tick_lbls_pad": 3,
        "theta_tick_lbls_brk_lng_wrds": True,
        "theta_tick_lbls_txt_wrap": 15,
        "incl_endpoint": False,
        "marker": "o",
        "markersize": 3,
        "legend_loc": "upper right",
        "bbox_to_anchor": (2, 1),
    }
    if config is not None:
        format_cfg.update(config)

    if fig is None:
        fig = plt.figure()
    radar = ComplexRadar(
        fig,
        variables,
        ranges,
        n_ring_levels=3,
        show_scales=True,
        format_cfg=format_cfg,
    )
    # BUG FIX: the previous `for g in zip(data.index)` yielded 1-tuples, so the
    # legend labels rendered as e.g. "('model1',)". Iterate the index directly.
    for model_name in data.index:
        radar.plot(
            data.loc[model_name].values,
            label=model_name,
            marker=format_cfg["marker"],
            markersize=format_cfg["markersize"],
        )
    radar.use_legend(loc=format_cfg["legend_loc"], bbox_to_anchor=format_cfg["bbox_to_anchor"])
    return fig
def _int_or_none_list_arg_type(
    min_len: int, max_len: int, defaults: str, value: str, split_char: str = ","
):
    """Argparse `type=` helper: parse a `split_char`-separated string of ints / "none".

    Args:
        min_len: minimum number of accepted entries.
        max_len: maximum number of accepted entries (also the output length).
        defaults: `split_char`-separated default values used to pad short input.
        value: the raw CLI string to parse.
        split_char: separator between entries (default ",").

    Returns:
        list: `max_len` entries, each an `int` or `None`.

    Raises:
        argparse.ArgumentTypeError: on a non-integer/non-"none" entry or a
            wrong number of entries.
    """

    def parse_value(item):
        item = item.strip().lower()
        if item == "none":
            return None
        try:
            return int(item)
        except ValueError:
            raise argparse.ArgumentTypeError(f"{item} is not an integer or None")

    items = [parse_value(v) for v in value.split(split_char)]
    num_items = len(items)

    if num_items == 1:
        # Broadcast a single value so downstream handling is the same
        # for single and multiple values.
        items = items * max_len
    elif num_items < min_len or num_items > max_len:
        # BUG FIX: the message previously contained a corrupted literal
        # ("83,494 integers"); report the actual expected bounds instead.
        raise argparse.ArgumentTypeError(
            f"Argument requires between {min_len} and {max_len} integers or None, "
            f"separated by '{split_char}'"
        )
    elif num_items != max_len:
        logging.warning(
            f"Argument requires between {min_len} and {max_len} integers or None, "
            f"separated by '{split_char}'. "
            "Missing values will be filled with defaults."
        )
        default_items = [parse_value(v) for v in defaults.split(split_char)]
        # Extend the items list with the missing trailing defaults.
        items.extend(default_items[num_items:])

    return items


def check_argument_types(parser: argparse.ArgumentParser):
    """
    Check to make sure all CLI args are typed, raises error if not.

    Actions with a `const` (e.g. store_true/store_const) are implicitly typed
    and the built-in `help` action is skipped.
    """
    for action in parser._actions:
        if action.dest != "help" and not action.const:
            if action.type is None:
                raise ValueError(
                    f"Argument '{action.dest}' doesn't have a type specified."
                )
Default 1.", + ) + parser.add_argument( + "--max_batch_size", + type=int, + default=None, + metavar="N", + help="Maximal batch size to try with --batch_size auto.", + ) + parser.add_argument( + "--device", + type=str, + default=None, + help="Device to use (e.g. cuda, cuda:0, cpu).", + ) + parser.add_argument( + "--output_path", + "-o", + default=None, + type=str, + metavar="DIR|DIR/file.json", + help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.", + ) + parser.add_argument( + "--limit", + "-L", + type=float, + default=None, + metavar="N|0 argparse.Namespace: + check_argument_types(parser) + return parser.parse_args() + + +def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None: + if not args: + # we allow for args to be passed externally, else we parse them ourselves + parser = setup_parser() + args = parse_eval_args(parser) + + if args.wandb_args: + wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args)) + + eval_logger = utils.eval_logger + eval_logger.setLevel(getattr(logging, f"{args.verbosity}")) + eval_logger.info(f"Verbosity set to {args.verbosity}") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + + # update the evaluation tracker args with the output path and the HF token + if args.output_path: + args.hf_hub_log_args += f",output_path={args.output_path}" + if os.environ.get("HF_TOKEN", None): + args.hf_hub_log_args += f",token={os.environ.get('HF_TOKEN')}" + evaluation_tracker_args = simple_parse_args_string(args.hf_hub_log_args) + evaluation_tracker = EvaluationTracker(**evaluation_tracker_args) + + if args.predict_only: + args.log_samples = True + if (args.log_samples or args.predict_only) and not args.output_path: + raise ValueError( + "Specify --output_path if providing --log_samples or --predict_only" + ) + + if args.fewshot_as_multiturn and 
    args.apply_chat_template is False:
        # (completes the `if args.fewshot_as_multiturn and ...` check begun above)
        raise ValueError(
            "If fewshot_as_multiturn is set, apply_chat_template must be set to True."
        )

    # Multi-turn few-shot only makes sense with at least one shot.
    if (
        args.num_fewshot is None or args.num_fewshot == 0
    ) and args.fewshot_as_multiturn:
        raise ValueError(
            "If fewshot_as_multiturn is set, num_fewshot must be greater than 0."
        )

    if args.include_path is not None:
        eval_logger.info(f"Including path: {args.include_path}")
    task_manager = TaskManager(args.verbosity, include_path=args.include_path)

    if "push_samples_to_hub" in evaluation_tracker_args and not args.log_samples:
        eval_logger.warning(
            "Pushing samples to the Hub requires --log_samples to be set. Samples will not be pushed to the Hub."
        )

    if args.limit:
        eval_logger.warning(
            " --limit SHOULD ONLY BE USED FOR TESTING."
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    # Task resolution: `--tasks` may be a directory of YAML configs, the
    # literal "list" (print available tasks and exit), or a comma-separated
    # list of task names and/or paths to task config files.
    if args.tasks is None:
        eval_logger.error("Need to specify task to evaluate.")
        sys.exit()
    elif args.tasks == "list":
        eval_logger.info(
            "Available Tasks:\n - {}".format("\n - ".join(task_manager.all_tasks))
        )
        sys.exit()
    else:
        if os.path.isdir(args.tasks):
            import glob

            task_names = []
            yaml_path = os.path.join(args.tasks, "*.yaml")
            for yaml_file in glob.glob(yaml_path):
                config = utils.load_yaml_config(yaml_file)
                task_names.append(config)
        else:
            task_list = args.tasks.split(",")
            task_names = task_manager.match_tasks(task_list)
            # Entries that didn't match a registered task may be file paths.
            for task in [task for task in task_list if task not in task_names]:
                if os.path.isfile(task):
                    config = utils.load_yaml_config(task)
                    task_names.append(config)
            task_missing = [
                task for task in task_list if task not in task_names and "*" not in task
            ]  # we don't want errors if a wildcard ("*") task name was used

            if task_missing:
                missing = ", ".join(task_missing)
                eval_logger.error(
                    f"Tasks were not found: {missing}\n"
                    f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks",
                )
                raise ValueError(
                    f"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues."
                )

    # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args
    if args.trust_remote_code:
        eval_logger.info(
            "Passed `--trust_remote_code`, setting environment variable `HF_DATASETS_TRUST_REMOTE_CODE=true`"
        )
        # HACK: import datasets and override its HF_DATASETS_TRUST_REMOTE_CODE value internally,
        # because it's already been determined based on the prior env var before launching our
        # script--`datasets` gets imported by lm_eval internally before these lines can update the env.
        import datasets

        datasets.config.HF_DATASETS_TRUST_REMOTE_CODE = True

        args.model_args = args.model_args + ",trust_remote_code=True"

    eval_logger.info(f"Selected Tasks: {task_names}")

    request_caching_args = request_caching_arg_to_dict(
        cache_requests=args.cache_requests
    )

    # Run the evaluation proper; the four seed slots come from the
    # parsed `--seed` list (python, numpy, torch, fewshot sampler).
    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        max_batch_size=args.max_batch_size,
        device=args.device,
        use_cache=args.use_cache,
        limit=args.limit,
        check_integrity=args.check_integrity,
        write_out=args.write_out,
        log_samples=args.log_samples,
        evaluation_tracker=evaluation_tracker,
        system_instruction=args.system_instruction,
        apply_chat_template=args.apply_chat_template,
        fewshot_as_multiturn=args.fewshot_as_multiturn,
        gen_kwargs=args.gen_kwargs,
        task_manager=task_manager,
        verbosity=args.verbosity,
        predict_only=args.predict_only,
        random_seed=args.seed[0],
        numpy_random_seed=args.seed[1],
        torch_random_seed=args.seed[2],
        fewshot_random_seed=args.seed[3],
        **request_caching_args,
    )

    # Persist and report results (results is None on non-primary ranks).
    if results is not None:
        if args.log_samples:
            samples = results.pop("samples")
        dumped = json.dumps(
            results, indent=2, default=handle_non_serializable, ensure_ascii=False
        )
        if args.show_config:
            print(dumped)

        batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))

        # Add W&B logging
        if args.wandb_args:
            try:
                wandb_logger.post_init(results)
                wandb_logger.log_eval_result()
                if args.log_samples:
                    wandb_logger.log_eval_samples(samples)
            except Exception as e:
                eval_logger.info(f"Logging to Weights and Biases failed due to {e}")

        evaluation_tracker.save_results_aggregated(
            results=results, samples=samples if args.log_samples else None
        )

        if args.log_samples:
            for task_name, config in results["configs"].items():
                evaluation_tracker.save_results_samples(
                    task_name=task_name, samples=samples[task_name]
                )

        if (
            evaluation_tracker.push_results_to_hub
            or evaluation_tracker.push_samples_to_hub
        ):
            evaluation_tracker.recreate_metadata_card()

        print(
            f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
            f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
        )
        print(make_table(results))
        if "groups" in results:
            print(make_table(results, "groups"))

    if args.wandb_args:
        # Tear down wandb run once all the logging is done.
        wandb_logger.run.finish()


if __name__ == "__main__":
    cli_evaluate()
b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9b8bf9596ce2f1e4d83427028cea98a927072d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator_utils.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76b0a8dce810c21b4454f8cf1da7f119f5f6c691 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/evaluator_utils.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c137891b849a0f9ed24389c160d45a80d3f5db9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/__pycache__/utils.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48c35a701d4ba7edf8539f09d6c669c68e83acd9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__init__.py b/venv/lib/python3.10/site-packages/lm_eval/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea3d9d64d064a13d0ed1b543b17f18dee2d0b663 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/__init__.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa2322390a01da48592c76a2b64ad9f61752a2c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/filter.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/filter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8b5e0f86a4fff2573bf9c3c5c91e71344756dfa Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/filter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/filter.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/filter.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25416b3dde665fcd8733e72b8f425c276f049868 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/filter.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/instance.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/instance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dc4375e1795144f913ce8adda9d733763b98761 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/instance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/instance.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/instance.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd2ea42ce0f755d86e32597826531f2f5ce0402c Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/instance.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/metrics.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..052af6c515ac914a779121c133065ed08a76381d Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/metrics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/metrics.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/metrics.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e233e3b357f81cc5b98f9272702d54f19d9ee3de Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/metrics.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0cb92e9943d6ecfa08b4fc4454bf28af5d598ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/model.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/model.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..78f0074a144b78df784b19a330416b81c43138d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/model.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8488dbfbe35179479133dc584c97bd1274ec4a55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/registry.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/registry.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0feae4fa59da9d7c2d9c172c64f12f9fd6d3d8ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/registry.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/samplers.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/samplers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20bb2b66290d0094f7d56359b5d7b1b3dc48e1c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/samplers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/samplers.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/samplers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4412b372c1cba96dc2b2167a63206b766cbea5b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/samplers.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/__pycache__/task.cpython-310.pyc 
class Filter(ABC):
    """
    Base class for per-task post-processing of model responses.

    A Filter receives every model output for a task at once
    (`instance.resps` across all `task.instances`) and transforms them.
    Any number of filters, or chains of filters, may be configured per run.
    """

    def __init__(self, **kwargs) -> None:
        """
        Subclasses may override to hold per-instantiation state.
        """

    @abstractmethod
    def apply(self, resps: Union[List, Iterable], docs: List[dict]) -> Iterable:
        """
        Transform the list of `inst.resps` values taken from `Instance` objects.

        Implementations must return the (filtered) response lists *in the same
        order as they were input*, e.g. given [a, b] return [filtered_a, filtered_b].
        """
        return resps


@dataclass
class FilterEnsemble:
    """
    A named pipeline of filters applied in sequence.

    Intended to stack several post-processing steps; `task.apply_filters`
    keeps a list of FilterEnsembles and runs each pipeline independently.
    """

    name: str
    filters: List[Callable[[], Filter]]

    def apply(self, instances: List[Instance]) -> None:
        # Gather responses and docs in instance order.
        resps, docs = zip(*((inst.resps, inst.doc) for inst in instances))
        resps, docs = list(resps), list(docs)

        # Each entry in `filters` is a zero-arg factory; instantiate and run in order.
        for make_filter in self.filters:
            resps = make_filter().apply(resps, docs)

        # Store the final output on each source instance under this ensemble's
        # name; each FilterEnsemble in a run should use a distinct name.
        for inst, filtered in zip(instances, resps):
            inst.filtered_resps[self.name] = filtered


OutputType = Literal[
    "loglikelihood", "loglikelihood_rolling", "generate_until", "multiple_choice"
]


@dataclass
class Instance:
    """A single evaluation request plus its (eventual) model responses."""

    request_type: OutputType
    doc: dict
    arguments: tuple
    idx: int
    # metadata packs (task_name, doc_id, repeats); unpacked in __post_init__.
    metadata: Tuple[Optional[str], Optional[int], Optional[int]] = field(
        default_factory=lambda: (None, None, None)
    )
    resps: list = field(default_factory=list)
    filtered_resps: dict = field(default_factory=dict)

    # populated after init (from `metadata`)
    task_name: Optional[str] = None
    doc_id: Optional[int] = None
    repeats: Optional[int] = None

    def __post_init__(self) -> None:
        # Unpack the metadata triple into individual attributes.
        self.task_name, self.doc_id, self.repeats = self.metadata

    @property
    def args(self):
        """
        Returns (string,) where `string` is the string to calculate loglikelihood over.
        """
        if isinstance(self.arguments, tuple):
            return self.arguments
        return (self.arguments,)


eval_logger = logging.getLogger("lm-eval")


# Register Aggregations First
@register_aggregation("bypass")
def bypass_agg(arr):
    """Sentinel aggregation: always returns 999 (metric intentionally skipped)."""
    return 999


@register_aggregation("mean")
def mean(arr):
    """Arithmetic mean of `arr` (raises ZeroDivisionError on empty input)."""
    return sum(arr) / len(arr)


@register_aggregation("median")
def median(arr):
    """Middle element of `arr` (upper middle for even length). Does not sort."""
    return arr[len(arr) // 2]


# Certain metrics must be calculated across all documents in a benchmark.
# We use them as aggregation metrics, paired with no-op passthrough metric fns.
@register_aggregation("perplexity")
def perplexity(items):
    """Aggregate per-token negative log-likelihoods into perplexity: exp(-mean)."""
    return math.exp(-mean(items))


@register_aggregation("weighted_perplexity")
def weighted_perplexity(items):
    """Perplexity using the file's `weighted_mean` (log-likelihood, weight) pairs."""
    return math.exp(-weighted_mean(items))


@register_aggregation("bits_per_byte")
def bits_per_byte(items):
    """Convert weighted mean log-likelihood (nats) into bits per byte."""
    return -weighted_mean(items) / math.log(2)


@register_aggregation("f1")
def f1_score(items):
    """Binary F1 over (gold, pred) pairs via scikit-learn."""
    golds, preds = zip(*items)
    fscore = sklearn.metrics.f1_score(golds, preds)
    return np.max(fscore)


@register_aggregation("matthews_corrcoef")
def matthews_corrcoef(items):
    """Matthews correlation coefficient over (gold, pred) pairs."""
    golds, preds = zip(*items)
    return sklearn.metrics.matthews_corrcoef(golds, preds)


@register_aggregation("bleu")
def bleu(items):
    """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
    for evaluating a generated sentence to a reference sentence. It counts matching
    n-grams in the candidate translation to n-grams in the reference text, where
    1-gram or unigram would be each token and a bigram comparison would be each
    word pair. The comparison is made regardless of word order
    Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
    Paper: https://www.aclweb.org/anthology/P02-1040/

    Higher is better
    """
    refs, preds = zip(*items)
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_bleu(preds, refs).score


@register_aggregation("chrf")
def chrf(items):
    """chrF++ is a tool for automatic evaluation of machine translation output
    based on character n-gram precision and recall enhanced with word n-grams.
    Source: https://github.com/m-popovic/chrF
    Paper: https://www.aclweb.org/anthology/W15-3049.pdf

    Higher is better # TODO I think
    """
    refs, preds = zip(*items)
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_chrf(preds, refs).score


@register_aggregation("ter")
def ter(items):
    """Translation Error Rate is an error metric for machine translation that
    measures the number of edits required to change a system output into one
    of the references
    Source: http://www.cs.umd.edu/~snover/tercom/
    Paper: http://mt-archive.info/AMTA-2006-Snover.pdf

    Lower is better
    """
    refs, preds = zip(*items)
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_ter(preds, refs).score


@register_aggregation("brier_score")
def brier_score(items):
    """Mean squared distance between predicted class probabilities and the
    one-hot gold label (lower is better)."""
    gold, predictions = list(zip(*items))
    # Infer the class count from the prediction matrix shape.
    bs, num_class = np.array(predictions).shape

    gold = list(gold)
    gold_one_hot = np.eye(num_class)[gold]
    return np.mean(np.sum((predictions - gold_one_hot) ** 2, axis=1))


@register_metric(
    metric="brier_score",
    higher_is_better=False,
    output_type=["multiple_choice"],
    aggregation="brier_score",
)
def brier_score_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice"],
    aggregation="mean",
)
def acc_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_norm",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice"],
    aggregation="mean",
)
def acc_norm_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_mutual_info",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="mean",
)
def acc_mutual_info_fn(items):  # This is a passthrough function
    return items
### the code used in the `exact_match_hf_evaluate` function is ported from
### https://github.com/huggingface/evaluate/blob/main/metrics/exact_match/exact_match.py
### which is under the apache license.

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0


# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def exact_match_hf_evaluate(
    predictions,
    references,
    regexes_to_ignore=None,
    ignore_case=False,
    ignore_punctuation=False,
    ignore_numbers=False,
):
    """Fraction of predictions matching their reference exactly, after optional
    normalization (regex removal, case folding, punctuation/digit stripping).

    Returns:
        dict: {"exact_match": mean of elementwise string equality}
    """
    if regexes_to_ignore is not None:
        # Strip every ignored pattern from both sides before comparing.
        for pattern in regexes_to_ignore:
            predictions = np.array([re.sub(pattern, "", x) for x in predictions])
            references = np.array([re.sub(pattern, "", x) for x in references])
    else:
        predictions = np.asarray(predictions)
        references = np.asarray(references)

    if ignore_case:
        predictions = np.char.lower(predictions)
        references = np.char.lower(references)

    if ignore_punctuation:
        # Translation table deleting all ASCII punctuation.
        repl_table = string.punctuation.maketrans("", "", string.punctuation)
        predictions = np.char.translate(predictions, table=repl_table)
        references = np.char.translate(references, table=repl_table)

    if ignore_numbers:
        # Translation table deleting all ASCII digits.
        repl_table = string.digits.maketrans("", "", string.digits)
        predictions = np.char.translate(predictions, table=repl_table)
        references = np.char.translate(references, table=repl_table)

    matches = predictions == references

    return {"exact_match": np.mean(matches)}


###
output_type="generate_until", + aggregation="mean", +) +def exact_match_fn(**kwargs): + return exact_match_hf_evaluate(**kwargs) + + +@register_metric( + metric="perplexity", + higher_is_better=False, + output_type="loglikelihood", + aggregation="perplexity", +) +def perplexity_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="word_perplexity", + higher_is_better=False, + output_type="loglikelihood_rolling", + aggregation="weighted_perplexity", +) +def word_perplexity_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="byte_perplexity", + higher_is_better=False, + output_type="loglikelihood_rolling", + aggregation="weighted_perplexity", +) +def byte_perplexity_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="bits_per_byte", + higher_is_better=False, + output_type="loglikelihood_rolling", + aggregation="bits_per_byte", +) +def bits_per_byte_fn(items): # This is a passthrough function + return items + + +def pop_stddev(arr): + mu = mean(arr) + return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr)) + + +def sample_stddev(arr): + mu = mean(arr) + return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1)) + + +def mean_stderr(arr): + return sample_stddev(arr) / math.sqrt(len(arr)) + + +@register_metric( + metric="bypass", + higher_is_better=True, + output_type=["loglikelihood", "multiple_choice", "generate_until"], + aggregation="bypass", +) +def bypass(items): + return None + + +@register_metric( + metric="mcc", + higher_is_better=True, + output_type="multiple_choice", + aggregation="matthews_corrcoef", +) +def mcc_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="f1", + higher_is_better=True, + output_type="multiple_choice", + aggregation="f1", +) +def f1_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="bleu", + higher_is_better=True, + 
output_type="generate_until", + aggregation="bleu", +) +def bleu_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="chrf", + higher_is_better=True, + output_type="generate_until", + aggregation="chrf", +) +def chrf_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="ter", + higher_is_better=True, + output_type="generate_until", + aggregation="ter", +) +def ter_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="acc_all", + higher_is_better=True, + output_type="loglikelihood", + aggregation="mean", +) +def acc_all(items): + # Only count as correct if all answers are labeled correctly for each question + question_scoring_dict = {} + preds = list(zip(*items))[0] + docs = list(zip(*items))[1] + + for doc, pred in zip(docs, preds): + paragraph_id = doc["idx"]["paragraph"] + question_id = doc["idx"]["question"] + if (paragraph_id, question_id) not in question_scoring_dict: + question_scoring_dict[(paragraph_id, question_id)] = [] + + gold_label = doc["label"] == 1 + + question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred) + acc = np.mean([int(all(x)) for x in question_scoring_dict.values()]) + return acc + + +def acc_all_stderr(items): + # Only count as correct if all answers are labeled correctly for each question + question_scoring_dict = {} + preds = list(zip(*items))[0] + docs = list(zip(*items))[1] + + for doc, pred in zip(docs, preds): + question_id = doc["idx"]["question"] + if question_id not in question_scoring_dict: + question_scoring_dict[question_id] = [] + + gold_label = doc["label"] == 1 + question_scoring_dict[question_id].append(gold_label == pred) + + acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()]) + return acc + + +def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): + """Compute max metric between prediction and each ground truth.""" + scores_for_ground_truths = [] + 
for ground_truth in ground_truths: + score = metric_fn(prediction, ground_truth) + scores_for_ground_truths.append(score) + return max(scores_for_ground_truths) + + +def weighted_mean(items): + a, b = zip(*items) + return sum(a) / sum(b) + + +def is_non_str_iterable(obj): + return isinstance(obj, Iterable) and not isinstance(obj, str) + + +def _sacreformat(refs, preds): + """Format refs and preds for sacrebleu corpus calculation. It is very particular""" + # Sacrebleu expects (List[str], List[List[str]) + # e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...]) + + # Note [ref1_stream] is the first reference for each pred. + # So lists are size N and (M, N) for N preds and M possible refs for each pred + # This is a different order of dimensions that I would expect + + # We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds + # Must become List[List[str]] with the inner list corresponding to preds + if not is_non_str_iterable(refs): + refs = list(refs) + if not is_non_str_iterable(refs[0]): + refs = [[ref] for ref in refs] + refs = list(zip(*refs)) + # Note the number of refs in each ref list much match the number of preds + + # We expect preds to be List[str] or List[List[str]]. 
Must become List[str] + if not is_non_str_iterable(preds): + preds = list(preds) + if is_non_str_iterable(preds[0]): + assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}" + preds = [pred[0] for pred in preds] + + return refs, preds + + +# stderr stuff + + +class _bootstrap_internal: + def __init__(self, f, n) -> None: + self.f = f + self.n = n + + def __call__(self, v): + i, xs = v + rnd = random.Random() + rnd.seed(i) + res = [] + for _ in range(self.n): + res.append(self.f(rnd.choices(xs, k=len(xs)))) + return res + + +def bootstrap_stderr(f, xs, iters): + import multiprocessing as mp + + pool = mp.Pool(mp.cpu_count()) + # this gives a biased estimate of the stderr (i.e w/ the mean, it gives something + # equivalent to stderr calculated without Bessel's correction in the stddev. + # Unfortunately, I haven't been able to figure out what the right correction is + # to make the bootstrap unbiased - i considered multiplying by sqrt(n/(n-1)) but + # that would be ad-hoc and I can't prove that that would actually be an unbiased estimator) + # Thankfully, shouldn't matter because our samples are pretty big usually anyways + res = [] + chunk_size = min(1000, iters) + from tqdm import tqdm + + print("bootstrapping for stddev:", f.__name__) + for bootstrap in tqdm( + pool.imap( + _bootstrap_internal(f, chunk_size), + [(i, xs) for i in range(iters // chunk_size)], + ), + total=iters // chunk_size, + ): + # sample w replacement + res.extend(bootstrap) + + pool.close() + return sample_stddev(res) + + +def stderr_for_metric(metric, bootstrap_iters: int): + if bootstrap_iters <= 0: + # return no function (don't compute stderr) if bootstrap iters = 0 + return None + + bootstrappable = [ + median, + matthews_corrcoef, + f1_score, + perplexity, + bleu, + chrf, + ter, + ] + + if metric in bootstrappable: + return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters) + + stderr = {mean: mean_stderr, acc_all: acc_all_stderr} + + return stderr.get(metric, None) + + 
+def pooled_sample_stderr(stderrs: List[float], sizes: List[int]): + # Used to aggregate bootstrapped stderrs across subtasks in a group, + # when we are weighting by the size of each subtask. + # + + assert len(stderrs) == len(sizes) + + # formula source: https://en.wikipedia.org/wiki/Pooled_variance + # and: https://stats.stackexchange.com/a/4841331 + # this empirically seems to match running `stderr_for_metric` on all instances + # from the subtasks concatenated with each other. + pooled_sample_var = ( + sum([(size - 1) * stderr**2 * size for size, stderr in zip(sizes, stderrs)]) + ) / (sum(sizes) - len(sizes)) + + return np.sqrt(pooled_sample_var / sum(sizes)) + + +def combined_sample_stderr(stderrs: List[float], sizes: List[int], metrics=None): + assert ( + metrics is not None + ), "Need to pass a list of each subtask's metric for this stderr aggregation" + assert len(stderrs) == len(sizes) and len(sizes) == len(metrics) + + # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1390 for more documentation. + # This formula depends on sample means. + # removed because it seems to give erroneously huge stderrs for groupings of tasks + # and does not seem to match up with bootstrap-calculated stderrs for groups. 
+ + ### don't use this unless a statistician has told you it's the right thing to do ### + + # accumulators: we'll aggregate pairwise N - 1 times + variance = stderrs[0] ** 2 + curr_size = sizes[0] + curr_score = metrics[0] + + for stderr, size, score in zip(stderrs[1:], sizes[1:], metrics[1:]): + curr_score = ((curr_score * curr_size) + (score * size)) / ( + curr_size + size + ) # NOTE: this assumes our aggregation fn is "mean" + + variance = ((curr_size - 1) * variance + (size - 1) * (stderr**2)) / ( + curr_size + size - 1 + ) + curr_size * size / ((curr_size + size) * (curr_size + size - 1)) * ( + curr_score - score + ) ** 2 + + return np.sqrt(variance) + + +def aggregate_subtask_metrics(metrics, sizes, weight_by_size=True): + # A helper function that is used to aggregate + # subtask scores cross-task. + # TODO: does not hold for non-mean aggregations + if not weight_by_size: + sizes = [1] * len(sizes) + + assert len(metrics) == len(sizes) + + return sum([metric * size for metric, size in zip(metrics, sizes)]) / sum(sizes) diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/model.py b/venv/lib/python3.10/site-packages/lm_eval/api/model.py new file mode 100644 index 0000000000000000000000000000000000000000..5eecc617fbd530072557fac23a47af289ac85a98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/api/model.py @@ -0,0 +1,381 @@ +import abc +import hashlib +import json +import logging +import os +from typing import Dict, List, Optional, Tuple, Type, TypeVar + +import transformers +from sqlitedict import SqliteDict +from tqdm import tqdm + +from lm_eval import utils + + +eval_logger = logging.getLogger("lm-eval") + +T = TypeVar("T", bound="LM") + + +class LM(abc.ABC): + def __init__(self) -> None: + """Defines the interface that should be implemented by all LM subclasses. + LMs are assumed to take text (strings) as input and yield strings as output + (inputs/outputs should be tokenization-agnostic.) 
+ + """ + # set rank and world size to a single process, by default. + self._rank = 0 + self._world_size = 1 + self.cache_hook = CacheHook(None) + + @abc.abstractmethod + def loglikelihood(self, requests) -> List[Tuple[float, bool]]: + """Compute log-likelihood of generating a continuation from a context. + Downstream tasks should attempt to use loglikelihood instead of other + LM calls whenever possible. + + :param requests: list[Instance] + A list of Instance objects, with property `args` which returns a tuple (context, continuation). + `context: str` + Context string. Implementations of LM must be able to handle an + empty context string. + `continuation: str` + The continuation over which log likelihood will be calculated. If + there is a word boundary, the space should be in the continuation. + For example, context="hello" continuation=" world" is correct. + + :return: list[tuple[float, bool]] + A list of pairs (logprob, isgreedy) + `logprob: float` + The log probability of `continuation`. + `isgreedy`: + Whether `continuation` would be generated by greedy sampling from `context`. + """ + pass + + @abc.abstractmethod + def loglikelihood_rolling(self, requests) -> List[Tuple[float]]: + """Compute full log-likelihood of a string, with no truncation, for perplexity computation + - We will use the full max context length of the model. + - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to + the max context length. + - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations + which may simply concatenate multiple documents together. + - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into + multiple chunks, the last input will still a full-sized context. 
+ Example: + Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ] + Prefix: BOS/EOS + Max context length: 4 + Resulting input/prediction pairs: + + INPUT: BOS 0 1 2 + PRED: 0 1 2 3 + + INPUT: 3 4 5 6 + PRED: 4 5 6 7 + + INPUT: 5 6 7 8 + PRED: 8 9 + + Observe that: + 1. Each token is predicted exactly once + 2. For the last pair, we provide the full context, but only score the last two tokens + + :param requests: list[Instance] + A list of Instance objects with property `args` which returns a tuple (context,). + string: str + String for which we are computing overall loglikelihood + :return: list[tuple[float]] + A list of tuples (logprob,) + logprob: float + The log probability of `context` conditioned on the BOS/EOS token. + Can also be overridden for custom cases by `prefix_token_id`. + """ + pass + + # TODO: Add an optional max length + @abc.abstractmethod + def generate_until(self, requests) -> List[str]: + """Generate greedily until a stopping sequence + + :param requests: list[Instance] + A list of Instance objects with property `args` which returns a tuple (context, until). + context: str + Context string + until: [str] + The string sequences to generate until. These string sequences + may each span across multiple tokens, or may be part of one token. + :return: list[str] + A list of strings continuation + continuation: str + The generated continuation. + """ + pass + + def apply_chat_template(self, chat_history: List[Dict[str, str]]) -> str: + """ + Defines how to transform few-shot examples provided as chat history into a format that can be used as input to the LM. + + :param chat_history: list[dict[str, str]] + A list of dictionaries with keys 'role' and 'content'. + Values are strings representing the role name and the content of the message, respectively. + :return: str + A string representing the chat history in a format that can be used as input to the LM. 
+ """ + raise NotImplementedError( + "To use this model with chat templates, please implement the 'apply_chat_template' method for your model type." + ) + + @classmethod + def create_from_arg_string( + cls: Type[T], arg_string: str, additional_config: Optional[dict] = None + ) -> T: + """ + Creates an instance of the LM class using the given argument string and additional config. + + Parameters: + - arg_string: A string containing arguments in the format key1=value1,key2=value2. + - additional_config: Optional dictionary containing additional configuration parameters. + + Returns: + - Instance of the LM class. + """ + additional_config = {} if additional_config is None else additional_config + args = utils.simple_parse_args_string(arg_string) + args2 = {k: v for k, v in additional_config.items() if v is not None} + return cls(**args, **args2) + + @classmethod + def create_from_arg_obj( + cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None + ) -> T: + """ + Creates an instance of the LM class using the given arg_obj + + Parameters: + - arg_obj: A dict containing arguments in the format key1=value1,key2=value2. + - additional_config: Optional dictionary containing additional configuration parameters. + + Returns: + - Instance of the LM class. + """ + + additional_config = {} if additional_config is None else additional_config + additional_config = { + k: v for k, v in additional_config.items() if v is not None + } + + return cls(**arg_dict, **additional_config) + + @property + def rank(self): + # used in the case of parallelism. Hardcoded to + # ensure no errors arise using API models which do + # not support multi-device parallelism nor expect it. + return self._rank + + @property + def world_size(self): + # used in the case of parallelism. Hardcoded to + # ensure no errors arise using API models which do + # not support multi-device parallelism nor expect it. 
+ return self._world_size + + @property + def tokenizer_name(self) -> str: + """Must be defined for LM subclasses which implement Chat Templating. + Should return the name of the tokenizer or chat template used. + Used only to properly fingerprint caches when requests are being cached with `--cache_requests`, otherwise not used. + """ + raise NotImplementedError( + "To use this model with chat templates, please implement the 'tokenizer_name' property." + ) + + @property + def chat_template(self) -> str: + """Must be defined for LM subclasses that implement Chat Templating. + Should return the structure of the chat template applied to user/assistant messages. + This is used only to save in the experiment results for reproducibility. + """ + raise NotImplementedError( + "To use this model with chat templates, please implement the 'chat_template' property." + ) + + def set_cache_hook(self, cache_hook) -> None: + self.cache_hook = cache_hook + + +### SQLite-based caching of LM responses +def hash_args(attr, args): + dat = json.dumps([attr] + list(args)) + return hashlib.sha256(dat.encode("utf-8")).hexdigest() + + +class CacheHook: + def __init__(self, cachinglm) -> None: + if cachinglm is None: + self.dbdict = None + return + + self.dbdict = cachinglm.dbdict + + def add_partial(self, attr, req, res) -> None: + if self.dbdict is None: + return + hsh = hash_args(attr, req) + self.dbdict[hsh] = res + + +class CachingLM: + def __init__(self, lm, cache_db) -> None: + """LM wrapper that returns cached results if they exist, and uses the underlying LM if not. 
+ + :param lm: LM + Underlying LM + :param cache_db: str + Path to cache db + """ + self.lm = lm + self.cache_db = cache_db + if os.path.dirname(cache_db): + os.makedirs(os.path.dirname(cache_db), exist_ok=True) + self.dbdict = SqliteDict(cache_db, autocommit=True) + + # add hook to lm + lm.set_cache_hook(self.get_cache_hook()) + + def __getattr__(self, attr: str): + lm_attr = getattr(self.lm, attr) + if attr not in ["loglikelihood", "loglikelihood_rolling", "generate_until"]: + eval_logger.debug(f"Passing through attribute '{attr}' to underlying LM") + return lm_attr + + def fn(requests): + res = [] + remaining_reqs = [] + warned = False + # figure out which ones are cached and which ones are new + eval_logger.info( + f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..." + ) + for req in tqdm(requests, desc="Checking cached requests"): + hsh = hash_args(attr, req.args) + if attr == "generate_until" and req.args[1].get("do_sample", False): + # when we are doing non-greedy generation, don't use the cache + # (else every "randomly sampled" generation would be identical for repeats > 1). + if not warned: + eval_logger.warning( + f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests." 
+ ) + warned = True + res.append(None) + remaining_reqs.append(req) + elif hsh in self.dbdict: + ob = self.dbdict[hsh] + + assert ob is not None + + res.append(ob) + else: + res.append(None) + remaining_reqs.append(req) + eval_logger.info( + f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}" + ) + # actually run the LM on the requests that do not have cached results + rem_res = getattr(self.lm, attr)(remaining_reqs) + + # stick the new ones back into the list and also cache any of the new ones + resptr = 0 + for req, r in zip(remaining_reqs, rem_res): + while res[resptr] is not None: + resptr += 1 + + res[resptr] = r + + # caching + hsh = hash_args(attr, req.args) + self.dbdict[hsh] = r + self.dbdict.commit() + + return res + + return fn + + def get_cache_hook(self): + return CacheHook(self) + + +class TemplateLM(LM): + """ + A class acting as intermediary between the LM base class + and boilerplate often included in other LM subclasses. 
+ """ + + @property + @abc.abstractmethod + def eot_token_id(self): + pass + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + return self.eot_token_id + + @abc.abstractmethod + def tok_encode(self, string: str, **kwargs): + pass + + @abc.abstractmethod + def _loglikelihood_tokens(self, requests, **kwargs): + pass + + def _encode_pair(self, context, continuation): + n_spaces = len(context) - len(context.rstrip()) + if n_spaces > 0: + continuation = context[-n_spaces:] + continuation + context = context[:-n_spaces] + + model_class = getattr(self, "AUTO_MODEL_CLASS", None) + + if model_class == transformers.AutoModelForSeq2SeqLM: + context_enc = self.tok_encode(context) + continuation_enc = self.tok_encode(continuation, add_special_tokens=False) + else: + whole_enc = self.tok_encode(context + continuation) + context_enc = self.tok_encode(context) + + context_enc_len = len(context_enc) + continuation_enc = whole_enc[context_enc_len:] + + return context_enc, continuation_enc + + def loglikelihood( + self, requests, disable_tqdm: bool = False + ) -> List[Tuple[float, bool]]: + new_reqs = [] + for context, continuation in [req.args for req in requests]: + if context == "": + # BOS or EOS as context + context_enc, continuation_enc = ( + [self.prefix_token_id], + self.tok_encode(continuation), + ) + else: + context_enc, continuation_enc = self._encode_pair(context, continuation) + + new_reqs.append(((context, continuation), context_enc, continuation_enc)) + + return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm) + + @abc.abstractmethod + def loglikelihood_rolling( + self, requests, disable_tqdm: bool = False + ) -> List[Tuple[float, bool]]: + pass + + @abc.abstractmethod + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + pass diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/registry.py b/venv/lib/python3.10/site-packages/lm_eval/api/registry.py new file mode 100644 index 
0000000000000000000000000000000000000000..7446a429e61d9b287c384b5be5db2a258ea83ae8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/api/registry.py @@ -0,0 +1,192 @@ +import logging +from typing import Callable, Dict + +import evaluate as hf_evaluate + +from lm_eval.api.model import LM + + +eval_logger = logging.getLogger("lm-eval") + +MODEL_REGISTRY = {} + + +def register_model(*names): + # either pass a list or a single alias. + # function receives them as a tuple of strings + + def decorate(cls): + for name in names: + assert issubclass( + cls, LM + ), f"Model '{name}' ({cls.__name__}) must extend LM class" + + assert ( + name not in MODEL_REGISTRY + ), f"Model named '{name}' conflicts with existing model! Please register with a non-conflicting alias instead." + + MODEL_REGISTRY[name] = cls + return cls + + return decorate + + +def get_model(model_name): + try: + return MODEL_REGISTRY[model_name] + except KeyError: + raise ValueError( + f"Attempted to load model '{model_name}', but no model for this name found! Supported model names: {', '.join(MODEL_REGISTRY.keys())}" + ) + + +TASK_REGISTRY = {} +GROUP_REGISTRY = {} +ALL_TASKS = set() +func2task_index = {} + + +def register_task(name): + def decorate(fn): + assert ( + name not in TASK_REGISTRY + ), f"task named '{name}' conflicts with existing registered task!" 
+ + TASK_REGISTRY[name] = fn + ALL_TASKS.add(name) + func2task_index[fn.__name__] = name + return fn + + return decorate + + +def register_group(name): + def decorate(fn): + func_name = func2task_index[fn.__name__] + if name in GROUP_REGISTRY: + GROUP_REGISTRY[name].append(func_name) + else: + GROUP_REGISTRY[name] = [func_name] + ALL_TASKS.add(name) + return fn + + return decorate + + +OUTPUT_TYPE_REGISTRY = {} +METRIC_REGISTRY = {} +METRIC_AGGREGATION_REGISTRY = {} +AGGREGATION_REGISTRY: Dict[str, Callable[[], Dict[str, Callable]]] = {} +HIGHER_IS_BETTER_REGISTRY = {} +FILTER_REGISTRY = {} + +DEFAULT_METRIC_REGISTRY = { + "loglikelihood": [ + "perplexity", + "acc", + ], + "loglikelihood_rolling": ["word_perplexity", "byte_perplexity", "bits_per_byte"], + "multiple_choice": ["acc", "acc_norm"], + "generate_until": ["exact_match"], +} + + +def register_metric(**args): + # TODO: do we want to enforce a certain interface to registered metrics? + def decorate(fn): + assert "metric" in args + name = args["metric"] + + for key, registry in [ + ("metric", METRIC_REGISTRY), + ("higher_is_better", HIGHER_IS_BETTER_REGISTRY), + ("aggregation", METRIC_AGGREGATION_REGISTRY), + ]: + if key in args: + value = args[key] + assert ( + value not in registry + ), f"{key} named '{value}' conflicts with existing registered {key}!" + + if key == "metric": + registry[name] = fn + elif key == "aggregation": + registry[name] = AGGREGATION_REGISTRY[value] + else: + registry[name] = value + + return fn + + return decorate + + +def get_metric(name: str, hf_evaluate_metric=False) -> Callable: + if not hf_evaluate_metric: + if name in METRIC_REGISTRY: + return METRIC_REGISTRY[name] + else: + eval_logger.warning( + f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..." + ) + + try: + metric_object = hf_evaluate.load(name) + return metric_object.compute + except Exception: + eval_logger.error( + f"{name} not found in the evaluate library! 
Please check https://huggingface.co/evaluate-metric", + ) + + +def register_aggregation(name: str): + def decorate(fn): + assert ( + name not in AGGREGATION_REGISTRY + ), f"aggregation named '{name}' conflicts with existing registered aggregation!" + + AGGREGATION_REGISTRY[name] = fn + return fn + + return decorate + + +def get_aggregation(name: str) -> Callable[[], Dict[str, Callable]]: + try: + return AGGREGATION_REGISTRY[name] + except KeyError: + eval_logger.warning(f"{name} not a registered aggregation metric!") + + +def get_metric_aggregation(name: str) -> Callable[[], Dict[str, Callable]]: + try: + return METRIC_AGGREGATION_REGISTRY[name] + except KeyError: + eval_logger.warning(f"{name} metric is not assigned a default aggregation!") + + +def is_higher_better(metric_name) -> bool: + try: + return HIGHER_IS_BETTER_REGISTRY[metric_name] + except KeyError: + eval_logger.warning( + f"higher_is_better not specified for metric '{metric_name}'!" + ) + + +def register_filter(name): + def decorate(cls): + if name in FILTER_REGISTRY: + eval_logger.info( + f"Registering filter `{name}` that is already in Registry {FILTER_REGISTRY}" + ) + FILTER_REGISTRY[name] = cls + return cls + + return decorate + + +def get_filter(filter_name: str) -> type: + try: + return FILTER_REGISTRY[filter_name] + except KeyError: + eval_logger.warning(f"filter `{filter_name}` is not registered!") diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/samplers.py b/venv/lib/python3.10/site-packages/lm_eval/api/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..74513bbc9775258906cb267f618823f595330620 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/api/samplers.py @@ -0,0 +1,166 @@ +import datasets + + +class ContextSampler: + def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None: + self.rnd = rnd + if not self.rnd: + raise ValueError( + "A `random.Random` generator argument must be provided to `rnd` of FewShotSampler!" 
+ ) + + self.task = task + self.config = task._config + + self.target_delimiter = self.config.target_delimiter + self.fewshot_delimiter = self.config.fewshot_delimiter + + self.doc_to_text = self.task.doc_to_text + self.doc_to_target = self.task.doc_to_target + self.doc_to_choice = self.task.doc_to_choice + + self.docs = docs # HF dataset split, provided by task._fewshot_docs() + if fewshot_indices: # subset few-shot docs from + if not isinstance(self.docs, datasets.Dataset): + raise ValueError( + "Got `fewshot_indices` but fewshot_docs are not a HF dataset. Don't use both `fewshot_indices` and a user-defined few-shot sample list simultaneously" + ) + self.docs = self.docs.select(fewshot_indices) + + def get_context(self, doc, num_fewshot): + # draw an extra fewshot sample if using same split as evaluating on + n_samples = ( + num_fewshot + 1 + if self.config.fewshot_split == self.config.test_split + else num_fewshot + ) + + # draw `n_samples` docs from fewshot_docs + fewshotex = self.sample(n_samples) + + # get rid of the doc that's the one we're evaluating, if it's in the fewshot + # TODO: should we just stop people from using fewshot from same split as evaluating? 
+ selected_docs = [x for x in fewshotex if x != doc][:num_fewshot] + + labeled_examples = "" + for doc in selected_docs: + doc_content = self.doc_to_text(doc) + doc_target = self.doc_to_target(doc) + labeled_examples += ( + doc_content + if self.config.doc_to_choice is None or isinstance(doc_content, str) + else self.doc_to_choice(doc)[doc_content] + ) + labeled_examples += self.target_delimiter + labeled_examples += ( + str(doc_target[0]) + if isinstance(doc_target, list) + else doc_target + if self.config.doc_to_choice is None or isinstance(doc_target, str) + else str(self.doc_to_choice(doc)[doc_target]) + ) + labeled_examples += self.fewshot_delimiter + + return labeled_examples + + def get_chat_context( + self, + doc, + num_fewshot, + fewshot_as_multiturn: bool = False, + ): + chat_history = [] + # draw an extra fewshot sample if using same split as evaluating on + n_samples = ( + num_fewshot + 1 + if self.config.fewshot_split == self.config.test_split + else num_fewshot + ) + # draw `n_samples` docs from fewshot_docs + fewshotex = self.sample(n_samples) + + # get rid of the doc that's the one we're evaluating, if it's in the fewshot + # TODO: should we just stop people from using fewshot from same split as evaluating? 
+ selected_docs = [x for x in fewshotex if x != doc][:num_fewshot] + + if fewshot_as_multiturn: + for doc in selected_docs: + doc_content = self.doc_to_text(doc) + doc_target = self.doc_to_target(doc) + chat_history.append( + { + "role": "user", + "content": doc_content + if self.config.doc_to_choice is None + or isinstance(doc_content, str) + else self.doc_to_choice(doc)[doc_content], + } + ) + chat_history.append( + { + "role": "assistant", + "content": str(doc_target[0]) + if isinstance(doc_target, list) + else doc_target + if self.config.doc_to_choice is None + or isinstance(doc_target, str) + else str(self.doc_to_choice(doc)[doc_target]), + } + ) + else: + # get fewshot context as one user turn + chat_history.append( + {"role": "user", "content": self.get_context(doc, num_fewshot)} + ) + + return chat_history + + def sample(self, n): + """ + Draw `n` samples from our fewshot docs. This method should be overridden by subclasses. + """ + + return self.rnd.sample(self.docs, n) + + +class FirstNSampler(ContextSampler): + def sample(self, n) -> None: + """ + Draw the first `n` samples in order from the specified split. + Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU. + """ + assert ( + n <= len(self.docs) + ), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available." + return self.docs[:n] + + +class BalancedSampler(ContextSampler): + def sample(self, n) -> None: + """ + TODO: this should return approximately class-balanced samples from our fewshot examples. + TODO: what order should they be in? maybe random? + """ + + pass + + +class ManualSampler(ContextSampler): + def sample(self, n) -> None: + """ """ + pass + + +SAMPLER_REGISTRY = { + "default": ContextSampler, + "first_n": FirstNSampler, +} + + +def get_sampler(name): + try: + return SAMPLER_REGISTRY[name] + except KeyError: + raise ValueError( + f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! 
Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}" + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/api/task.py b/venv/lib/python3.10/site-packages/lm_eval/api/task.py new file mode 100644 index 0000000000000000000000000000000000000000..ccfda5090641c1b58dbaad8e6ff06bdbad7df8f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/api/task.py @@ -0,0 +1,1653 @@ +import abc +import ast +import logging +import random +import re +from collections.abc import Callable +from copy import deepcopy +from dataclasses import asdict, dataclass +from inspect import getsource +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Literal, + Mapping, + Optional, + Tuple, + Union, +) + +import datasets +import numpy as np +from tqdm import tqdm + +from lm_eval import utils +from lm_eval.api import samplers +from lm_eval.api.instance import Instance, OutputType +from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity +from lm_eval.api.registry import ( + AGGREGATION_REGISTRY, + DEFAULT_METRIC_REGISTRY, + get_aggregation, + get_metric, + get_metric_aggregation, + is_higher_better, +) +from lm_eval.caching.cache import load_from_cache, save_to_cache +from lm_eval.filters import build_filter_ensemble +from lm_eval.prompts import get_prompt + + +ALL_OUTPUT_TYPES = [ + "loglikelihood", + "multiple_choice", + "loglikelihood_rolling", + "generate_until", +] + +eval_logger = logging.getLogger("lm-eval") + + +@dataclass +class TaskConfig(dict): + # task naming/registry + task: Optional[str] = None + task_alias: Optional[str] = None + group: Optional[Union[str, list]] = None + group_alias: Optional[Union[str, list]] = None + # HF dataset options. 
+ # which dataset to use, + # and what splits for what purpose + dataset_path: Optional[str] = None + dataset_name: Optional[str] = None + dataset_kwargs: Optional[dict] = None + training_split: Optional[str] = None + validation_split: Optional[str] = None + test_split: Optional[str] = None + fewshot_split: Optional[str] = ( + None # TODO: assert that this not None if num_fewshot > 0. (?) assert if this is same split as one evaling (?) + ) + # formatting / prompting options. + # see docs/advanced_task_guide.md for more info + process_docs: Optional[Callable] = None + doc_to_text: Optional[Union[Callable, str]] = None + doc_to_target: Optional[Union[Callable, str]] = None + doc_to_choice: Optional[Union[Callable, str, dict, list]] = None + process_results: Optional[Union[Callable, str]] = None + use_prompt: Optional[str] = None + description: str = "" + target_delimiter: str = " " + fewshot_delimiter: str = "\n\n" + fewshot_config: Optional[dict] = None + # runtime configuration options + num_fewshot: Optional[int] = None + # scoring options + metric_list: Optional[list] = None + output_type: OutputType = "generate_until" + generation_kwargs: Optional[dict] = None + repeats: int = 1 + filter_list: Optional[Union[str, list]] = None + should_decontaminate: bool = False + doc_to_decontamination_query: Optional[str] = None + metadata: Optional[dict] = ( + None # by default, not used in the code. allows for users to pass arbitrary info to tasks + ) + + def __post_init__(self) -> None: + if self.generation_kwargs is not None: + if self.output_type != "generate_until": + eval_logger.warning( + f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!" 
+ ) + + if "temperature" in self.generation_kwargs: + self.generation_kwargs["temperature"] = float( + self.generation_kwargs["temperature"] + ) + + if "until" not in self.generation_kwargs: + self.generation_kwargs["until"] = [self.fewshot_delimiter] + else: + if self.output_type == "generate_until": + # ensure that we greedily generate in absence of explicit arguments otherwise + self.generation_kwargs = { + "until": ( + None + if self.fewshot_delimiter is None + else [self.fewshot_delimiter] + ), + "do_sample": False, + } + + def __getitem__(self, item): + return getattr(self, item) + + def __setitem__(self, item, value): + return setattr(self, item, value) + + def to_dict(self, keep_callable: bool = False) -> dict: + """dumps the current config as a dictionary object, as a printable format. + null fields will not be printed. + Used for dumping results alongside full task configuration + + :return: dict + A printable dictionary version of the TaskConfig object. + + # TODO: should any default value in the TaskConfig not be printed? + """ + cfg_dict = asdict(self) + # remove values that are `None` + for k, v in list(cfg_dict.items()): + if v is None: + cfg_dict.pop(k) + elif k == "metric_list": + for metric_dict in v: + for metric_key, metric_value in metric_dict.items(): + if callable(metric_value): + metric_dict[metric_key] = self.serialize_function( + metric_value, keep_callable=keep_callable + ) + cfg_dict[k] = v + elif callable(v): + cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable) + return cfg_dict + + def serialize_function( + self, value: Union[Callable, str], keep_callable=False + ) -> Union[Callable, str]: + """Serializes a given function or string. + + If 'keep_callable' is True, the original callable is returned. + Otherwise, attempts to return the source code of the callable using 'getsource'. 
+ """ + if keep_callable: + return value + else: + try: + return getsource(value) + except (TypeError, OSError): + return str(value) + + +class Task(abc.ABC): + """A task represents an entire benchmark including its dataset, problems, + answers, and evaluation methods. See BoolQ for a simple example implementation + + A `doc` can be any python object which represents one instance of evaluation. + This is usually a dictionary e.g. + {"question": ..., "answer": ...} or + {"question": ..., question, answer) + """ + + VERSION: Optional[Union[int, str]] = None + + # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub + # or a path to a custom `datasets` loading script. + DATASET_PATH: Optional[str] = None + + # The name of a subset within `DATASET_PATH`. + DATASET_NAME: Optional[str] = None + + OUTPUT_TYPE: Optional[OutputType] = None + + def __init__( + self, + data_dir: Optional[str] = None, + cache_dir: Optional[str] = None, + download_mode: Optional[datasets.DownloadMode] = None, + config: Optional[Mapping] = None, # Union[dict, TaskConfig] + ) -> None: + """ + :param data_dir: str + Stores the path to a local folder containing the `Task`'s data files. + Use this to specify the path to manually downloaded data (usually when + the dataset is not publicly accessible). + :param cache_dir: str + The directory to read/write the `Task` dataset. This follows the + HuggingFace `datasets` API with the default cache directory located at: + `~/.cache/huggingface/datasets` + NOTE: You can change the cache location globally for a given process + to another directory: + `export HF_DATASETS_CACHE="/path/to/another/directory"` + :param download_mode: datasets.DownloadMode + How to treat pre-existing `Task` downloads and data. + - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS` + Reuse download and reuse dataset. + - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS` + Reuse download with fresh dataset. 
+ - `datasets.DownloadMode.FORCE_REDOWNLOAD` + Fresh download and fresh dataset. + """ + self.download(data_dir, cache_dir, download_mode) + self._training_docs: Optional[list] = None + self._fewshot_docs: Optional[list] = None + self._instances: Optional[List[Instance]] = None + + self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig() + + self._filters = [build_filter_ensemble("none", [["take_first", None]])] + self.fewshot_rnd: Optional[random.Random] = ( + None # purposely induce errors in case of improper usage + ) + + def download( + self, + data_dir: Optional[str] = None, + cache_dir: Optional[str] = None, + download_mode=None, + ) -> None: + """Downloads and returns the task dataset. + Override this method to download the dataset from a custom API. + + :param data_dir: str + Stores the path to a local folder containing the `Task`'s data files. + Use this to specify the path to manually downloaded data (usually when + the dataset is not publicly accessible). + :param cache_dir: str + The directory to read/write the `Task` dataset. This follows the + HuggingFace `datasets` API with the default cache directory located at: + `~/.cache/huggingface/datasets` + NOTE: You can change the cache location globally for a given process + by setting the shell environment variable, `HF_DATASETS_CACHE`, + to another directory: + `export HF_DATASETS_CACHE="/path/to/another/directory"` + :param download_mode: datasets.DownloadMode + How to treat pre-existing `Task` downloads and data. + - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS` + Reuse download and reuse dataset. + - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS` + Reuse download with fresh dataset. + - `datasets.DownloadMode.FORCE_REDOWNLOAD` + Fresh download and fresh dataset. 
+ """ + self.dataset = datasets.load_dataset( + path=self.DATASET_PATH, + name=self.DATASET_NAME, + data_dir=data_dir, + cache_dir=cache_dir, + download_mode=download_mode, + ) + + @property + def config(self) -> TaskConfig: + """Returns the TaskConfig associated with this class.""" + return self._config + + @abc.abstractmethod + def has_training_docs(self): + """Whether the task has a training set""" + pass + + @abc.abstractmethod + def has_validation_docs(self): + """Whether the task has a validation set""" + pass + + @abc.abstractmethod + def has_test_docs(self): + """Whether the task has a test set""" + pass + + def training_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + return [] + + def validation_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + return [] + + def test_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + return [] + + def fewshot_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + if self.has_training_docs(): + return self.training_docs() + elif self.has_validation_docs(): + return self.validation_docs() + else: + eval_logger.warning( + f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False" + ", using test_docs as fewshot_docs but this is not recommended." + ) + return self.test_docs() + + def _process_doc(self, doc: dict) -> dict: + """ + Override this to process (detokenize, strip, replace, etc.) individual + documents. This can be used in a map over documents of a data split. + E.g. `map(self._process_doc, self.dataset["validation"])` + + :return: dict + The processed version of the specified `doc`. 
+ """ + return doc + + @property + def instances(self) -> List[Instance]: + """After calling `task.build_all_requests()`, tasks + maintain a list of the dataset instances which will be evaluated. + """ + return self._instances + + def fewshot_examples(self, k, rnd): + if self._training_docs is None: + self._training_docs = list(self.training_docs()) + + return rnd.sample(self._training_docs, k) + + def doc_to_decontamination_query(self, doc): + raise NotImplementedError( + "Override doc_to_decontamination_query with document specific decontamination query." + ) + + @abc.abstractmethod + def doc_to_text(self, doc): + pass + + @abc.abstractmethod + def doc_to_target(self, doc): + pass + + def build_all_requests( + self, + *, + limit: Union[int, None] = None, + rank: int = 0, + world_size: int = 1, + cache_requests: bool = False, + rewrite_requests_cache: bool = False, + system_instruction: Optional[str] = None, + apply_chat_template: bool = False, + fewshot_as_multiturn: bool = False, + chat_template: Optional[Callable] = None, + tokenizer_name: str = "", + ) -> None: + """Build a set of Instances for a task, and store them in task.instances""" + + # used with caching + og_limit = limit + + cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}" + cache_key += "-chat_template" if apply_chat_template else "" + cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else "" + cache_key += ( + f"-system_prompt_hash{utils.hash_string(system_instruction)}" + if system_instruction is not None + else "" + ) + cache_key += f"-tokenizer{tokenizer_name}" + + cached_instances = load_from_cache(file_name=cache_key) + + if cache_requests and cached_instances and not rewrite_requests_cache: + cached_instances = cached_instances[:limit] + + flattened_instances = [ + instance + for instance_group in cached_instances + for instance in instance_group + ] + + self._instances = flattened_instances + return + + 
eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...") + + instances = [] + + # process all documents when caching is specified for simplicity + if ( + cache_requests + and (not cached_instances or rewrite_requests_cache) + and limit is not None + ): + limit = None + + doc_id_docs = list( + self.doc_iterator(rank=rank, limit=limit, world_size=world_size) + ) + + num_docs = len(doc_id_docs) + + for doc_id, doc in tqdm( + doc_id_docs, + total=num_docs, + ): + # sample fewshot context #TODO: need to offset doc_id by rank now! + fewshot_ctx = self.fewshot_context( + doc, + 0 if self.config.num_fewshot is None else self.config.num_fewshot, + system_instruction, + apply_chat_template, + fewshot_as_multiturn, + chat_template, + ) + + # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute + inst = self.construct_requests( + doc=doc, + ctx=fewshot_ctx, + metadata=(self.config["task"], doc_id, self.config.repeats), + ) + + if not isinstance(inst, list): + inst = [inst] + + instances.append(inst) + + # now flatten, this is to allow slicing to work with pickles + + sliced_instances = instances[:og_limit] + + flattened_instances = [ + instance + for instance_group in sliced_instances + for instance in instance_group + ] + + self._instances = flattened_instances + + if len(self._instances) == 0: + raise ValueError("task.build_requests() did not find any docs!") + + if cache_requests and (not cached_instances or rewrite_requests_cache): + save_to_cache(file_name=cache_key, obj=instances) + + @abc.abstractmethod + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. 
This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. + :param doc_idx: int + The index of a document within `self.test_docs()` or `self.validation_docs()`, + whichever is the main split used. + :param repeats: int + TODO: update this docstring + The number of times each instance in a dataset is inferred on. Defaults to 1, + can be increased for techniques like majority voting. + """ + pass + + @abc.abstractmethod + def process_results(self, doc, results): + """Take a single document and the LM results and evaluates, returning a + dict where keys are the names of submetrics and values are the values of + the metric for that one document + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param results: + The results of the requests created in construct_requests. + """ + pass + + @abc.abstractmethod + def aggregation(self): + """ + :returns: {str: [metric_score] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metric scores + """ + pass + + @abc.abstractmethod + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + pass + + def get_config(self, key: str) -> Any: + return getattr(self._config, key, None) + + @classmethod + def count_bytes(cls, doc): + """Used for byte-level perplexity metrics in rolling loglikelihood""" + return len(doc.encode("utf-8")) + + @classmethod + def count_words(cls, doc): + """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!""" + return len(re.split(r"\s+", doc)) + + @utils.positional_deprecated + def fewshot_context( + self, + doc, + num_fewshot, + rnd=None, + description=None, + ): + """Returns a fewshot context string that is made up of a prepended description + (if 
provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        # Fall back to the task-level generator when no explicit `rnd` was passed;
        # refuse to run un-seeded so sampling stays reproducible.
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    # Cache the fallback pool (validation preferred, else test) once.
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                # Draw one extra so we can drop `doc` itself if it was sampled.
                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            # Each example is question+answer concatenated; examples are joined and
            # terminated by a blank-line delimiter.
            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            # Filters mutate the Instance objects in place (resps -> filtered_resps).
            for f in self._filters:
                f.apply(self._instances)
        else:
            # NOTE(review): `self._instances` is returned only on this no-filter path;
            # the filtered path implicitly returns None — looks intentional upstream,
            # but confirm callers never rely on the return value here.
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

def dump_config(self) -> dict: + """Returns the config as a dictionary.""" + # TODO: this should only return the overrides applied to a non-YAML task's configuration. + # (num_fewshot) + return self.config.to_dict() + + def set_config(self, key: str, value: Any, update: bool = False) -> None: + """Set or update the configuration for a given key.""" + if key is None: + raise ValueError("Key must be provided.") + + if update: + current_value = getattr(self._config, key, {}) + if not isinstance(current_value, dict): + raise TypeError( + f"Expected a dict for key '{key}', got {type(current_value).__name__} instead." + ) + current_value.update(value) + else: + setattr(self._config, key, value) + + def override_metric(self, metric_name: str) -> None: + """ + Override the default metrics used for evaluation with custom metrics. + + Parameters: + - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics. + """ + ( + self._metric_fn_list, + self._aggregation_list, + self._metric_fn_kwargs, + self._higher_is_better, + ) = ({}, {}, {}, {}) + self._metric_fn_list[metric_name] = get_metric(metric_name) + self._aggregation_list[metric_name] = get_metric_aggregation(metric_name) + self._higher_is_better[metric_name] = is_higher_better(metric_name) + self._metric_fn_kwargs[metric_name] = {} + if not isinstance(self, ConfigurableTask): + self.process_results = lambda x, y: {metric_name: get_metric(metric_name)} + self.aggregation = lambda: { + metric_name: get_metric_aggregation(metric_name) + } + setattr(self._config, "metric_list", [{"metric": metric_name}]) + setattr(self._config, "process_results", None) + + def set_fewshot_seed(self, seed: Optional[int] = None) -> None: + self.fewshot_rnd = random.Random(seed) + if hasattr(self, "sampler"): + self.sampler.rnd = self.fewshot_rnd + + @property + def eval_docs(self) -> Union[datasets.Dataset, List[dict]]: + if self.has_test_docs(): + return self.test_docs() + elif 
self.has_validation_docs(): + return self.validation_docs() + else: + raise ValueError( + f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!" + ) + + def doc_iterator( + self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1 + ) -> Iterator[Tuple[int, Any]]: + limit = int(limit) if limit else None + doc_iterator = utils.create_iterator( + enumerate(self.eval_docs), + rank=int(rank), + limit=limit, + world_size=int(world_size), + ) + return doc_iterator + + +class ConfigurableTask(Task): + VERSION = "Yaml" + OUTPUT_TYPE = None + CONFIG = None + + def __init__( + self, + data_dir=None, + cache_dir=None, + download_mode=None, + config: Optional[dict] = None, + ) -> None: # TODO no super() call here + # Get pre-configured attributes + self._config = self.CONFIG + + # Use new configurations if there was no preconfiguration + if self.config is None: + self._config = TaskConfig(**config) + # Overwrite configs + else: + if config is not None: + self._config.__dict__.update(config) + + if self.config is None: + raise ValueError( + "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg" + ) + + if isinstance(self.config.metadata, dict): + if "version" in self.config.metadata: + self.VERSION = self.config.metadata["version"] + + if self.config.output_type is not None: + if self.config.output_type not in ALL_OUTPUT_TYPES: + raise ValueError( + f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'" + ) + self.OUTPUT_TYPE = self.config.output_type + + if self.config.dataset_path is not None: + self.DATASET_PATH = self.config.dataset_path + + if self.config.dataset_name is not None: + self.DATASET_NAME = self.config.dataset_name + + self._metric_fn_list = {} + self._metric_fn_kwargs = {} + self._aggregation_list = {} + self._higher_is_better = {} + + if self.config.metric_list is None: + # TODO: handle this in TaskConfig.__post_init__ ? 
+ _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type] + + for metric_name in _metric_list: + self._metric_fn_list[metric_name] = get_metric(metric_name) + self._metric_fn_kwargs[metric_name] = {} + self._aggregation_list[metric_name] = get_metric_aggregation( + metric_name + ) + self._higher_is_better[metric_name] = is_higher_better(metric_name) + else: + for metric_config in self.config.metric_list: + if "metric" not in metric_config: + raise ValueError( + "'metric' key not provided for an entry in 'metric_list', must be specified!" + ) + metric_name = metric_config["metric"] + kwargs = { + key: metric_config[key] + for key in metric_config + if key + not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"] + } + hf_evaluate_metric = ( + "hf_evaluate" in metric_config + and metric_config["hf_evaluate"] is True + ) + + if self.config.process_results is not None: + self._metric_fn_list[metric_name] = None + self._metric_fn_kwargs[metric_name] = {} + elif callable(metric_name): + metric_fn = metric_name.__call__ + metric_name = metric_name.__name__ + self._metric_fn_list[metric_name] = metric_fn + self._metric_fn_kwargs[metric_name] = kwargs + else: + self._metric_fn_list[metric_name] = get_metric( + metric_name, hf_evaluate_metric + ) + self._metric_fn_kwargs[metric_name] = kwargs + + if "aggregation" in metric_config: + agg_name = metric_config["aggregation"] + if isinstance(agg_name, str): + self._aggregation_list[metric_name] = get_aggregation(agg_name) + elif callable(agg_name): # noqa: E721 + self._aggregation_list[metric_name] = metric_config[ + "aggregation" + ] + else: + INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()} + metric_agg = get_metric_aggregation(metric_name) + eval_logger.warning( + f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. 
" + f"using default " + f"aggregation={INV_AGG_REGISTRY[metric_agg]}" + ) + self._aggregation_list[metric_name] = metric_agg + + if "higher_is_better" in metric_config: + self._higher_is_better[metric_name] = metric_config[ + "higher_is_better" + ] + else: + eval_logger.warning( + f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. " + f"using default " + f"higher_is_better={is_higher_better(metric_name)}" + ) + self._higher_is_better[metric_name] = is_higher_better(metric_name) + + self.download(self.config.dataset_kwargs) + self._training_docs = None + self._fewshot_docs = None + + if self.config.filter_list is not None: + self._filters = [] + for filter_config in self.config.filter_list: + filter_name = filter_config["name"] + filter_functions = filter_config["filter"] + components = [] + for function in filter_functions: + kwargs = { + key: function[key] for key in function if key != "function" + } + components.append([function["function"], kwargs]) + filter_pipeline = build_filter_ensemble(filter_name, components) + self._filters.append(filter_pipeline) + else: + self._filters = [build_filter_ensemble("none", [["take_first", None]])] + + if self.config.use_prompt is not None: + eval_logger.info(f"loading prompt {self.config.use_prompt}") + self.prompt = get_prompt( + self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME + ) + else: + self.prompt = None + + if self.fewshot_docs() is not None: + self.fewshot_rnd = ( + random.Random() + ) # setting with no seed, to be overridden at a later time + config_sampler: Union[str, Callable] = ( + self.config.fewshot_config.get("sampler", "default") + if self.config.fewshot_config + else "default" + ) + if isinstance(config_sampler, str): + self.sampler = samplers.get_sampler(config_sampler)( + list(self.fewshot_docs()), self, rnd=self.fewshot_rnd + ) + elif callable(config_sampler) and issubclass( + config_sampler, samplers.ContextSampler + ): + self.sampler = config_sampler( 
+ docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd + ) + else: + raise TypeError( + f"fewshot_config.sampler should be a string or callable of ContextSampler type, " + f"not {type(config_sampler)}" + ) + + self.task_docs = self.eval_docs + + # Test One Doc + self.features = list(self.task_docs.features.keys()) + self.multiple_input = 0 + self.multiple_target = 0 + test_doc = self.task_docs[0] + test_text = self.doc_to_text(test_doc) + test_target = self.doc_to_target(test_doc) + + if self.config.doc_to_choice is not None: + test_choice = self.doc_to_choice(test_doc) + if not isinstance(test_choice, list): + eval_logger.error("doc_to_choice must return list") + else: + num_choice = len(test_choice) + + if isinstance(test_text, int): + self.multiple_input = num_choice + else: + test_choice = None + + if isinstance(test_target, list): + self.multiple_target = len(test_target) + else: + if (isinstance(test_target, int)) and (test_choice is not None): + test_target = test_choice[test_target] + else: + test_target = str(test_target) + + if test_choice is not None: + check_choices = test_choice + else: + check_choices = [test_target] + if self.config.doc_to_choice is not None: + for choice in check_choices: + choice_has_whitespace = True if choice[0].isspace() else False + delimiter_has_whitespace = ( + True + if self.config.target_delimiter.rstrip() + != self.config.target_delimiter + else False + ) + + if delimiter_has_whitespace and choice_has_whitespace: + eval_logger.debug( + f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace' + ) + elif (not delimiter_has_whitespace) and (not choice_has_whitespace): + eval_logger.debug( + f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace' + ) + + def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None: + 
self.dataset = datasets.load_dataset( + path=self.DATASET_PATH, + name=self.DATASET_NAME, + **dataset_kwargs if dataset_kwargs is not None else {}, + ) + + def has_training_docs(self) -> bool: + if self.config.training_split is not None: + return True + else: + return False + + def has_validation_docs(self) -> bool: + if self.config.validation_split is not None: + return True + else: + return False + + def has_test_docs(self) -> bool: + if self.config.test_split is not None: + return True + else: + return False + + def training_docs(self) -> datasets.Dataset: + if self.has_training_docs(): + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.training_split] + ) + return self.dataset[self.config.training_split] + + def validation_docs(self) -> datasets.Dataset: + if self.has_validation_docs(): + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.validation_split] + ) + return self.dataset[self.config.validation_split] + + def test_docs(self) -> datasets.Dataset: + if self.has_test_docs(): + if self.config.process_docs is not None: + return self.config.process_docs(self.dataset[self.config.test_split]) + return self.dataset[self.config.test_split] + + def fewshot_docs(self): + if self.config.fewshot_split is not None: + if self.config.process_docs is not None: + return self.config.process_docs(self.dataset[self.config.fewshot_split]) + return self.dataset[self.config.fewshot_split] + elif ( + self.config.fewshot_config is not None + and self.config.fewshot_config.get("samples", None) is not None + ): + if isinstance(self.config.fewshot_config["samples"], list): + return self.config.fewshot_config["samples"] + elif callable(self.config.fewshot_config["samples"]): + return self.config.fewshot_config["samples"]() + else: + raise Exception( + "`fewshot_config['samples']` was incorrectly defined in the configuration. 
It should be either a list of samples as a dict, or function returning this list." + ) + else: + if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0): + eval_logger.warning( + f"Task '{self.config.task}': " + "num_fewshot > 0 but fewshot_split is None. " + "using preconfigured rule." + ) + return super().fewshot_docs() + + @staticmethod + def append_target_question( + labeled_examples: List[Dict[str, str]], + question: str, + fewshot_as_multiturn: bool = False, + ) -> None: + """Adds a target question to the labeled examples list. + If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry. + Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant. + """ + if not fewshot_as_multiturn: + # if no messages or last message is system, append as new user entry + if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system": + labeled_examples.append({"role": "user", "content": question}) + # if last message is user, append to it to avoid two user messages in a row + else: + labeled_examples[-1]["content"] += question + else: + # if fewshot_as_multiturn is True, append as next user entry (last is always assistant) + labeled_examples.append({"role": "user", "content": question}) + + @utils.positional_deprecated + def fewshot_context( + self, + doc: str, + num_fewshot: int, + system_instruction: Optional[str] = None, + apply_chat_template: bool = False, + fewshot_as_multiturn: bool = False, + chat_template: Optional[Callable] = None, + ) -> str: + """Returns a fewshot context string that is made up of a prepended description + (if provided), the `num_fewshot` number of examples, and an appended prompt example. + + :param doc: str + The document as returned from training_docs, validation_docs, or test_docs. 
+ :param num_fewshot: int + The number of fewshot examples to provide in the returned context string. + :param system_instruction: str + System instruction to be applied to the prompt. + :param apply_chat_template: bool + Whether to apply the chat template to the fewshot context. + :param fewshot_as_multiturn: bool + Whether to provide the fewshot examples as a multiturn conversation or a single user turn. + :param chat_template: Callable + Chat template to be applied to the fewshot context. + :returns: str + The fewshot context. + """ + + if apply_chat_template: + labeled_examples = [] + else: + labeled_examples = "" + + # get task description + if description := self.config.description: + description = utils.apply_template(self.config.description, doc) + + # create system prompt based on the provided system instruction and description + if system_instruction is not None and description: + system_prompt = ( + f"{system_instruction}{self.sampler.fewshot_delimiter}{description}" + ) + elif system_instruction is not None: + system_prompt = system_instruction + elif description: + system_prompt = description + else: + system_prompt = "" + + # add system prompt if specified + if system_prompt: + if apply_chat_template: + labeled_examples.append({"role": "system", "content": system_prompt}) + else: + labeled_examples = system_prompt + + # if few-shot - append examples after the system prompt + if num_fewshot > 0: + if apply_chat_template: + labeled_examples.extend( + self.sampler.get_chat_context( + doc, num_fewshot, fewshot_as_multiturn + ) + ) + else: + labeled_examples += self.sampler.get_context(doc, num_fewshot) + + example = self.doc_to_text(doc) + if apply_chat_template: + if self.multiple_input: + return chat_template(labeled_examples) + if isinstance(example, str): + self.append_target_question( + labeled_examples, example, fewshot_as_multiturn + ) + # for loglikelihood create a list of questions with appended choices + elif isinstance(example, list): + 
labeled_examples_list = [] + # copy chat history for each example and append the answer + for ex in example: + chat = deepcopy(labeled_examples) + self.append_target_question(chat, ex, fewshot_as_multiturn) + labeled_examples_list.append(chat_template(chat)) + return labeled_examples_list + # if example is an integer, append the choice or convert to string + elif isinstance(example, int): + if self.config.doc_to_choice is not None: + choices = self.doc_to_choice(doc) + self.append_target_question( + labeled_examples, choices[example], fewshot_as_multiturn + ) + else: + self.append_target_question( + labeled_examples, str(example), fewshot_as_multiturn + ) + # return lm.apply_chat_template(labeled_examples) + return chat_template(labeled_examples) + else: + if self.multiple_input: + return labeled_examples + if isinstance(example, str): + return labeled_examples + example + elif isinstance(example, list): + return [labeled_examples + ex for ex in example] + elif isinstance(example, int): + if self.config.doc_to_choice is not None: + choices = self.doc_to_choice(doc) + return labeled_examples + choices[example] + else: + return labeled_examples + str(example) + + def apply_filters(self): + """Iterates over FilterEnsembles and applies them to instances""" + if hasattr(self, "_filters"): + for f in self._filters: + f.apply(self._instances) + else: + eval_logger.warning("No filter defined, passing through instances") + return self._instances + + def should_decontaminate(self): + return self.config.should_decontaminate + + def doc_to_decontamination_query(self, doc): + if self.config.should_decontaminate: + if self.config.doc_to_decontamination_query is None: + return self.doc_to_text(doc) + else: + doc_to_decontamination_query = self.config.doc_to_decontamination_query + if doc_to_decontamination_query in self.features: + return doc[doc_to_decontamination_query] + elif callable(doc_to_decontamination_query): + return doc_to_decontamination_query(doc) + else: + return 
ast.literal_eval( + utils.apply_template( + self.config.doc_to_decontamination_query, doc + ) + ) + + def _process_doc(self, doc: dict) -> dict: + """ + Override this to process (detokenize, strip, replace, etc.) individual + documents. This can be used in a map over documents of a data split. + E.g. `map(self._process_doc, self.dataset["validation"])` + + :return: dict + The processed version of the specified `doc`. + """ + return doc + + def doc_to_text(self, doc): + if self.prompt is not None: + doc_to_text = self.prompt + else: + doc_to_text = self.config.doc_to_text + + if isinstance(doc_to_text, int): + return doc_to_text + elif isinstance(doc_to_text, str): + if doc_to_text in self.features: + # if self.config.doc_to_choice is not None: + # return self.doc_to_choice(doc)[doc[doc_to_text]] + # else: + return doc[doc_to_text] + else: + text_string = utils.apply_template(doc_to_text, doc) + if text_string.isdigit() and self._config.doc_to_choice is not None: + return ast.literal_eval(text_string) + else: + return text_string + elif callable(doc_to_text): + return doc_to_text(doc) + # Used when applying a Promptsource template + elif hasattr(doc_to_text, "apply"): + applied_prompt = doc_to_text.apply(doc) + if len(applied_prompt) == 2: + return applied_prompt[0] + else: + eval_logger.warning("Applied prompt returns empty string") + return self.config.fewshot_delimiter + else: + print(type(doc_to_text)) + raise TypeError + + def doc_to_target(self, doc: Mapping) -> Union[int, str, list]: + if self.prompt is not None: + doc_to_target = self.prompt + else: + doc_to_target = self.config.doc_to_target + + if isinstance(doc_to_target, int): + return doc_to_target + elif isinstance(doc_to_target, str): + if doc_to_target in self.features: + # if self.config.doc_to_choice is not None: + # return self.doc_to_choice(doc)[doc[doc_to_target]] + # else: + return doc[doc_to_target] + else: + target_string = utils.apply_template(doc_to_target, doc) + if target_string.isdigit() 
and self._config.doc_to_choice is not None: + return ast.literal_eval(target_string) + elif ( + len(target_string) >= 2 + and (target_string[0] == "[") + and (target_string[-1] == "]") + ): + try: + return ast.literal_eval(target_string) + except (SyntaxError, ValueError): + return target_string + else: + return target_string + elif isinstance(doc_to_target, list): + return doc_to_target + elif callable(doc_to_target): + return doc_to_target(doc) + # Used when applying a Promptsource template + elif hasattr(doc_to_target, "apply"): + applied_prompt = doc_to_target.apply(doc) + if len(applied_prompt) == 2: + return applied_prompt[1] + else: + eval_logger.warning("Applied prompt returns empty string") + return self.config.fewshot_delimiter + else: + raise TypeError + + def doc_to_choice(self, doc: Any) -> List[str]: + if self.prompt is not None: + doc_to_choice = self.prompt + elif self.config.doc_to_choice is None: + eval_logger.error("doc_to_choice was called but not set in config") + else: + doc_to_choice = self.config.doc_to_choice + + if isinstance(doc_to_choice, str): + if doc_to_choice in self.features: + return doc[doc_to_choice] + else: + return ast.literal_eval(utils.apply_template(doc_to_choice, doc)) + elif isinstance(doc_to_choice, list): + return doc_to_choice + elif isinstance(doc_to_choice, dict): + return list(doc_to_choice.values()) + elif callable(doc_to_choice): + return doc_to_choice(doc) + elif hasattr(doc_to_choice, "get_answer_choices_list"): + return doc_to_choice.get_answer_choices_list(doc) + else: + raise TypeError + + def construct_requests( + self, doc: dict, ctx: str, **kwargs + ) -> Union[List[Instance], Instance]: + if self.OUTPUT_TYPE == "loglikelihood": + arguments = (ctx, self.doc_to_target(doc)) + elif self.OUTPUT_TYPE == "loglikelihood_rolling": + arguments = (self.doc_to_target(doc),) + elif self.OUTPUT_TYPE == "multiple_choice": + choices = self.doc_to_choice(doc) + target_delimiter = self.config.target_delimiter + if 
self.multiple_input: + # If there are multiple inputs, choices are placed in the ctx + cont = self.doc_to_target(doc) + arguments = [ + (ctx + choice, f"{target_delimiter}{cont}") for choice in choices + ] + else: + # Otherwise they are placed in the continuation + arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices] + + request_list = [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=arg, + idx=i, + **kwargs, + ) + for i, arg in enumerate(arguments) + ] + # TODO: we should raise a warning telling users this will at most ~2x runtime. + if "acc_mutual_info" in self._metric_fn_list.keys(): + # if we are calculating multiple choice accuracy + # using mutual information instead of raw loglikelihood as metric, need unconditional lls. + + # here mutual info refers to calculating + # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice)) + # in other words normalizing by subtracting the unconditional logprob of each choice. + request_list.extend( + [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=("", "{}".format(choice)), + idx=i, + **kwargs, + ) + for i, choice in enumerate(choices) + ] + ) + return request_list + + elif self.OUTPUT_TYPE == "generate_until": + arguments = (ctx, deepcopy(self.config.generation_kwargs)) + + return Instance( + request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs + ) + + def process_results(self, doc, results): + if callable(self.config.process_results): + return self.config.process_results(doc, results) + + result_dict = {} + use_metric = list(self._metric_fn_list.keys()) + if self.OUTPUT_TYPE == "loglikelihood": + results = results[0] + ll, is_greedy = results + return { + **({"perplexity": ll} if "perplexity" in use_metric else {}), + **({"acc": int(is_greedy)} if "acc" in use_metric else {}), + } + elif self.OUTPUT_TYPE == "loglikelihood_rolling": + (loglikelihood,) = results + _words = self.count_words(self.doc_to_target(doc)) + _bytes = 
self.count_bytes(self.doc_to_target(doc)) + return { + **( + {"word_perplexity": (loglikelihood, _words)} + if "word_perplexity" in use_metric + else {} + ), + **( + {"byte_perplexity": (loglikelihood, _bytes)} + if "byte_perplexity" in use_metric + else {} + ), + **( + {"bits_per_byte": (loglikelihood, _bytes)} + if "bits_per_byte" in use_metric + else {} + ), + } + elif self.OUTPUT_TYPE == "multiple_choice": + lls, is_greedy = zip(*results) + + # retrieve choices in List[str] form, to compute choice lengths, etc. + choices = self.doc_to_choice(doc) + completion_len = np.array([float(len(i)) for i in choices]) + + if ( + 2 * len(choices) == len(lls) + and "acc_mutual_info" in self._metric_fn_list.keys() + ): + # then we are doing mutual info. + # this stores the "dryrun" / unconditional answer loglikelihoods + lls_unconditional = lls[1::2] + if len(lls_unconditional) != len(choices): + raise ValueError + # and this stores our "regular" conditional loglikelihoods + lls = lls[::2] + + pred = np.argmax(lls) + pred_norm = np.argmax(lls / completion_len) + + if self.multiple_input: + gold = self.doc_to_text(doc) + else: + gold = self.doc_to_target(doc) + + gold_index_error = False + if isinstance(gold, list): + gold = [i if i < len(choices) else -100 for i in gold] + if -100 in gold: + gold_index_error = True + else: + if isinstance(gold, int): + gold = gold if gold < len(choices) else -100 + elif isinstance(gold, str): + gold = choices.index(gold) if gold in choices else -100 + + if gold == -100: + gold_index_error = True + + if gold_index_error: + eval_logger.warning( + f"Label index was not in within range of available choices," + f"Sample:\n\n{doc}\n\n" + ) + + if self.multiple_target: + acc = 1.0 if pred in gold else 0.0 + acc_norm = 1.0 if pred_norm in gold else 0.0 + exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold])) + else: + acc = 1.0 if pred == gold else 0.0 + acc_norm = 1.0 if pred_norm == gold else 0.0 + # TODO: this gets score of 0 on 
arc_challenge for pythia-70m. need to test that this works properly + exact_match = int(is_greedy[gold]) if gold != -100 else 0 + + prob_norm = utils.softmax(lls) + + # TODO use keyword arguments to the metric? + # gold, pred, norm stuff, the original lls, + result_dict = { + **({"acc": acc} if "acc" in use_metric else {}), + **({"f1": (gold, pred)} if "f1" in use_metric else {}), + **({"mcc": (gold, pred)} if "mcc" in use_metric else {}), + **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}), + **({"exact_match": exact_match} if "exact_match" in use_metric else {}), + **( + {"brier_score": (gold, prob_norm)} + if "brier_score" in use_metric + else {} + ), + } + + if "acc_mutual_info" in use_metric: + lls_mutual_info = [ + ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional) + ] + acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0 + result_dict["acc_mutual_info"] = acc_mutual_info + + elif self.OUTPUT_TYPE == "generate_until": + gold = self.doc_to_target(doc) + result = results[0] + if self.config.doc_to_choice is not None: + # If you set doc_to_choice, + # it assumes that doc_to_target returns a number. + choices = self.doc_to_choice(doc) + gold = choices[gold] + # we expect multiple_targets to be a list. 
+ elif self.multiple_target: + gold = list(gold) + elif type(gold) != type(result): + # cast gold to the same type as result + gold = type(result)(gold) + + for metric in self._metric_fn_list.keys(): + if self.multiple_target: + # in the case where we have multiple targets, + # return true if any are true + # TODO: this may break for multipLe_target, non zero-or-1 metrics + scores = [] + if not isinstance(gold, list): + # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer + # print(gold) + gold = [gold] + if metric == "exact_match": + result = [result for _ in range(len(gold))] + scores = self._metric_fn_list[metric]( + references=gold, + predictions=result, + **self._metric_fn_kwargs[metric], + )[metric] + result_score = 1.0 if scores > 0.0 else 0.0 + else: + for gold_option in gold: + try: + result_score = self._metric_fn_list[metric]( + references=[gold_option], + predictions=[result], + **self._metric_fn_kwargs[metric], + ) + except ( + TypeError + ): # TODO: this is hacky and I don't want to do it + result_score = self._metric_fn_list[metric]( + [gold_option, result] + ) + if isinstance(result_score, dict): + # TODO: this handles the case where HF evaluate returns a dict. + result_score = result_score[metric] + scores.append(result_score) + if any(scores): + result_score = 1.0 + else: + result_score = 0.0 + else: + try: + result_score = self._metric_fn_list[metric]( + references=[gold], + predictions=[result], + **self._metric_fn_kwargs[metric], + ) + except TypeError: # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics + result_score = self._metric_fn_list[metric]([gold, result]) + if isinstance(result_score, dict): + # TODO: this handles the case where HF evaluate returns a dict. + result_score = result_score[metric] + result_dict[metric] = result_score + else: + raise ValueError( + f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! 
Please use one of ", + "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'", + ) + + return result_dict + + def aggregation(self) -> dict: + return self._aggregation_list + + def higher_is_better(self) -> dict: + return self._higher_is_better + + def get_config(self, key: str) -> Any: + return getattr(self._config, key, None) + + def __repr__(self): + return ( + f"ConfigurableTask(task_name={getattr(self.config, 'task', None)}," + f"group_name={getattr(self.config, 'group', None)}," + f"output_type={self.OUTPUT_TYPE}," + f"num_fewshot={getattr(self.config, 'num_fewshot', None)}," + f"num_samples={len(self.eval_docs)})" + ) + + +class MultipleChoiceTask(Task): + OUTPUT_TYPE = "loglikelihood" + + def doc_to_target(self, doc: dict) -> str: + return " " + doc["choices"][doc["gold"]] + + def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]: + # TODO: add mutual info here? + return [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=(ctx, " {}".format(choice)), + idx=i, + **kwargs, + ) + for i, choice in enumerate(doc["choices"]) + ] + + def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict: + results = [ + res[0] for res in results + ] # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere? 
+ gold = doc["gold"] + + acc = 1.0 if np.argmax(results) == gold else 0.0 + completion_len = np.array([float(len(i)) for i in doc["choices"]]) + acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0 + + return { + "acc": acc, + "acc_norm": acc_norm, + } + + def higher_is_better(self) -> dict: + return { + "acc": True, + "acc_norm": True, + } + + def aggregation(self) -> dict: + return { + "acc": mean, + "acc_norm": mean, + } + + +class PerplexityTask(Task): + OUTPUT_TYPE = "loglikelihood_rolling" + + def has_training_docs(self) -> bool: + return False + + def fewshot_examples(self, k: int, rnd) -> List: + if k != 0: + raise ValueError( + "The number of fewshot examples must be 0 for perplexity tasks." + ) + return [] + + def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]: + if num_fewshot != 0: + raise ValueError( + "The number of fewshot examples must be 0 for perplexity tasks." + ) + + return "" + + def higher_is_better(self) -> dict: + return { + "word_perplexity": False, + "byte_perplexity": False, + "bits_per_byte": False, + } + + def doc_to_decontamination_query(self, doc): + return doc + + def doc_to_text(self, doc) -> str: + return "" + + def doc_to_target(self, doc): + return doc + + def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs): + if bool(ctx): + raise ValueError + + return Instance( + request_type=self.OUTPUT_TYPE, + doc=doc, + arguments=(self.doc_to_target(doc),), + idx=0, + **kwargs, + ) + + def process_results(self, doc: dict, results: Tuple[float]) -> dict: + (loglikelihood,) = results + words = self.count_words(self.doc_to_target(doc)) + bytes_ = self.count_bytes(self.doc_to_target(doc)) + return { + "word_perplexity": (loglikelihood, words), + "byte_perplexity": (loglikelihood, bytes_), + "bits_per_byte": (loglikelihood, bytes_), + } + + def aggregation(self) -> dict: + return { + "word_perplexity": weighted_perplexity, + "byte_perplexity": weighted_perplexity, + "bits_per_byte": 
bits_per_byte, + } + + @classmethod + def count_bytes(cls, doc) -> int: + return len(doc.encode("utf-8")) + + @classmethod + def count_words(cls, doc) -> int: + """Downstream tasks with custom word boundaries should override this!""" + return len(re.split(r"\s+", doc)) diff --git a/venv/lib/python3.10/site-packages/lm_eval/caching/__pycache__/cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/caching/__pycache__/cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..877bed1e0af8723e3279d9249107d6238999d902 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/caching/__pycache__/cache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/caching/__pycache__/cache.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/caching/__pycache__/cache.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..859c5ee0b2175e994695ccae3e5224f6204a6091 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/caching/__pycache__/cache.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/caching/cache.py b/venv/lib/python3.10/site-packages/lm_eval/caching/cache.py new file mode 100644 index 0000000000000000000000000000000000000000..63691435215a05894d206f3f8218ab23c5d2e250 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/caching/cache.py @@ -0,0 +1,55 @@ +import hashlib +import os + +import dill + +from lm_eval.utils import eval_logger + + +MODULE_DIR = os.path.dirname(os.path.realpath(__file__)) + +OVERRIDE_PATH = os.getenv("LM_HARNESS_CACHE_PATH") + + +PATH = OVERRIDE_PATH if OVERRIDE_PATH else f"{MODULE_DIR}/.cache" + +# This should be sufficient for uniqueness +HASH_INPUT = "EleutherAI-lm-evaluation-harness" + +HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode("utf-8")).hexdigest() + +FILE_SUFFIX = f".{HASH_PREFIX}.pickle" + + +def load_from_cache(file_name): + try: + path = 
f"{PATH}/{file_name}{FILE_SUFFIX}" + + with open(path, "rb") as file: + cached_task_dict = dill.loads(file.read()) + return cached_task_dict + + except Exception: + eval_logger.debug(f"{file_name} is not cached, generating...") + pass + + +def save_to_cache(file_name, obj): + if not os.path.exists(PATH): + os.mkdir(PATH) + + file_path = f"{PATH}/{file_name}{FILE_SUFFIX}" + + eval_logger.debug(f"Saving {file_path} to cache...") + with open(file_path, "wb") as file: + file.write(dill.dumps(obj)) + + +# NOTE the "key" param is to allow for flexibility +def delete_cache(key: str = ""): + files = os.listdir(PATH) + + for file in files: + if file.startswith(key) and file.endswith(FILE_SUFFIX): + file_path = f"{PATH}/{file}" + os.unlink(file_path) diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/__init__.py b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/__init__.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5339e0f685e998c142be58e8033ad5e6d0e8d55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/archiver.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/archiver.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e5ea673e3898cd0d1f91211ffcfa61182126a7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/archiver.cpython-312.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/decontaminate.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/decontaminate.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30e7f2dd7a8cff43bbc25686280c207d9c985d00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/decontaminate.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/janitor.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/janitor.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e157b0dfb739b059969d8b8d16b1d0b78e3ca16 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/decontamination/__pycache__/janitor.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/archiver.py b/venv/lib/python3.10/site-packages/lm_eval/decontamination/archiver.py new file mode 100644 index 0000000000000000000000000000000000000000..fa8a715f78e4cccef9f930e5cf448c4481730c2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/decontamination/archiver.py @@ -0,0 +1,171 @@ +import datetime +import io +import json +import mmap +import os +from pathlib import Path +from typing import Any + +import jsonlines +import tqdm +import zstandard + + +def json_serial(obj: Any) -> str: + """JSON serializer for objects not serializable by default json code""" + + if isinstance(obj, (datetime.datetime,)): + return obj.isoformat() + raise TypeError("Type %s not serializable" % type(obj)) + + +# Modified version of lm_dataformat Archive for single file. 
+class Archive: + def __init__(self, file_path: str, compression_level: int = 3) -> None: + self.file_path = file_path + dir_name = os.path.dirname(file_path) + if dir_name: + os.makedirs(dir_name, exist_ok=True) + self.fh = open(self.file_path, "wb") + self.cctx = zstandard.ZstdCompressor(level=compression_level) + self.compressor = self.cctx.stream_writer(self.fh) + + def add_data(self, data, meta=None) -> None: + if meta is None: + meta = {} + self.compressor.write( + json.dumps({"text": data, "meta": meta}, default=json_serial).encode( + "UTF-8" + ) + + b"\n" + ) + + def commit(self) -> None: + self.compressor.flush(zstandard.FLUSH_FRAME) + self.fh.flush() + self.fh.close() + + +# Modified version of lm_dataformat Reader with self.fh set, allowing peeking for tqdm. +class Reader: + def __init__(self) -> None: + pass + + def read( + self, + file, + get_meta: bool = False, + autojoin_paragraphs: bool = True, + para_joiner: str = "\n\n", + ): + with open(file, "rb") as fh: + self.fh = fh + cctx = zstandard.ZstdDecompressor() + reader = io.BufferedReader(cctx.stream_reader(fh)) + rdr = jsonlines.Reader(reader) + for ob in rdr: + # naive jsonl where each object is just the string itself, with no meta. For legacy compatibility. 
+ if isinstance(ob, str): + assert not get_meta + yield ob + continue + + text = ob["text"] + + if autojoin_paragraphs and isinstance(text, list): + text = para_joiner.join(text) + + if get_meta: + yield text, (ob["meta"] if "meta" in ob else {}) + else: + yield text + + +class TextArchive: + def __init__(self, file_path, mode: str = "rb+") -> None: + self.file_path = file_path + dir_name = os.path.dirname(file_path) + if dir_name: + os.makedirs(dir_name, exist_ok=True) + + if not os.path.exists(file_path): + Path(file_path).touch() + + self.fh = open(self.file_path, mode) + + def add_data(self, data) -> None: + self.fh.write(data.encode("UTF-8") + b"\n") + + def commit(self) -> None: + self.fh.flush() + self.fh.close() + + +class TextReader: + def __init__(self, file_path) -> None: + self.file_path = file_path + + # Optimized mmap read with infrequent tqdm updates to maintain speed + # Tested up to 250MB/s. + def read_tqdm(self, update_frequency: int = 10000): + current_file_position = 0 + line_counter = 0 + with open(self.file_path, "r", encoding="utf-8") as fh, tqdm.tqdm( + total=os.path.getsize(self.file_path), + dynamic_ncols=True, + unit="byte", + unit_scale=1, + ) as progress: + with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj: + for line in iter(mmap_obj.readline, b""): + line = line.decode("utf-8") + line_counter += 1 + if line_counter == update_frequency: + new_file_pos = mmap_obj.tell() + bytes_read = new_file_pos - current_file_position + current_file_position = new_file_pos + progress.update(bytes_read) + line_counter = 0 + yield line[:-1] + + def read_and_tell(self): + current_file_position = 0 + with open(self.file_path, "r", encoding="utf8") as fh: + with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj: + for line in iter(mmap_obj.readline, b""): + line = line.decode("utf-8") + new_file_pos = mmap_obj.tell() + raw_bytes_read = new_file_pos - current_file_position + current_file_position = new_file_pos 
+ yield line[:-1], raw_bytes_read + + def read(self): + with open(self.file_path, "r", encoding="utf8") as fh: + with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj: + for line in iter(mmap_obj.readline, b""): + line = line.decode("utf-8") + yield line[:-1] + + def read_slow(self): + with open(self.file_path, "r", encoding="utf8") as fh: + while True: + line = fh.readline() + if line == -1 or line == "": + break + else: + yield line[:-1] + + +# Optimized for speed. Decompresses the archive in shell before +# using the mmap'd TextReader. +class ZStdTextReader: + def __init__(self, file) -> None: + self.file = file + + def read_tqdm(self): + decompressed_file = self.file[:-4] + print("Decompressing file, please wait...") + os.system(f"zstd -d {self.file}") # linux decompress is faster + reader = TextReader(decompressed_file) + yield from reader.read_tqdm() + os.remove(decompressed_file) diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/decontaminate.py b/venv/lib/python3.10/site-packages/lm_eval/decontamination/decontaminate.py new file mode 100644 index 0000000000000000000000000000000000000000..3874eb58be99aebd2736aeede76c13145231434f --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/decontamination/decontaminate.py @@ -0,0 +1,166 @@ +import collections +import glob +import json +import os +import pickle +import random +import time + +from .archiver import ZStdTextReader +from .janitor import Janitor, word_ngrams + + +# Was used for testing the evaluator decoupled from the full logic below +def get_train_overlap_stub(docs: dict, ngrams_path: str, ngrams_n_size: str): + simulated_overlap = 0.1 + contaminated = int(len(docs) * simulated_overlap) + return random.sample(range(len(docs)), contaminated) + + +# Returns a dictionary containing all overlapping documents in each +# task. In the standard use case, an overlap occurs when any of the 13-grams +# found in the task document exist in the training set documents. 
+# +# To generate 13-grams for the pile see scripts/clean_training_data. The final output of these +# scripts are an info.json file containing the n_gram_size (13) and a bunch of "ngrams_{x}.bkt.txt.sorted.zst" +# files. These should exist in the "ngrams_path" provided to this function. + + +# Algorithm: +# 1. Build lookups for each dataset {ngram: list(document_ids)} +# 2. Merge into an overall lookup {ngram: [(task_name, task_set, doc_ids),]} +# 3. Full scan the 13-grams from the training set against the merged lookup, +# saving matches in the "duplicates" dictionary {(task_name, task_set): set(doc_ids)} +# 4. Strip the task_set from the dictionary keys and return +# +# We cache the task+set lookups as well as the overlaps. +def get_train_overlap(docs_by_task_set: dict, ngrams_path: str, limit: int) -> dict: + # return get_train_overlap_stub(docs, ngrams_path, ngrams_n_size) + + info_dict_path = os.path.join(ngrams_path, "info.json") + info_dict = json.load(open(info_dict_path, "r", encoding="utf-8")) + ngrams_n_size = info_dict["ngram_size"] + + janitor = Janitor() + + # Build lookup for each dataset first in case we use different task combinations later + print("Building Lookups...") + start = time.perf_counter() + + def get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit) -> str: + return f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.overlaps" + + lookups = {} + duplicates = {} # (task_name, task_set): set(doc_ids)} + sets_to_decontaminate = len(docs_by_task_set.keys()) + + for (task_name, task_set), docs in docs_by_task_set.items(): + if not os.path.exists(f"data/{task_name}"): + os.mkdir(f"data/{task_name}") + + # Check if we've decontaminated this combination before + overlaps_dump_path = get_overlaps_dump_path( + task_name, task_set, ngrams_n_size, limit + ) + if os.path.exists(overlaps_dump_path): + duplicates[(task_name, task_set)] = pickle.load( + open(overlaps_dump_path, "rb") + ) + sets_to_decontaminate -= 1 + continue + 
else: + duplicates[(task_name, task_set)] = set() + + # Build/load the task lookup {ngram: set(documents)}. + task_set_lookup_path = ( + f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.lookup" + ) + if os.path.exists(task_set_lookup_path): + print(f"{task_set_lookup_path} available, loading...") + lookups[(task_name, task_set)] = pickle.load( + open(task_set_lookup_path, "rb") + ) + else: + print(f"{task_set_lookup_path} not available, building...") + lookup = collections.defaultdict(set) + + for doc_id, document in enumerate(docs): + ngrams = word_ngrams(janitor.normalize_string(document), ngrams_n_size) + for ngram in ngrams: + lookup[ngram].add(doc_id) + + pickle.dump(lookup, open(task_set_lookup_path, "wb")) + lookups[(task_name, task_set)] = lookup + + elapsed = time.perf_counter() - start + print(f"Building lookups took {elapsed:0.5f} seconds.") + + matched_ngrams = [] + + if sets_to_decontaminate > 0: + print("Merging lookups...") + start = time.perf_counter() + merged_lookup = collections.defaultdict(list) + for (task_name, task_set), lookup in lookups.items(): + for ngram, doc_ids in lookup.items(): + merged_lookup[ngram].append((task_name, task_set, doc_ids)) + + elapsed = time.perf_counter() - start + print(f"Merging lookups took {elapsed:0.5f} seconds.") + + print(f"{ngrams_n_size} grams files found in {ngrams_path}:") + files = glob.glob(os.path.join(ngrams_path, "*.sorted.zst")) + print(files) + + for file in files: + start = time.perf_counter() + print(f"Scanning {file}") + reader = ZStdTextReader(file) + total_ngrams = 0 + unique_ngrams = 0 + matching_unique = 0 + non_matching_unique = 0 + + current_ngram = "" + for line in reader.read_tqdm(): # Scan training set ngrams file + total_ngrams += 1 + [ngram, document_id] = line.rsplit(" ", 1) + if ( + ngram != current_ngram + ): # Only need to match the ngram once in training set + unique_ngrams += 1 + current_ngram = ngram + if ngram in merged_lookup: + matched_ngrams.append(ngram) # 
For logging + matching_unique += 1 + for task_name, task_set, doc_ids in merged_lookup[ngram]: + task_doc_set = duplicates[(task_name, task_set)] + for doc_id in doc_ids: # Record contamination across all relevant task/set combos + task_doc_set.add(doc_id) + del merged_lookup[ngram] # No point matching again + else: + non_matching_unique += 1 + + print(f"Total Ngrams: {total_ngrams}") + print(f"Unique Ngrams: {unique_ngrams}") + print(f"Unique Matching: {matching_unique}") + print(f"Unique Non Matching: {non_matching_unique}") + print("Matched ngrams:") + for ngram in matched_ngrams: + print(ngram) + + elapsed = time.perf_counter() - start + print(f"Read took {elapsed:0.5f} seconds.") + print(f"Speed: {(os.path.getsize(file)/1000000.0)/elapsed}MB/second") + + print(duplicates) + + # Dump overlaps separately + for (task_name, task_set), doc_ids in duplicates.items(): + overlaps_dump_path = get_overlaps_dump_path( + task_name, task_set, ngrams_n_size, limit + ) + pickle.dump(doc_ids, open(overlaps_dump_path, "wb")) + + # Strip task set and return + return {task_name: doc_ids for (task_name, task_set), doc_ids in duplicates.items()} diff --git a/venv/lib/python3.10/site-packages/lm_eval/decontamination/janitor.py b/venv/lib/python3.10/site-packages/lm_eval/decontamination/janitor.py new file mode 100644 index 0000000000000000000000000000000000000000..cedf8a5717aa8156674836ba236fdcabf36e0487 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/decontamination/janitor.py @@ -0,0 +1,328 @@ +import pickle +import re +import string +import traceback +from typing import Iterator, List, Sequence, Tuple, TypeVar + + +# This is a cpp module. Compile janitor_util.cpp with: +# c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) janitor_util.cpp -o janitor_util$(python3-config --extension-suffix) -undefined dynamic_lookup +try: + import janitor_util + + JANITOR_CPP = True +except Exception: + print("WARNING: C++ module could not be loaded. 
Janitor running in python mode") + traceback.print_exc() + JANITOR_CPP = False + +T = TypeVar("T") + + +# Implementation from nltk source +# https://www.nltk.org/_modules/nltk/util.html +def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[Tuple[T, ...]]: + history = [] + while n > 1: + # PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator + try: + next_item = next(sequence) + except StopIteration: + # no more data, terminate the generator + return + history.append(next_item) + n -= 1 + for item in sequence: + history.append(item) + yield tuple(history) + del history[0] + + +def word_ngrams(s: str, n: int) -> Iterator[str]: + """Splits a string into ngram words""" + tokens = s.split() # not a generator :( + ngram_seqs = form_ngrams(iter(tokens), n) + return (" ".join(ngram) for ngram in ngram_seqs) + + +# Does character sequences only - combined faster function to play around with later +# def word_ngrams_indices_combined(sequence, n): +# current_word = "" +# history = [] +# gap = False; +# start = 0 +# end = 0 +# for character in sequence: +# if character == " ": +# if not gap: +# gap = True +# history.append(current_word) +# end += len(current_word) - 1 +# current_word = "" +# if len(history) == n: +# yield (tuple(history), start, end) +# del history[0] +# start = end + 1 +# end = start +# else: +# gap = False +# current_word += character + + +# https://stackoverflow.com/questions/13734451/string-split-with-indices-in-python +def split_indices(s: str) -> Iterator[Tuple[str, Tuple[int, int]]]: + """Splits a string on whitespaces and records the indices of each in the original string. + @:return generator((word, (start_idx, end_idx)), ...) 
+ """ + return ((m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r"\S+", s)) + + +def word_ngrams_indices(s: str, n: int) -> Iterator[Tuple[str, Tuple[int, int]]]: + """Splits a string into pairs of (ngram words, their start/end indices)""" + tokens_with_indices = split_indices(s) + + # Generator of ngrams of (word, idx_pairs) + # ( + # [(word, (start,end)), (word, (start, end))...], + # [(word, (start, end)), ...], + # ... + # ) + ngram_seqs_with_indices = form_ngrams(tokens_with_indices, n) + + # Generator of pairs of word and index ngrams + # ( + # ([word, word, ...], [(start,end), (start,end), ...]), + # ... + # ) + ngram_indices_pairs = ( + zip(*ngram_with_indices) for ngram_with_indices in ngram_seqs_with_indices + ) + + # Generator of ( (word_ngram, (start, end)), (word_ngram, start, end)), ...) + return ( + (" ".join(ngram_seq), (indices[0][0], indices[-1][1])) + for ngram_seq, indices in ngram_indices_pairs + ) + + +class Janitor: + # FIXME delete_chars: Should anything else go here? Special chars? + def __init__( + self, + ngram_n: int = 13, + window_to_remove: int = 200, + too_dirty_cutoff: int = 10, + minimum_slice_length: int = 200, + delete_chars: str = string.punctuation, + ) -> None: + self.ngram_n = ngram_n + self.window_to_remove = window_to_remove + self.too_dirty_cutoff = too_dirty_cutoff + self.minimum_slice_length = minimum_slice_length + self.delete_chars = delete_chars + + self.dirt_ngrams = set() + + # If in python, we'll translate uppercase to lowercase and delete naughty characters. 
+ # This is fast by python standards + # https://stackoverflow.com/questions/638893/what-is-the-most-efficient-way-in-python-to-convert-a-string-to-all-lowercase-st + self.translation_table = str.maketrans( + string.ascii_lowercase + string.ascii_uppercase, # These characters + string.ascii_lowercase * 2, # Become these characters + self.delete_chars, # These are deleted + ) + + ############## + # I/O for saving contamination ngrams + ############## + + def save_contamination_ngrams(self, filename: str) -> None: + with open(filename, "wb") as fp: + pickle.dump(filename, fp) + + def load_contamination_ngrams(self, filename: str) -> None: + with open(filename, "rb") as fp: + self.dirt_ngrams = pickle.load(fp) + + ############## + # Call these :) + ############## + + def register_contaminant(self, dirt_string: str) -> None: + """Register a string as contamination to be removed, e.g. a test set + This breaks the dirt_string into ngrams to store for future cleaning""" + if JANITOR_CPP: + return self.register_contaminant_cpp(dirt_string) + else: + print("WARNING: Janitor running in python mode") + return self.register_contaminant_python(dirt_string) + + def clean(self, dirty_string: str) -> List[str]: + """Clean a string (e.g. a training set) by removing all ngrams previously + registered as contaminants. 
Returns a list of clean chunks, or empty if
        the string was too dirty"""
        # Prefer the compiled extension when it imported successfully.
        if JANITOR_CPP:
            return self.clean_cpp(dirty_string)
        else:
            print("WARNING: Janitor running in python mode")
            return self.clean_python(dirty_string)

    def _split_chunks(
        self, dirty_string: str, dirty_parts: Sequence[Tuple]
    ) -> List[str]:
        # Cut a window of `window_to_remove` characters around every
        # contaminated ngram span and keep only the clean slices in between
        # that are longer than `minimum_slice_length`.
        # `dirty_parts` yields (ngram, start, end) tuples; the ngram itself
        # is unused here (the python path passes None for it).
        clean_chunks = []
        splice_idx = 0
        # Sentinel: if dirty_parts is empty, `end` stays -1 and the final
        # check below keeps the whole string (dirty_string[0:]).
        end = -1
        for i, (ngram, start, end) in enumerate(dirty_parts):
            if i >= self.too_dirty_cutoff:
                # Too many contaminated spans: discard the whole document.
                return []
            start = max(0, start - self.window_to_remove)
            end = min(len(dirty_string), end + self.window_to_remove)

            if start - splice_idx > self.minimum_slice_length:
                clean_chunks.append(dirty_string[splice_idx:start])
            # Advance past the removed window whether or not we kept a chunk.
            splice_idx = end

        if end < len(dirty_string) - self.minimum_slice_length:
            # Keep the tail after the last contaminated window.
            clean_chunks.append(dirty_string[end + 1 :])

        return clean_chunks

    ##############
    # Fast C++
    ##############

    def register_contaminant_cpp(self, dirt_string) -> None:
        # The extension performs normalization (lowercase + strip
        # delete_chars) and ngram extraction in one pass.
        self.dirt_ngrams.update(
            janitor_util.clean_ngram(dirt_string, self.delete_chars, self.ngram_n)
        )

    def clean_cpp(self, dirty_string: str) -> List[str]:
        contamination_indices = janitor_util.clean_ngram_with_indices(
            dirty_string, self.delete_chars, self.ngram_n
        )
        return self._split_chunks(dirty_string, contamination_indices)

    ##############
    # Slow python
    ##############

    def normalize_string(self, s: str) -> str:
        # Lowercase and strip delete_chars in a single C-level translate pass.
        return s.translate(self.translation_table)

    def register_contaminant_python(self, dirt_string: str) -> None:
        self.dirt_ngrams.update(
            word_ngrams(self.normalize_string(dirt_string), self.ngram_n)
        )

    def clean_python(self, dirty_string: str) -> List[str]:
        # Indices are computed on the *raw* string so spans line up with
        # dirty_string, but membership is tested on the normalized ngram.
        # The leading None mimics the (ngram, start, end) tuple shape the
        # cpp path produces and _split_chunks expects.
        contamination_indices = (
            (None, *idx_pair)
            for dirty_ngram, idx_pair in word_ngrams_indices(dirty_string, self.ngram_n)
            if self.normalize_string(dirty_ngram) in self.dirt_ngrams
        )
        return self._split_chunks(dirty_string, contamination_indices)


##################################################################
# 
Tests +################################################################# + +# def print_cpp(): +# source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2 + +# for i in range(1, 10, 2): +# pprint(janitor_util.clean_ngram(source, string.punctuation, i)) +# for ngram, start, end in \ +# janitor_util.clean_ngram_with_indices(source, string.punctuation, i): +# print(ngram, "\t", start, end, source[start:end].replace("\n", "\\n")) + + +# def test_cpp(): +# source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2 +# contaminant = "dirty boy. Clean he he" + +# jan_python = Janitor() +# jan_cpp = Janitor() + +# jan_python.register_contaminant_python(contaminant) +# jan_cpp.register_contaminant(contaminant) + +# assert jan_python.dirt_ngrams == jan_cpp.dirt_ngrams, (jan_python.dirt_ngrams, jan_cpp.dirt_ngrams) + +# assert jan_python.clean_python(source) == jan_cpp.clean(source), \ +# (jan_python.clean_python(source), jan_cpp.clean(source)) + +# print("Passed test, python==cpp") + + +# def benchmark(): +# # Download and put in data folder: enwik8 (100 MB) from https://cs.fit.edu/~mmahoney/compression/textdata.html +# setup = \ +# """ +# with open("data/enwik8", "r") as f: +# data = f.read() +# jan = Janitor(too_dirty_cutoff=1000) +# jan.register_contaminant(''' +# theories is that there is a connection between "geekdom" and autism. +# This is hinted, for instance, by a ''Wired Magazine'' article in 2001 entitled " +# The [[Geek]] Syndrome", which is a point argued by many in the autism rights +# movement{{ref|Wired}}. 
This article, many professionals assert, is just one example of +# the media's application of mental disease labels to what is actually variant normal behavior +# &mdash;they argue that shyness, lack of athletic ability or social skills, and intellectual +# interests, even when they seem unusual to others, are not in themselves signs of autism or +# Asperger's syndrome. Others assert that it is actually the medical profession which is applying +# mental disease labels to children who in the past would have simply been accepted as a little +# different or even labeled 'gifted'. See [[clinomorphism]] for further discussion of this issue. +# Due to the recent publicity surrounding autism and autis +# ultan Al Nahyan]] granted [[Petroleum]] concessions, and oil was first found in 1958. At first, +# oil money had a marginal impact. A few lowrise concete buildings were erected, and the first +# paved road was completed in 1961, but Sheikh Shakbut, uncertain whether the new oil royalties +# would last, took a cautious approach, preferring to save the revenue rather than investing it in +# development. His brother, [[Zayed bin Sultan Al Nahayan]], saw that oil wealth had the potential +# to transform Abu Dhabi. The ruling Al Nahayan family decided that Sheikh Zayed should replace his +# brother as Ruler and carry out his vision of developing the country. On [[August 6]], [[1966]], +# with the assistance of the British, Sheikh Zayed became the new ruler. See generally, Al-Fahim, M, +# ''From Rags to Riches: A Story of Abu Dhabi'', Chapter Six (London Centre of Arab Studies, 1995), +# ISBN 1 900404 00 1. With the announcement by Britain in 1968 that it would withdraw from the +# Gulf area by 1971, Sheikh Zayed became the main driving force behind the formation of the +# [[United Arab Emirates]]. 
After the Emirates gained independence in 1971, +# ''') +# """ + +# n = 1 +# print(f"Timing {n} run on 100 MB") +# print("Register contaminant") +# # print("\tPython", timeit.timeit("jan.register_contaminant_python(data)", setup=setup, globals=globals(), number=n)) +# print("\tCpp", timeit.timeit("jan.register_contaminant(data)", setup=setup, globals=globals(), number=n)) + +# print("Clean") +# # print("\tPython", timeit.timeit("jan.clean_python(data)", setup=setup, globals=globals(), number=n)) +# print("\tCpp", timeit.timeit("jan.clean(data)", setup=setup, globals=globals(), number=n)) + + +# def test_janitor_general(): +# source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2 +# contaminant = "dirty boy. Clean he he" + +# jan = Janitor(ngram_n=3) +# jan.register_contaminant(contaminant) +# cleaned = " ".join(jan.clean(source)) +# for contam in jan.dirt_ngrams: +# assert contam not in cleaned, contam + +# filename = "data/saved_contam" +# jan.save_contamination_ngrams(filename) + +# jan = Janitor(ngram_n=3) +# jan.load_contamination_ngrams(filename) +# cleaned = " ".join(jan.clean(source)) +# for contam in jan.dirt_ngrams: +# assert contam not in cleaned, contam + + +# if __name__ == "__main__": +# test() +# # print_cpp() +# # test_cpp() +# # benchmark() diff --git a/venv/lib/python3.10/site-packages/lm_eval/evaluator.py b/venv/lib/python3.10/site-packages/lm_eval/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..73226b0514f1b556e4e2d4b06c9e03362180b335 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/evaluator.py @@ -0,0 +1,691 @@ +import itertools +import json +import logging +import random +import time +from collections import defaultdict +from typing import TYPE_CHECKING, List, Optional, Union + +import numpy as np +import torch + +import lm_eval.api.metrics +import lm_eval.api.registry +import lm_eval.models +from lm_eval.caching.cache import delete_cache +from 
lm_eval.evaluator_utils import ( + consolidate_results, + get_sample_size, + get_task_list, + prepare_print_tasks, + print_writeout, + run_task_tests, +) +from lm_eval.loggers import EvaluationTracker +from lm_eval.loggers.utils import add_env_info, add_tokenizer_info, get_git_commit_hash +from lm_eval.tasks import TaskManager, get_task_dict +from lm_eval.utils import ( + eval_logger, + handle_non_serializable, + hash_string, + positional_deprecated, + simple_parse_args_string, +) + + +if TYPE_CHECKING: + from lm_eval.api.model import LM + from lm_eval.tasks import Task + + +@positional_deprecated +def simple_evaluate( + model, + model_args: Optional[Union[str, dict]] = None, + tasks: Optional[List[Union[str, dict, object]]] = None, + num_fewshot: Optional[int] = None, + batch_size: Optional[int] = None, + max_batch_size: Optional[int] = None, + device: Optional[str] = None, + use_cache: Optional[str] = None, + cache_requests: bool = False, + rewrite_requests_cache: bool = False, + delete_requests_cache: bool = False, + limit: Optional[Union[int, float]] = None, + bootstrap_iters: int = 100000, + check_integrity: bool = False, + write_out: bool = False, + log_samples: bool = True, + evaluation_tracker: Optional[EvaluationTracker] = None, + system_instruction: Optional[str] = None, + apply_chat_template: bool = False, + fewshot_as_multiturn: bool = False, + gen_kwargs: Optional[str] = None, + task_manager: Optional[TaskManager] = None, + verbosity: str = "INFO", + predict_only: bool = False, + random_seed: int = 0, + numpy_random_seed: int = 1234, + torch_random_seed: int = 1234, + fewshot_random_seed: int = 1234, +): + """Instantiate and evaluate a model on a list of tasks. + + :param model: Union[str, LM] + Name of model or LM object, see lm_eval.models.get_model + :param model_args: Optional[str, dict] + String or dict arguments for each model class, see LM.create_from_arg_string and LM.create_from_arg_object. + Ignored if `model` argument is a LM object. 
+ :param tasks: list[Union[str, dict, Task]] + List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise. + :param num_fewshot: int + Number of examples in few-shot context + :param batch_size: int or str, optional + Batch size for model + :param max_batch_size: int, optional + Maximal batch size to try with automatic batch size detection + :param device: str, optional + PyTorch device (e.g. "cpu" or "cuda:0") for running models + :param use_cache: str, optional + A path to a sqlite db file for caching model responses. `None` if not caching. + :param cache_requests: bool, optional + Speed up evaluation by caching the building of dataset requests. `None` if not caching. + :param rewrite_requests_cache: bool, optional + Rewrites all of the request cache if set to `True`. `None` if not desired. + :param delete_requests_cache: bool, optional + Deletes all of the request cache if set to `True`. `None` if not desired. + :param limit: int or float, optional + Limit the number of examples per task (only use this for testing), If <1, limit is a percentage of the total number of examples. + :param bootstrap_iters: + Number of iterations for bootstrap statistics, used when calculating stderrs. set to 0 for no stderr calculations to be performed. + :param check_integrity: bool + Whether to run the relevant part of the test suite for the tasks + :param write_out: bool + If True, write out an example document and model input for checking task integrity + :param log_samples: bool + If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis + :param system_instruction: str + System instruction to be applied to the prompt + :param apply_chat_template: bool + If True, apply chat template to the prompt + :param fewshot_as_multiturn: bool + Whether to provide the fewshot examples as a multiturn conversation or a single user turn. 
+ :param gen_kwargs: str + String arguments for model generation + Ignored for all tasks with loglikelihood output_type + :param predict_only: bool + If true only model outputs will be generated and returned. Metrics will not be evaluated + :param random_seed: int + Random seed for python's random module. If set to None, the seed will not be set. + :param numpy_random_seed: int + Random seed for numpy. If set to None, the seed will not be set. + :param torch_random_seed: int + Random seed for torch. If set to None, the seed will not be set. + :param fewshot_random_seed: int + Random seed for fewshot sampler random generator. If set to None, the seed of generator will be set to None. + + :return + Dictionary of results + """ + eval_logger.setLevel(getattr(logging, f"{verbosity}")) + start_date = time.time() + + if delete_requests_cache: + eval_logger.info("Deleting requests cache...") + delete_cache() + + seed_message = [] + if random_seed is not None: + # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1412 + seed_message.append(f"Setting random seed to {random_seed}") + random.seed(random_seed) + + if numpy_random_seed is not None: + seed_message.append(f"Setting numpy seed to {numpy_random_seed}") + np.random.seed(numpy_random_seed) + + if torch_random_seed is not None: + seed_message.append(f"Setting torch manual seed to {torch_random_seed}") + torch.manual_seed(torch_random_seed) + + if seed_message: + eval_logger.info(" | ".join(seed_message)) + + if tasks is None: + tasks = [] + if len(tasks) == 0: + raise ValueError( + "No tasks specified, or no tasks found. Please verify the task names." + ) + + if gen_kwargs is not None: + gen_kwargs = simple_parse_args_string(gen_kwargs) + eval_logger.warning( + "generation_kwargs specified through cli, these settings will update set parameters in yaml tasks. " + "Ensure 'do_sample=True' for non-greedy decoding!" 
+ ) + if gen_kwargs == "": + gen_kwargs = None + + if isinstance(model, str): + if model_args is None: + eval_logger.warning("model_args not specified. Using defaults.") + model_args = "" + + if isinstance(model_args, dict): + eval_logger.info( + f"Initializing {model} model, with arguments: {model_args}" + ) + lm = lm_eval.api.registry.get_model(model).create_from_arg_obj( + model_args, + { + "batch_size": batch_size, + "max_batch_size": max_batch_size, + "device": device, + }, + ) + + else: + eval_logger.info( + f"Initializing {model} model, with arguments: {simple_parse_args_string(model_args)}" + ) + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + model_args, + { + "batch_size": batch_size, + "max_batch_size": max_batch_size, + "device": device, + }, + ) + else: + if not isinstance(model, lm_eval.api.model.LM): + raise TypeError + eval_logger.info("Using pre-initialized model") + lm = model + + if use_cache is not None: + eval_logger.info(f"Using cache at {use_cache + '_rank' + str(lm.rank) + '.db'}") + lm = lm_eval.api.model.CachingLM( + lm, + use_cache + # each rank receives a different cache db. + # necessary to avoid multiple writes to cache at once + + "_rank" + + str(lm.rank) + + ".db", + ) + + if task_manager is None: + task_manager = TaskManager(verbosity) + + task_dict = get_task_dict(tasks, task_manager) + for task_name in task_dict.keys(): + task_obj = task_dict[task_name] + if isinstance(task_obj, tuple): + _, task_obj = task_obj + if task_obj is None: + continue + + if task_obj.get_config("output_type") == "generate_until": + if gen_kwargs is not None: + task_obj.set_config( + key="generation_kwargs", value=gen_kwargs, update=True + ) + + if predict_only: + log_samples = True + eval_logger.info( + f"Processing {task_name} in output-only mode. Metrics will not be calculated!" + ) + # we have to change the class properties post-hoc. This is pretty hacky. 
+ task_obj.override_metric(metric_name="bypass") + + # override tasks' fewshot values to the provided num_fewshot arg value + # except if tasks have it set to 0 manually in their configs--then we should never overwrite that + if num_fewshot is not None: + if (default_num_fewshot := task_obj.get_config("num_fewshot")) == 0: + eval_logger.info( + f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored." + ) + else: + eval_logger.warning( + f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}" + ) + task_obj.set_config(key="num_fewshot", value=num_fewshot) + else: + # if num_fewshot not provided, and the task does not define a default one, default to 0 + if (default_num_fewshot := task_obj.get_config("num_fewshot")) is None: + task_obj.set_config(key="num_fewshot", value=0) + # fewshot_random_seed set for tasks, even with a default num_fewshot (e.g. in the YAML file) + task_obj.set_fewshot_seed(seed=fewshot_random_seed) + eval_logger.info( + f"Setting fewshot random generator seed to {fewshot_random_seed}" + ) + + if check_integrity: + run_task_tests(task_list=tasks) + + if evaluation_tracker is not None: + evaluation_tracker.general_config_tracker.log_experiment_args( + model_source=model, + model_args=model_args, + system_instruction=system_instruction, + chat_template=lm.chat_template if apply_chat_template else None, + fewshot_as_multiturn=fewshot_as_multiturn, + ) + + results = evaluate( + lm=lm, + task_dict=task_dict, + limit=limit, + cache_requests=cache_requests, + rewrite_requests_cache=rewrite_requests_cache, + bootstrap_iters=bootstrap_iters, + write_out=write_out, + log_samples=log_samples, + system_instruction=system_instruction, + apply_chat_template=apply_chat_template, + fewshot_as_multiturn=fewshot_as_multiturn, + verbosity=verbosity, + ) + + if lm.rank == 0: + if isinstance(model, str): + model_name = model + elif hasattr(model, "config") and hasattr(model.config, 
"_name_or_path"): + model_name = model.config._name_or_path + else: + model_name = type(model).__name__ + + # add info about the model and few shot config + results["config"] = { + "model": model_name, + "model_args": model_args, + } + # add more detailed model info if available + if isinstance(lm, lm_eval.models.huggingface.HFLM): + results["config"].update(lm.get_model_info()) + # add info about execution + results["config"].update( + { + "batch_size": batch_size, + "batch_sizes": ( + list(lm.batch_sizes.values()) if hasattr(lm, "batch_sizes") else [] + ), + "device": device, + "use_cache": use_cache, + "limit": limit, + "bootstrap_iters": bootstrap_iters, + "gen_kwargs": gen_kwargs, + "random_seed": random_seed, + "numpy_seed": numpy_random_seed, + "torch_seed": torch_random_seed, + "fewshot_seed": fewshot_random_seed, + } + ) + results["git_hash"] = get_git_commit_hash() + results["date"] = start_date + add_env_info(results) # additional environment info to results + add_tokenizer_info(results, lm) # additional info about tokenizer + return results + else: + return None + + +@positional_deprecated +def evaluate( + lm: "LM", + task_dict, + limit: Optional[int] = None, + cache_requests: bool = False, + rewrite_requests_cache: bool = False, + bootstrap_iters: Optional[int] = 100000, + write_out: bool = False, + log_samples: bool = True, + system_instruction: Optional[str] = None, + apply_chat_template: bool = False, + fewshot_as_multiturn: bool = False, + verbosity: str = "INFO", +): + """Instantiate and evaluate a model on a list of tasks. + + :param lm: obj + Language Model + :param task_dict: dict[str, Task] + Dictionary of tasks. Tasks will be taken to have name type(task).config.task . + :param limit: int, optional + Limit the number of examples per task (only use this for testing) + :param bootstrap_iters: + Number of iterations for bootstrap statistics, used when calculating stderr. Set to 0 for skipping all stderr calculations. 
+ :param write_out: bool + If True, write out an example document and model input for checking task integrity + :param log_samples: bool + If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis + :param system_instruction: str + System instruction to be applied to the prompt + :param apply_chat_template: bool + If True, apply chat template to the prompt + :param fewshot_as_multiturn: bool + Whether to provide the fewshot examples as a multiturn conversation or a single user turn. + :return + Dictionary of results + """ + + eval_logger.setLevel(getattr(logging, f"{verbosity}")) + + # tracks all Instances/requests a model must generate output on. + requests = defaultdict(list) + # stores the amount to pad out reqs per req. type so that + # number of fwd passes per distributed rank is equal + padding_requests = defaultdict(int) + + # get lists of group hierarchy and each type of request + task_hierarchy, eval_tasks = get_task_list(task_dict) + if not log_samples: + if not all( + "bypass" not in getattr(task_output.task, "_metric_fn_list", {}).keys() + for task_output in eval_tasks + ): + raise ValueError("log_samples must be True for 'bypass' metric-only tasks") + for task_output in eval_tasks: + task: Task = task_output.task + limit = get_sample_size(task, limit) + task.build_all_requests( + limit=limit, + rank=lm.rank, + world_size=lm.world_size, + cache_requests=cache_requests, + rewrite_requests_cache=rewrite_requests_cache, + system_instruction=system_instruction, + apply_chat_template=apply_chat_template, + fewshot_as_multiturn=fewshot_as_multiturn, + chat_template=getattr(lm, "apply_chat_template") + if apply_chat_template + else None, + tokenizer_name=getattr(lm, "tokenizer_name", "") + if apply_chat_template + else "", + ) + eval_logger.debug( + f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}" + ) + if write_out: + print_writeout(task) + # aggregate Instances by LM method 
requested to get output. + for instance in task.instances: + reqtype = instance.request_type + requests[reqtype].append(instance) + + if lm.world_size > 1: + instances_rnk = torch.tensor(len(task._instances), device=lm.device) + gathered_item = ( + lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist() + ) + # "multiple_choice" task types dispatch (several) "loglikelihood" request types + reqtype = ( + "loglikelihood" + if task.OUTPUT_TYPE == "multiple_choice" + else task.OUTPUT_TYPE + ) + # compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks) + numpad = max(gathered_item) - gathered_item[lm.rank] + # todo: may not account for padding in cases like SquadV2 which has multiple req types + padding_requests[reqtype] += numpad + + ### Run LM on inputs, get all outputs ### + # execute each type of request + for reqtype, reqs in requests.items(): + eval_logger.info(f"Running {reqtype} requests") + # create `K` copies of each request `req` based off `K = req.repeats` + cloned_reqs = [] + for req in reqs: + cloned_reqs.extend([req] * req.repeats) + + if (lm.world_size > 1) and (padding_requests[reqtype] > 0): + for _ in range(padding_requests[reqtype]): + cloned_reqs.extend([req] * req.repeats) + + # run requests through model + resps = getattr(lm, reqtype)(cloned_reqs) + + # put responses from model into a list of length K for each request. + for x, req in zip(resps, cloned_reqs): + req.resps.append(x) + + if lm.world_size > 1: + lm.accelerator.wait_for_everyone() + + RANK = lm.rank + WORLD_SIZE = lm.world_size + ### Postprocess outputs ### + # TODO: del model here, maybe (idea: allow user to specify device of e.g. 
reward model separately) + for task_output in eval_tasks: + task = task_output.task + task.apply_filters() + + ### Collect values of metrics on all datapoints ### + # # unpack results and sort back in order and return control to Task + # TODO: make it possible to use a different metric per filter + # Pre-process task.instances to group by doc_id + instances_by_doc_id = defaultdict(list) + for instance in task.instances: + instances_by_doc_id[instance.doc_id].append(instance) + # Sort instances within each group + for instances in instances_by_doc_id.values(): + instances.sort(key=lambda x: x.idx) + # iterate over different filters used + for filter_key in task.instances[0].filtered_resps.keys(): + doc_iterator = task.doc_iterator( + rank=RANK, limit=limit, world_size=WORLD_SIZE + ) + for doc_id, doc in doc_iterator: + requests = instances_by_doc_id[doc_id] + metrics = task.process_results( + doc, [req.filtered_resps[filter_key] for req in requests] + ) + if log_samples: + target = task.doc_to_target(doc) + example = { + "doc_id": doc_id, + "doc": doc, + "target": target, + "arguments": [req.args for req in requests], + "resps": [req.resps for req in requests], + "filtered_resps": [ + req.filtered_resps[filter_key] for req in requests + ], + "doc_hash": hash_string( + json.dumps( + requests[0].doc, + indent=2, + default=handle_non_serializable, + ensure_ascii=False, + ) + ), + "prompt_hash": hash_string(requests[0].arguments[0]), + "target_hash": hash_string(str(target)), + } + example.update(metrics) + task_output.logged_samples.append(example) + for metric, value in metrics.items(): + task_output.sample_metrics[(metric, filter_key)].append(value) + + if WORLD_SIZE > 1: + # if multigpu, then gather data across all ranks to rank 0 + # first gather logged samples across all ranks + for task_output in eval_tasks: + if log_samples: + # for task_name, task_samples in list(samples.items()): + full_samples = [None] * WORLD_SIZE if RANK == 0 else None + 
torch.distributed.gather_object( + obj=task_output.logged_samples, + object_gather_list=full_samples, + dst=0, + ) + + if RANK == 0: + task_output.logged_samples = list( + itertools.chain.from_iterable(full_samples) + ) + + # then collect metrics across all ranks + for metrics in task_output.sample_metrics: + metric_list = [None] * WORLD_SIZE if RANK == 0 else None + torch.distributed.gather_object( + obj=task_output.sample_metrics[metrics], + object_gather_list=metric_list, + dst=0, + ) + if RANK == 0: + task_output.sample_metrics[metrics] = list( + itertools.chain.from_iterable(metric_list) + ) + + if RANK == 0: + ### Aggregate results over all datapoints ### + # aggregate results ; run bootstrap CIs + for task_output in eval_tasks: + task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters) + ( + results, + samples, + configs, + versions, + num_fewshot, + higher_is_better, + ) = consolidate_results(eval_tasks) + + ### Calculate group metrics ### + if bool(results): + for group, task_list in reversed(task_hierarchy.items()): + if len(task_list) == 0: + # task_hierarchy entries are either + # `group_name: [subtask1, subtask2, ...]` + # or `task_name: []`. + # we only want to operate on groups here. + continue + + # collect all higher_is_better values for metrics + # in the group's subtasks. + # TODO: clean this up ; unify with the below metric_list loop? + _higher_is_better = {} + for task in task_list: + for m, h in higher_is_better[task].items(): + if m not in _higher_is_better.keys(): + _higher_is_better[m] = h + if ( + m in _higher_is_better + and _higher_is_better[m] is not None + and _higher_is_better[m] != h + ): + eval_logger.warning( + f"Higher_is_better values for metric {m} in group {group} are not consistent. Defaulting to None." + ) + _higher_is_better[m] = None + higher_is_better[group] = _higher_is_better + + # collect all metric keys used by a subtask in the group. 
+ metric_list = list( + { + key + for task in task_list + for key in results[task].keys() + if "_stderr" not in key and key not in ["alias", "samples"] + } + ) + for metric in metric_list: + stderr = "_stderr,".join(metric.split(",")) + + # gather metrics, sizes, and stderrs from subtasks + metrics = [ + results[task][metric] + for task in task_list + if metric in results[task] + ] # TODO: copy? + stderrs = [ + results[task][stderr] + for task in task_list + if stderr in results[task] + ] + sizes = [ + results[task]["samples"] + for task in task_list + if metric in results[task] + ] + + # compute group's pooled metric and stderr + results[group][metric] = ( + lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes) + ) + # TODO: calculate grouped metric using aggregation fn + if "N/A" in stderrs: + results[group][stderr] = "N/A" + else: + results[group][stderr] = ( + lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes) + ) + # TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility + # To use the old (likely incorrect) variance formula, comment out the above and uncomment this line: + # results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics) + + results[group]["samples"] = sum(sizes) + + results_agg = defaultdict(dict) + groups_agg = defaultdict(dict) + all_tasks_list = list(task_hierarchy.keys()) + while True: + add_tasks_list = list(k for k in results_agg.keys()) + left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list))) + if len(left_tasks_list) == 0: + break + + _task_hierarchy = { + k: v for k, v in task_hierarchy.items() if k in left_tasks_list + } + _results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results) + + results_agg = {**results_agg, **_results_agg} + groups_agg = {**groups_agg, **_groups_agg} + + for group_name, task_list in task_hierarchy.items(): + if task_list: + num_fewshot[group_name] = num_fewshot[ + task_list[0] + ] # TODO: 
def request_caching_arg_to_dict(cache_requests: str) -> dict:
    """Translate a request-caching CLI mode string into evaluator keyword flags.

    Args:
        cache_requests: one of "true", "refresh", or "delete".

    Returns:
        dict of booleans:
            cache_requests: read from the request cache ("true" or "refresh").
            rewrite_requests_cache: rebuild cached requests ("refresh" only).
            delete_requests_cache: drop the cache entirely ("delete" only).
    """
    use_cache = cache_requests in {"true", "refresh"}
    rewrite_cache = cache_requests == "refresh"
    delete_cache = cache_requests == "delete"
    return {
        "cache_requests": use_cache,
        "rewrite_requests_cache": rewrite_cache,
        "delete_requests_cache": delete_cache,
    }
class TaskOutput:
    """Holds one task's evaluation outputs and aggregates its metrics.

    Attributes:
        task: the underlying Task object (None for pure group entries).
        task_name: name of the task.
        task_config: the task's dumped configuration dict.
        version: task version.
        group_name: name of the group this task belongs to, if any.
        n_shot: number of few-shot examples used.
        task_alias: display alias for the task.
        group_alias: display alias for the group.
        is_group: True when this entry represents a group, not a task.
        logged_samples: per-document log entries.
        sample_len: number of per-sample metric values seen.
        sample_metrics: (metric, filter) -> list of per-sample values.
        agg_metrics: "metric,filter" -> aggregated value / stderr.
    """

    def __init__(
        self,
        task=None,
        task_name=None,
        task_config=None,
        version=None,
        group_name=None,
        n_shot=None,
        task_alias=None,
        group_alias=None,
        is_group=None,
    ):
        self.task = task
        self.task_config = task_config
        self.task_name = task_name
        self.group_name = group_name
        self.version = version
        self.n_shot = n_shot
        self.task_alias = task_alias
        self.group_alias = group_alias
        self.is_group = is_group
        self.logged_samples = []
        self.sample_len = None
        self.sample_metrics = collections.defaultdict(list)
        self.agg_metrics = collections.defaultdict(list)

    @classmethod
    def from_taskdict(cls, task_name: str, task):
        """Build a TaskOutput from one task-dict entry.

        The entry may be a bare task or a (group_name, task) tuple; a falsy
        task marks a group placeholder (filtered out later by get_task_list).
        """
        group_name = None
        if isinstance(task, tuple):
            group_name, task = task
        if not task:
            # group placeholder: no task object to interrogate
            return cls(
                task=task, task_name=task_name, is_group=True, group_name=group_name
            )
        task_config = dict(task.dump_config())
        if (n_shot := task_config.get("num_fewshot")) == 0:
            # a configured 0 defers to the metadata-declared few-shot count
            n_shot = task_config.get("metadata", {}).get("num_fewshot", 0)
        return cls(
            task=task,
            task_name=task_name,
            task_config=task_config,
            group_name=group_name,
            version=task.VERSION,
            n_shot=n_shot,
            task_alias=task_config.get("alias"),
            group_alias=task_config.get("group_alias"),
        )

    def calculate_aggregate_metric(self, bootstrap_iters=100000) -> None:
        """Aggregate per-sample metric values and bootstrap their stderrs.

        Raises:
            ValueError: if bootstrap_iters is not an int.
        """
        for (metric, filter_key), items in self.sample_metrics.items():
            agg_fn = self.task.aggregation()[metric]
            self.agg_metrics[f"{metric},{filter_key}"] = agg_fn(items)
            self.sample_len = len(items)  # TODO: same sample size for each metric?
            if not isinstance(bootstrap_iters, int):
                raise ValueError(
                    f"Received bootstrap_iters '{bootstrap_iters}' but expected an integer. Set to 0 to turn off stderr calculations."
                )
            # expensive corpus-level metrics get a capped iteration count
            iters = (
                min(bootstrap_iters, 100)
                if metric in ["bleu", "chrf", "ter"]
                else bootstrap_iters
            )
            stderr_fn = metrics.stderr_for_metric(metric=agg_fn, bootstrap_iters=iters)
            self.agg_metrics[f"{metric}_stderr,{filter_key}"] = (
                stderr_fn(items) if (stderr_fn and len(items) > 1) else "N/A"
            )

    def __repr__(self):
        pieces = (
            f"TaskOutput(task_name={self.task_name}, ",
            f"group_name={self.group_name}, ",
            f"version={self.version},",
            f"n_shot={self.n_shot}",
            f"task_alias={self.task_alias}, group_alias={self.group_alias})",
        )
        return "".join(pieces)


def get_task_list(task_dict: dict) -> Tuple[Dict[str, list], List[TaskOutput]]:
    """Flatten a task dict into (group -> subtask names, TaskOutputs of real tasks)."""
    task_hierarchy = collections.defaultdict(list)
    outputs = [TaskOutput.from_taskdict(name, entry) for name, entry in task_dict.items()]
    for task_output in outputs:
        if group_name := task_output.group_name:
            task_hierarchy[group_name].append(task_output.task_name)
        else:
            task_hierarchy[task_output.task_name] = []
    # group placeholders carry no task object and are kept only in the hierarchy
    return task_hierarchy, [out for out in outputs if out.task]


def print_writeout(task) -> None:
    """Log the prompt/target of the task's first document and its requests."""
    for inst in task.instances:
        # only the first document's prompt is worth echoing
        if inst.doc_id < 1:
            eval_logger.info(
                f"Task: {task}; document {inst.doc_id}; context prompt (starting on next line):\n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)"
            )
            eval_logger.info(f"Request: {str(inst)}")
def get_sample_size(task, limit: Optional[Union[int, float]]) -> Union[int, None]:
    """Resolve a `--limit` value into an absolute number of documents.

    Args:
        task: object exposing `eval_docs` (a sized collection of documents).
        limit: None means "no limit". A value < 1.0 is interpreted as a
            fraction of the task's documents (rounded up); anything else is
            an absolute count. (Annotation widened from the original
            `Optional[int]`: the `< 1.0` branch only ever fires for
            fractional float limits, so floats are part of the contract.)

    Returns:
        The absolute sample count, or None when no limit was requested.
    """
    if limit is not None:
        limit = (
            int(math.ceil(len(task.eval_docs) * limit)) if limit < 1.0 else int(limit)
        )
    return limit
+ """ + results_agg = collections.defaultdict(dict) + groups_agg = collections.defaultdict(dict) + + (group_name, task_list), *_ = task_hierarchy.items() + task_list = sorted(task_list) + + results_agg[group_name] = results[group_name].copy() + # results_agg[group_name]["tab"] = tab + if "samples" in results_agg[group_name]: + results_agg[group_name].pop("samples") + + tab_string = " " * tab + "- " if tab > 0 else "" + + if "alias" in results_agg[group_name]: + results_agg[group_name]["alias"] = tab_string + results_agg[group_name]["alias"] + else: + results_agg[group_name]["alias"] = tab_string + group_name + + if len(task_list) > 0: + groups_agg[group_name] = results[group_name].copy() + # groups_agg[group_name]["tab"] = tab + if "samples" in groups_agg[group_name]: + groups_agg[group_name].pop("samples") + + if "alias" in groups_agg[group_name]: + groups_agg[group_name]["alias"] = ( + tab_string + groups_agg[group_name]["alias"] + ) + else: + groups_agg[group_name]["alias"] = tab_string + group_name + + for task_name in task_list: + if task_name in task_hierarchy: + _task_hierarchy = { + **{task_name: task_hierarchy[task_name]}, + **task_hierarchy, + } + else: + _task_hierarchy = { + **{task_name: []}, + **task_hierarchy, + } + + _results_agg, _groups_agg = prepare_print_tasks( + _task_hierarchy, results, tab + 1 + ) + results_agg = {**results_agg, **_results_agg} + groups_agg = {**groups_agg, **_groups_agg} + + return results_agg, groups_agg + + +def consolidate_results( + eval_tasks: List[TaskOutput], +) -> Tuple[dict, dict, dict, dict, dict, dict]: + """ + @param eval_tasks: list(TaskOutput). + @return: A tuple containing the consolidated results, samples, configs, versions, and num_fewshot. + + Consolidates the results of multiple evaluation tasks into a single structure. + + The method iterates over each evaluation instance and extracts relevant information to create the consolidated + results structure. 
def consolidate_results(
    eval_tasks: List[TaskOutput],
) -> Tuple[dict, dict, dict, dict, dict, dict]:
    """Merge per-task outputs into the shared result structures.

    @param eval_tasks: list of TaskOutput instances to consolidate.
    @return: a 6-tuple of defaultdicts keyed by task name:
        results: {"metric,filter": value, "metric_stderr,filter": value,
            "alias": ..., "samples": ...} per task.
        samples: logged per-document entries.
        configs: dumped task configuration dicts.
        versions: task versions.
        num_fewshot: few-shot counts.
        higher_is_better: {metric: bool/None} per task.
    """
    results = collections.defaultdict(dict)
    samples = collections.defaultdict(list)
    num_fewshot = collections.defaultdict(int)
    configs = collections.defaultdict(dict)
    versions = collections.defaultdict(dict)
    higher_is_better = collections.defaultdict(dict)

    for task_output in eval_tasks:
        name = task_output.task_name
        task_config = task_output.task_config
        if "task_alias" in task_config:
            results[name]["alias"] = task_config["task_alias"]
        if group_alias := task_output.group_alias:
            # record the group alias once, on the group's own results row
            # (quirk preserved: presence test uses the alias string as key)
            if group_alias not in results and (group_name := task_output.group_name):
                results[group_name]["alias"] = group_alias
        num_fewshot[name] = task_output.n_shot
        configs[name] = task_config
        versions[name] = task_output.version
        samples[name] = task_output.logged_samples
        higher_is_better[name] = task_output.task.higher_is_better()
        for (metric, filter_key), items in task_output.sample_metrics.items():
            results[name][f"{metric},{filter_key}"] = task_output.agg_metrics[
                f"{metric},{filter_key}"
            ]
            results[name]["samples"] = task_output.sample_len
            results[name][f"{metric}_stderr,{filter_key}"] = task_output.agg_metrics[
                f"{metric}_stderr,{filter_key}"
            ]
    return results, samples, configs, versions, num_fewshot, higher_is_better
@positional_deprecated
def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
    """
    Search upward in the directory tree to a maximum of three layers
    to find and return the package root (containing the 'tests' folder)

    Raises:
        FileNotFoundError: when no ancestor within three levels contains
            tests/test_version_stable.py.
    """
    cur_path = start_path.resolve()
    max_layers = 3
    for _ in range(max_layers):
        if (cur_path / "tests" / "test_version_stable.py").exists():
            return cur_path
        cur_path = cur_path.parent.resolve()
    raise FileNotFoundError(
        # BUGFIX: the original concatenation produced "upwardsof" — the
        # two f-string pieces were joined with no separating space.
        f"Unable to find package root within {max_layers} upwards "
        f"of {start_path}"
    )


@positional_deprecated
def run_task_tests(task_list: List[str]):
    """
    Find the package root and run the tests for the given tasks

    Raises:
        ValueError: when pytest reports a non-zero exit status.
    """
    import pytest

    package_root = find_test_root(start_path=pathlib.Path(__file__))
    task_string = " or ".join(task_list)
    args = [
        f"{package_root}/tests/test_version_stable.py",
        f"--rootdir={package_root}",
        "-k",
        f"{task_string}",
    ]
    # make the package importable for the spawned test session
    sys.path.append(str(package_root))
    pytest_return_val = pytest.main(args)
    if pytest_return_val:
        raise ValueError(
            f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}"
        )
def build_filter_ensemble(
    filter_name: str, components: List[List[str]]
) -> FilterEnsemble:
    """
    Create a filtering pipeline.

    Each component is a (registered filter name, kwargs dict or None) pair;
    the registered filter class is looked up and pre-bound with its kwargs
    to form one pipeline step.
    """
    steps = [
        # look up each filter in the registry and bind its configuration
        partial(get_filter(function), **({} if kwargs is None else kwargs))
        for function, kwargs in components
    ]
    return FilterEnsemble(name=filter_name, filters=steps)
b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/extraction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/extraction.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/extraction.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..626e25c0d3b176f8bc68c00085c7309554f31648 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/extraction.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/selection.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/selection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c2084d2d463b260150516816d1b0422be986202 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/selection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/selection.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/selection.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6266f295fd7c75ae034e62953b84027f2a27e35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/selection.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/transformation.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/transformation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4949eaaa603e6f9cdbed3c102610d93ea532648d Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/transformation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/filters/__pycache__/transformation.cpython-312.pyc 
@register_filter("decontaminate")
class DecontaminationFilter(Filter):
    """
    A filter which evaluates
    """

    # registry display name for this filter
    name = "track_decontamination"

    def __init__(self, path) -> None:
        """

        TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path").
        should further cache result on a given (task_name, doc_id)
        """
        # NOTE(review): `path` is accepted but never stored or used — confirm
        # whether it should seed the decontamination lookup.
        self._decontam_results = None

    def apply(self, resps, docs) -> None:
        """
        Return {"no_contamination", "only_contamination"} keys for the 2 different subsets
        """
        # stub: no decontamination logic is implemented yet
        pass
@register_filter("regex")
class RegexFilter(Filter):
    """Extracts a regex match from each model response, with a fallback."""

    def __init__(
        self,
        regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
        group_select=0,
        fallback: str = "[invalid]",
    ) -> None:
        """
        pass a string `regex` to run `re.compile(r"regex")` on.
        `fallback` defines the output returned if no matches for the regex are located.
        """
        self.regex_pattern = regex_pattern
        self.regex = re.compile(regex_pattern)
        self.group_select = group_select
        self.fallback = fallback

    def apply(self, resps, docs):
        """Filter every response list; each entry of `resps` holds one doc's responses."""

        def extract(response):
            hits = self.regex.findall(response)
            if not hits:
                return self.fallback
            # pick the group_select-th hit; multi-group patterns yield
            # tuples, which collapse to their first non-empty group
            hit = hits[self.group_select]
            if isinstance(hit, tuple):
                hit = [piece for piece in hit if piece][0]
            return hit.strip()

        return [[extract(resp) for resp in doc_resps] for doc_resps in resps]


@register_filter("remove_whitespace")
class WhitespaceFilter(Filter):
    """Strips a single leading space from each response."""

    def __init__(self) -> None:
        pass

    def apply(self, resps, docs):
        # removeprefix drops at most one leading " ", matching the original
        # startswith-then-slice behavior exactly
        return [
            [resp.removeprefix(" ") for resp in doc_resps] for doc_resps in resps
        ]
@register_filter("multi_choice_regex")
class MultiChoiceRegexFilter(RegexFilter):
    """
    A filter used to extract a model's answer on multiple choice questions with
    letter answers. assumes each document has a "choices" field
    containing the list of answer choices and that the answer label symbols
    are of the form (A), (B), (C), ... or A, B, C.
    """

    def __init__(
        self,
        regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
        group_select=0,
        fallback: str = "[invalid]",
        ignore_case=False,
        ignore_punctuation=False,
        regexes_to_ignore=None,
    ) -> None:
        r"""
        regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
            - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
            - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
        group_select: Selects the (group_select)th match from the findall result.
        ignore_case: Ignores the case during step 1 matching
        ignore_punctuation: Remove the punctuation during step 1 matching
        regexes_to_ignore: Remove these regexes during step 1 matching
        """
        # NOTE: docstring made raw — "\s" in a plain string literal is an
        # invalid escape sequence (SyntaxWarning on Python 3.12+).
        super().__init__(regex_pattern, group_select, fallback)
        self.ignore_case = ignore_case
        self.ignore_punctuation = ignore_punctuation
        self.regexes_to_ignore = regexes_to_ignore

    def apply(self, resps, docs):
        # resps: list (per doc) of lists of model responses; docs: matching
        # list of documents, each providing a "choices" list.

        def find_match(regex, resp, convert_dict={}):
            # shared mutable default is safe here: convert_dict is only read
            match = regex.findall(resp)
            if match:
                match = match[self.group_select]
                if isinstance(match, tuple):
                    match = [m for m in match if m][0]
                match = match.strip()
                if match and match in convert_dict:
                    match = convert_dict[match]
            return match

        # translation table that strips every Unicode punctuation character
        punct_tbl = dict.fromkeys(
            i
            for i in range(sys.maxunicode)
            if unicodedata.category(chr(i)).startswith("P")
        )

        def filter_ignores(st):
            if self.regexes_to_ignore is not None:
                for s in self.regexes_to_ignore:
                    st = re.sub(s, "", st)

            if self.ignore_case:
                st = st.lower()

            if self.ignore_punctuation:
                # https://stackoverflow.com/a/266162
                st = st.translate(punct_tbl)
            return st

        filtered_resps = []

        for r, doc in zip(resps, docs):
            fallback_regexes = []
            choice_to_alpha = {}
            next_alpha = "A"

            without_paren_fallback_regexes = []
            without_paren_to_target = {}

            choices = doc["choices"]
            for c in choices:
                m = filter_ignores(c.strip())
                fallback_regexes.append(f"{re.escape(m)}")
                choice_to_alpha[m] = f"({next_alpha})"

                without_paren_fallback_regexes.append(next_alpha)
                without_paren_to_target[next_alpha] = f"({next_alpha})"

                next_alpha = chr(ord(next_alpha) + 1)
            fallback_regex = re.compile("|".join(fallback_regexes))
            without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
            # BUGFIX: raw f-string — the original `f":[\s]*..."` contained an
            # invalid "\s" escape (SyntaxWarning on 3.12+, slated to become an
            # error). The compiled pattern is byte-identical.
            without_paren_fallback_regex = re.compile(
                rf":[\s]*({without_paren_fallback_regex})"
            )

            filtered = []
            for resp in r:
                # step 0: the configured pattern; step 1: literal choice text;
                # step 2: bare letter after a colon; else the fallback token
                match = find_match(self.regex, resp)
                if not match:
                    match = find_match(
                        fallback_regex, filter_ignores(resp), choice_to_alpha
                    )
                    if not match:
                        match = find_match(
                            without_paren_fallback_regex, resp, without_paren_to_target
                        )
                        if not match:
                            match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)

        return filtered_resps
+ """ + return map(lambda r: r[0], resps) + + +@register_filter("take_first_k") +class TakeKFilter(Filter): + def __init__(self, **kwargs) -> None: + self.k = kwargs.pop("k") + + super().__init__(**kwargs) + + def apply(self, resps, docs): + # need resp to be subscriptable to check below + resps = list(resps) + # check we have at least k responses per doc, else we can't take the first k + assert ( + len(resps[0]) >= self.k + ), f"Need at least {self.k} responses per doc to take first {self.k}, but got {len(resps[0])} only! Please increase TaskConfig.repeats ." + return map(lambda r: r[: self.k], resps) + + +@register_filter("majority_vote") +class MajorityVoteFilter(Filter): + def __init__(self) -> None: + """ + Can define custom behavior here, if an individual instantiation of a Filter class should have state. + """ + + def apply(self, resps, docs): + """ + Each entry of `resps` is a list of model responses. + We select the response that occurs most frequently in each entry of `resps`. 
+ """ + + def select_majority(resp): + counts = Counter(resp) + vote = counts.most_common(1)[0][0] + return vote + + return map(lambda r: [select_majority(r)], resps) diff --git a/venv/lib/python3.10/site-packages/lm_eval/filters/transformation.py b/venv/lib/python3.10/site-packages/lm_eval/filters/transformation.py new file mode 100644 index 0000000000000000000000000000000000000000..cac1c5921dafe74be0b8416bd3a0678dc1fa1570 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/filters/transformation.py @@ -0,0 +1,56 @@ +from lm_eval.api.filter import Filter +from lm_eval.api.registry import register_filter + + +@register_filter("lowercase") +class LowercaseFilter(Filter): + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + return [resp.lower() for resp in inst] + + return [filter_set(resp) for resp in resps] + + +@register_filter("uppercase") +class UppercaseFilter(Filter): + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + return [resp.upper() for resp in inst] + + return [filter_set(resp) for resp in resps] + + +@register_filter("map") +class MapFilter(Filter): + def __init__(self, mapping_dict: dict = None, default_value=None) -> None: + """ + Initializes the MapFilter with a given mapping dictionary and default value. + + Args: + - mapping_dict (dict): A dictionary containing the key-value mappings. + Default is an empty dictionary. + - default_value (Any): The value to be returned when a key is not found in the mapping_dict. + Default is None. 
+ + Example: + mapper = MapFilter({'A': 1, 'B': 2}, default_value=0) + """ + if mapping_dict is None: + mapping_dict = {} + assert isinstance( + mapping_dict, dict + ), "Provided mapping_dict is not a dictionary" + self.mapping_dict = mapping_dict + self.default_value = default_value + + def apply(self, resps, docs): + def filter_set(inst): + return [self.mapping_dict.get(resp, self.default_value) for resp in inst] + + return [filter_set(resp) for resp in resps] diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__init__.py b/venv/lib/python3.10/site-packages/lm_eval/loggers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02b7a6834c6486fde35ef02d715e90be3fba223a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/loggers/__init__.py @@ -0,0 +1,2 @@ +from .evaluation_tracker import EvaluationTracker +from .wandb_logger import WandbLogger diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95af1129ebbab219baf1249b7c3a26b7a957ec2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/__init__.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2605aea2e6bacfc2bdca124e0ff92439bc686366 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/evaluation_tracker.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/evaluation_tracker.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0c7138dfcbbb973c6d90b708fad50c2851b17294 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/evaluation_tracker.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/evaluation_tracker.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/evaluation_tracker.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7546e92b79d6ae12f9eb1953a7fd2375c78e8d08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/evaluation_tracker.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5897620948bbdf96a84dd181a0a68e40809eb906 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/utils.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..880821805081e9c4722e5d234894c49619829dbf Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/wandb_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/wandb_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e2195ffc0f8457cf2c30d480ecff192cdda7380 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/wandb_logger.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/wandb_logger.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/wandb_logger.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89706e13bec1c50e1d2bf68241b9dcaae8fb333a Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/loggers/__pycache__/wandb_logger.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/evaluation_tracker.py b/venv/lib/python3.10/site-packages/lm_eval/loggers/evaluation_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..6aa7f25bef9454534855d75ed584f025bb4e6401 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/loggers/evaluation_tracker.py @@ -0,0 +1,522 @@ +import json +import re +import time +from collections import defaultdict +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path + +from datasets import load_dataset +from datasets.utils.metadata import MetadataConfigs +from huggingface_hub import ( + DatasetCard, + DatasetCardData, + HfApi, + hf_hub_url, +) + +from lm_eval.utils import ( + eval_logger, + get_file_datetime, + get_file_task_name, + get_results_filenames, + get_sample_results_filenames, + handle_non_serializable, + hash_string, + sanitize_list, + sanitize_model_name, + sanitize_task_name, +) + + +@dataclass(init=False) +class GeneralConfigTracker: + """ + Tracker for the evaluation parameters. + + Attributes: + model_source (str): Source of the model (e.g. Hugging Face, GGUF, etc.) + model_name (str): Name of the model. + model_name_sanitized (str): Sanitized model name for directory creation. + start_time (float): Start time of the experiment. Logged at class init. + end_time (float): Start time of the experiment. 
Logged when calling [`GeneralConfigTracker.log_end_time`] + total_evaluation_time_seconds (str): Inferred total evaluation time in seconds (from the start and end times). + """ + + model_source: str = None + model_name: str = None + model_name_sanitized: str = None + system_instruction: str = None + system_instruction_sha: str = None + fewshot_as_multiturn: bool = None + chat_template: str = None + chat_template_sha: str = None + start_time: float = None + end_time: float = None + total_evaluation_time_seconds: str = None + + def __init__(self) -> None: + """Starts the evaluation timer.""" + self.start_time = time.perf_counter() + + @staticmethod + def _get_model_name(model_args: str) -> str: + """Extracts the model name from the model arguments.""" + + def extract_model_name(model_args: str, key: str) -> str: + """Extracts the model name from the model arguments using a key.""" + args_after_key = model_args.split(key)[1] + return args_after_key.split(",")[0] + + # order does matter, e.g. 
peft and delta are provided together with pretrained + prefixes = ["peft=", "delta=", "pretrained=", "model=", "path=", "engine="] + for prefix in prefixes: + if prefix in model_args: + return extract_model_name(model_args, prefix) + return "" + + def log_experiment_args( + self, + model_source: str, + model_args: str, + system_instruction: str, + chat_template: str, + fewshot_as_multiturn: bool, + ) -> None: + """Logs model parameters and job ID.""" + self.model_source = model_source + self.model_name = GeneralConfigTracker._get_model_name(model_args) + self.model_name_sanitized = sanitize_model_name(self.model_name) + self.system_instruction = system_instruction + self.system_instruction_sha = ( + hash_string(system_instruction) if system_instruction else None + ) + self.chat_template = chat_template + self.chat_template_sha = hash_string(chat_template) if chat_template else None + self.fewshot_as_multiturn = fewshot_as_multiturn + + def log_end_time(self) -> None: + """Logs the end time of the evaluation and calculates the total evaluation time.""" + self.end_time = time.perf_counter() + self.total_evaluation_time_seconds = str(self.end_time - self.start_time) + + +class EvaluationTracker: + """ + Keeps track and saves relevant information of the evaluation process. + Compiles the data from trackers and writes it to files, which can be published to the Hugging Face hub if requested. + """ + + def __init__( + self, + output_path: str = None, + hub_results_org: str = "", + hub_repo_name: str = "", + push_results_to_hub: bool = False, + push_samples_to_hub: bool = False, + public_repo: bool = False, + token: str = "", + leaderboard_url: str = "", + point_of_contact: str = "", + ) -> None: + """ + Creates all the necessary loggers for evaluation tracking. + + Args: + output_path (str): Path to save the results. If not provided, the results won't be saved. + hub_results_org (str): The Hugging Face organization to push the results to. 
If not provided, the results will be pushed to the owner of the Hugging Face token. + hub_repo_name (str): The name of the Hugging Face repository to push the results to. If not provided, the results will be pushed to `lm-eval-results`. + push_results_to_hub (bool): Whether to push the results to the Hugging Face hub. + push_samples_to_hub (bool): Whether to push the samples to the Hugging Face hub. + public_repo (bool): Whether to push the results to a public or private repository. + token (str): Token to use when pushing to the Hugging Face hub. This token should have write access to `hub_results_org`. + leaderboard_url (str): URL to the leaderboard on the Hugging Face hub on the dataset card. + point_of_contact (str): Contact information on the Hugging Face hub dataset card. + """ + self.general_config_tracker = GeneralConfigTracker() + + self.output_path = output_path + self.push_results_to_hub = push_results_to_hub + self.push_samples_to_hub = push_samples_to_hub + self.public_repo = public_repo + self.leaderboard_url = leaderboard_url + self.point_of_contact = point_of_contact + self.api = HfApi(token=token) if token else None + + if not self.api and (push_results_to_hub or push_samples_to_hub): + raise ValueError( + "Hugging Face token is not defined, but 'push_results_to_hub' or 'push_samples_to_hub' is set to True. " + "Please provide a valid Hugging Face token by setting the HF_TOKEN environment variable." + ) + + if ( + self.api + and hub_results_org == "" + and (push_results_to_hub or push_samples_to_hub) + ): + hub_results_org = self.api.whoami()["name"] + eval_logger.warning( + f"hub_results_org was not specified. Results will be pushed to '{hub_results_org}'." 
+ ) + + hub_repo_name = hub_repo_name if hub_repo_name else "lm-eval-results" + self.hub_results_repo = f"{hub_results_org}/{hub_repo_name}" + self.hub_results_repo_private = f"{hub_results_org}/{hub_repo_name}-private" + + def save_results_aggregated( + self, + results: dict, + samples: dict, + ) -> None: + """ + Saves the aggregated results and samples to the output path and pushes them to the Hugging Face hub if requested. + + Args: + results (dict): The aggregated results to save. + samples (dict): The samples results to save. + """ + self.general_config_tracker.log_end_time() + + if self.output_path: + try: + eval_logger.info("Saving results aggregated") + + # calculate cumulative hash for each task - only if samples are provided + task_hashes = {} + if samples: + for task_name, task_samples in samples.items(): + sample_hashes = [ + s["doc_hash"] + s["prompt_hash"] + s["target_hash"] + for s in task_samples + ] + task_hashes[task_name] = hash_string("".join(sample_hashes)) + + # update initial results dict + results.update({"task_hashes": task_hashes}) + results.update(asdict(self.general_config_tracker)) + dumped = json.dumps( + results, + indent=2, + default=handle_non_serializable, + ensure_ascii=False, + ) + + path = Path(self.output_path if self.output_path else Path.cwd()) + path = path.joinpath(self.general_config_tracker.model_name_sanitized) + path.mkdir(parents=True, exist_ok=True) + + self.date_id = datetime.now().isoformat().replace(":", "-") + file_results_aggregated = path.joinpath(f"results_{self.date_id}.json") + file_results_aggregated.open("w", encoding="utf-8").write(dumped) + + if self.api and self.push_results_to_hub: + repo_id = ( + self.hub_results_repo + if self.public_repo + else self.hub_results_repo_private + ) + self.api.create_repo( + repo_id=repo_id, + repo_type="dataset", + private=not self.public_repo, + exist_ok=True, + ) + self.api.upload_folder( + repo_id=repo_id, + folder_path=str(path), + 
path_in_repo=self.general_config_tracker.model_name_sanitized, + repo_type="dataset", + commit_message=f"Adding aggregated results for {self.general_config_tracker.model_name}", + ) + eval_logger.info( + "Successfully pushed aggregated results to the Hugging Face Hub. " + f"You can find them at: {repo_id}" + ) + + except Exception as e: + eval_logger.warning("Could not save results aggregated") + eval_logger.info(repr(e)) + else: + eval_logger.info( + "Output path not provided, skipping saving results aggregated" + ) + + def save_results_samples( + self, + task_name: str, + samples: dict, + ) -> None: + """ + Saves the samples results to the output path and pushes them to the Hugging Face hub if requested. + + Args: + task_name (str): The task name to save the samples for. + samples (dict): The samples results to save. + """ + if self.output_path: + try: + eval_logger.info(f"Saving per-sample results for: {task_name}") + + path = Path(self.output_path if self.output_path else Path.cwd()) + path = path.joinpath(self.general_config_tracker.model_name_sanitized) + path.mkdir(parents=True, exist_ok=True) + + file_results_samples = path.joinpath( + f"samples_{task_name}_{self.date_id}.jsonl" + ) + + for sample in samples: + # we first need to sanitize arguments and resps + # otherwise we won't be able to load the dataset + # using the datasets library + arguments = {} + for i, arg in enumerate(sample["arguments"]): + arguments[f"gen_args_{i}"] = {} + for j, tmp in enumerate(arg): + arguments[f"gen_args_{i}"][f"arg_{j}"] = tmp + + sample["resps"] = sanitize_list(sample["resps"]) + sample["filtered_resps"] = sanitize_list(sample["filtered_resps"]) + sample["arguments"] = arguments + + sample_dump = ( + json.dumps( + sample, + default=handle_non_serializable, + ensure_ascii=False, + ) + + "\n" + ) + + with open(file_results_samples, "a") as f: + f.write(sample_dump) + + if self.api and self.push_samples_to_hub: + repo_id = ( + self.hub_results_repo + if self.public_repo + 
else self.hub_results_repo_private + ) + self.api.create_repo( + repo_id=repo_id, + repo_type="dataset", + private=not self.public_repo, + exist_ok=True, + ) + self.api.upload_folder( + repo_id=repo_id, + folder_path=str(path), + path_in_repo=self.general_config_tracker.model_name_sanitized, + repo_type="dataset", + commit_message=f"Adding samples results for {task_name} to {self.general_config_tracker.model_name}", + ) + eval_logger.info( + f"Successfully pushed sample results for task: {task_name} to the Hugging Face Hub. " + f"You can find them at: {repo_id}" + ) + + except Exception as e: + eval_logger.warning("Could not save sample results") + eval_logger.info(repr(e)) + else: + eval_logger.info("Output path not provided, skipping saving sample results") + + def recreate_metadata_card(self) -> None: + """ + Creates a metadata card for the evaluation results dataset and pushes it to the Hugging Face hub. + """ + + eval_logger.info("Recreating metadata card") + repo_id = ( + self.hub_results_repo if self.public_repo else self.hub_results_repo_private + ) + + files_in_repo = self.api.list_repo_files(repo_id=repo_id, repo_type="dataset") + results_files = get_results_filenames(files_in_repo) + sample_files = get_sample_results_filenames(files_in_repo) + + # Build a dictionary to store the latest evaluation datetime for: + # - Each tested model and its aggregated results + # - Each task and sample results, if existing + # i.e. 
{ + # "org__model_name__gsm8k": "2021-09-01T12:00:00", + # "org__model_name__ifeval": "2021-09-01T12:00:00", + # "org__model_name__results": "2021-09-01T12:00:00" + # } + latest_task_results_datetime = defaultdict(lambda: datetime.min.isoformat()) + + for file_path in sample_files: + file_path = Path(file_path) + filename = file_path.name + model_name = file_path.parent + task_name = get_file_task_name(filename) + results_datetime = get_file_datetime(filename) + task_name_sanitized = sanitize_task_name(task_name) + # Results and sample results for the same model and task will have the same datetime + samples_key = f"{model_name}__{task_name_sanitized}" + results_key = f"{model_name}__results" + latest_datetime = max( + latest_task_results_datetime[samples_key], + results_datetime, + ) + latest_task_results_datetime[samples_key] = latest_datetime + latest_task_results_datetime[results_key] = latest_datetime + + # Create metadata card + card_metadata = MetadataConfigs() + + # Add the latest aggregated results to the metadata card for easy access + for file_path in results_files: + file_path = Path(file_path) + results_filename = file_path.name + model_name = file_path.parent + eval_date = get_file_datetime(results_filename) + eval_date_sanitized = re.sub(r"[^\w\.]", "_", eval_date) + results_filename = Path("**") / Path(results_filename).name + config_name = f"{model_name}__results" + sanitized_last_eval_date_results = re.sub( + r"[^\w\.]", "_", latest_task_results_datetime[config_name] + ) + # Ensure that all results files are listed in the metadata card + current_results = card_metadata.get(config_name, {"data_files": []}) + current_results["data_files"].append( + {"split": eval_date_sanitized, "path": [str(results_filename)]} + ) + card_metadata[config_name] = current_results + # If the results file is the newest, update the "latest" field in the metadata card + if eval_date_sanitized == sanitized_last_eval_date_results: + 
card_metadata[config_name]["data_files"].append( + {"split": "latest", "path": [str(results_filename)]} + ) + + # Add the tasks details configs + for file_path in sample_files: + file_path = Path(file_path) + filename = file_path.name + model_name = file_path.parent + task_name = get_file_task_name(filename) + eval_date = get_file_datetime(filename) + task_name_sanitized = sanitize_task_name(task_name) + eval_date_sanitized = re.sub(r"[^\w\.]", "_", eval_date) + results_filename = Path("**") / Path(filename).name + config_name = f"{model_name}__{task_name_sanitized}" + sanitized_last_eval_date_results = re.sub( + r"[^\w\.]", "_", latest_task_results_datetime[config_name] + ) + # Ensure that all sample results files are listed in the metadata card + current_details_for_task = card_metadata.get( + config_name, {"data_files": []} + ) + current_details_for_task["data_files"].append( + {"split": eval_date_sanitized, "path": [str(results_filename)]} + ) + card_metadata[config_name] = current_details_for_task + # If the samples results file is the newest, update the "latest" field in the metadata card + if eval_date_sanitized == sanitized_last_eval_date_results: + card_metadata[config_name]["data_files"].append( + {"split": "latest", "path": [str(results_filename)]} + ) + + # Special case for MMLU with a single split covering it all + # We add another config with all MMLU splits results together for easy inspection + SPECIAL_TASKS = ["mmlu", "gpqa", "minerva_math"] + for special_task in SPECIAL_TASKS: + if special_task in config_name: + special_task = f"{model_name}__{special_task}" + former_entry = card_metadata.get(special_task, {"data_files": []}) + + former_split = [ + (i, entry) + for i, entry in enumerate(former_entry["data_files"]) + if entry.get("split", None) == eval_date_sanitized + ] + + if len(former_split) == 0: + former_entry["data_files"].append( + { + "split": eval_date_sanitized, + "path": [str(results_filename)], + } + ) + else: + split_index, _ = 
former_split[0] + former_entry["data_files"][split_index]["path"].append( + str(results_filename) + ) + + if eval_date_sanitized == sanitized_last_eval_date_results: + latest_split = [ + (i, entry) + for i, entry in enumerate(former_entry["data_files"]) + if entry.get("split", None) == "latest" + ] + if len(latest_split) == 0: + former_entry["data_files"].append( + {"split": "latest", "path": [str(results_filename)]} + ) + else: + latest_index, _ = latest_split[0] + former_entry["data_files"][latest_index]["path"].append( + str(results_filename) + ) + + card_metadata[special_task] = former_entry + + # Get latest results and extract info to update metadata card examples + latest_datetime = max(latest_task_results_datetime.values()) + latest_model_name = max( + latest_task_results_datetime, key=lambda k: latest_task_results_datetime[k] + ) + last_results_file = [ + f for f in results_files if latest_datetime.replace(":", "-") in f + ][0] + last_results_file_path = hf_hub_url( + repo_id=repo_id, filename=last_results_file, repo_type="dataset" + ) + latest_results_file = load_dataset( + "json", data_files=last_results_file_path, split="train" + ) + results_dict = latest_results_file["results"][0] + new_dictionary = {"all": results_dict} + new_dictionary.update(results_dict) + results_string = json.dumps(new_dictionary, indent=4) + + dataset_summary = ( + "Dataset automatically created during the evaluation run of model " + ) + if self.general_config_tracker.model_source == "hf": + dataset_summary += f"[{self.general_config_tracker.model_name}](https://huggingface.co/{self.general_config_tracker.model_name})\n" + else: + dataset_summary += f"{self.general_config_tracker.model_name}\n" + dataset_summary += ( + f"The dataset is composed of {len(card_metadata)-1} configuration(s), each one corresponding to one of the evaluated task.\n\n" + f"The dataset has been created from {len(results_files)} run(s). 
Each run can be found as a specific split in each " + 'configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results.\n\n' + 'An additional configuration "results" store all the aggregated results of the run.\n\n' + "To load the details from a run, you can for instance do the following:\n" + ) + if self.general_config_tracker.model_source == "hf": + dataset_summary += ( + "```python\nfrom datasets import load_dataset\n" + f'data = load_dataset(\n\t"{repo_id}",\n\tname="{latest_model_name}",\n\tsplit="latest"\n)\n```\n\n' + ) + dataset_summary += ( + "## Latest results\n\n" + f'These are the [latest results from run {latest_datetime}]({last_results_file_path.replace("/resolve/", "/blob/")}) ' + "(note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. " + 'You find each in the results and the "latest" split for each eval):\n\n' + f"```python\n{results_string}\n```" + ) + card_data = DatasetCardData( + dataset_summary=dataset_summary, + repo_url=f"https://huggingface.co/{self.general_config_tracker.model_name}", + pretty_name=f"Evaluation run of {self.general_config_tracker.model_name}", + leaderboard_url=self.leaderboard_url, + point_of_contact=self.point_of_contact, + ) + card_metadata.to_dataset_card_data(card_data) + card = DatasetCard.from_template( + card_data, + pretty_name=card_data.pretty_name, + ) + card.push_to_hub(repo_id, repo_type="dataset") diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/utils.py b/venv/lib/python3.10/site-packages/lm_eval/loggers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..348175a95462e0968b16375eaa8f690db9d0af44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/loggers/utils.py @@ -0,0 +1,143 @@ +import logging +import os +import re +import subprocess +from pathlib import Path +from typing import Any, Dict, Optional, Tuple, Union + +import numpy as np 
+from torch.utils.collect_env import get_pretty_env_info +from transformers import __version__ as trans_version + + +logger = logging.getLogger(__name__) + + +def remove_none_pattern(input_string: str) -> Tuple[str, bool]: + """Remove the ',none' substring from the input_string if it exists at the end. + + Args: + input_string (str): The input string from which to remove the ',none' substring. + + Returns: + Tuple[str, bool]: A tuple containing the modified input_string with the ',none' substring removed + and a boolean indicating whether the modification was made (True) or not (False). + """ + # Define the pattern to match ',none' at the end of the string + pattern = re.compile(r",none$") + + # Use sub() to replace ',none' with an empty string + result = re.sub(pattern, "", input_string) + + # check if the input_string changed + removed = result != input_string + + return result, removed + + +def _handle_non_serializable(o: Any) -> Union[int, str, list]: + """Handle non-serializable objects by converting them to serializable types. + + Args: + o (Any): The object to be handled. + + Returns: + Union[int, str, list]: The converted object. If the object is of type np.int64 or np.int32, + it will be converted to int. If the object is of type set, it will be converted + to a list. Otherwise, it will be converted to str. 
+ """ + if isinstance(o, np.int64) or isinstance(o, np.int32): + return int(o) + elif isinstance(o, set): + return list(o) + else: + return str(o) + + +def get_commit_from_path(repo_path: Union[Path, str]) -> Optional[str]: + try: + git_folder = Path(repo_path, ".git") + if git_folder.is_file(): + git_folder = Path( + git_folder.parent, + git_folder.read_text(encoding="utf-8").split("\n")[0].split(" ")[-1], + ) + if Path(git_folder, "HEAD").exists(): + head_name = ( + Path(git_folder, "HEAD") + .read_text(encoding="utf-8") + .split("\n")[0] + .split(" ")[-1] + ) + head_ref = Path(git_folder, head_name) + git_hash = head_ref.read_text(encoding="utf-8").replace("\n", "") + else: + git_hash = None + except Exception as err: + logger.debug( + f"Failed to retrieve a Git commit hash from path: {str(repo_path)}. Error: {err}" + ) + return None + return git_hash + + +def get_git_commit_hash(): + """ + Gets the git commit hash of your current repo (if it exists). + Source: https://github.com/EleutherAI/gpt-neox/blob/b608043be541602170bfcfb8ec9bf85e8a0799e0/megatron/neox_arguments/neox_args.py#L42 + """ + try: + git_hash = subprocess.check_output(["git", "describe", "--always"]).strip() + git_hash = git_hash.decode() + except (subprocess.CalledProcessError, FileNotFoundError): + # FileNotFoundError occurs when git not installed on system + git_hash = get_commit_from_path(os.getcwd()) # git hash of repo if exists + return git_hash + + +def add_env_info(storage: Dict[str, Any]): + try: + pretty_env_info = get_pretty_env_info() + except Exception as err: + pretty_env_info = str(err) + transformers_version = trans_version + upper_dir_commit = get_commit_from_path( + Path(os.getcwd(), "..") + ) # git hash of upper repo if exists + added_info = { + "pretty_env_info": pretty_env_info, + "transformers_version": transformers_version, + "upper_git_hash": upper_dir_commit, # in case this repo is submodule + } + storage.update(added_info) + + +def add_tokenizer_info(storage: Dict[str, 
Any], lm): + if getattr(lm, "tokenizer", False): + try: + tokenizer_info = { + "tokenizer_pad_token": [ + lm.tokenizer.pad_token, + lm.tokenizer.pad_token_id, + ], + "tokenizer_eos_token": [ + lm.tokenizer.eos_token, + lm.tokenizer.eos_token_id, + ], + "tokenizer_bos_token": [ + lm.tokenizer.bos_token, + lm.tokenizer.bos_token_id, + ], + "eot_token_id": getattr(lm, "eot_token_id", None), + "max_length": getattr(lm, "max_length", None), + } + storage.update(tokenizer_info) + except Exception as err: + logger.debug( + f"Logging detailed tokenizer info failed with {err}, skipping..." + ) + # seems gguf and textsynth do not have tokenizer + else: + logger.debug( + "LM does not have a 'tokenizer' attribute, not logging tokenizer metadata to results." + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/loggers/wandb_logger.py b/venv/lib/python3.10/site-packages/lm_eval/loggers/wandb_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..c9715a0fe99ad443ac4925a951c7f2c785ceb11f --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/loggers/wandb_logger.py @@ -0,0 +1,352 @@ +import copy +import json +import logging +from typing import Any, Dict, List, Literal, Tuple + +import numpy as np +import pandas as pd +from packaging.version import Version + +from lm_eval.loggers.utils import _handle_non_serializable, remove_none_pattern + + +logger = logging.getLogger(__name__) + + +def get_wandb_printer() -> Literal["Printer"]: + """Returns a wandb printer instance for pretty stdout.""" + from wandb.sdk.lib.printer import get_printer + from wandb.sdk.wandb_settings import Settings + + printer = get_printer(Settings()._jupyter) + return printer + + +class WandbLogger: + def __init__(self, **kwargs) -> None: + """Attaches to wandb logger if already initialized. Otherwise, passes kwargs to wandb.init() + + Args: + kwargs Optional[Any]: Arguments for configuration. 
+ + Parse and log the results returned from evaluator.simple_evaluate() with: + wandb_logger.post_init(results) + wandb_logger.log_eval_result() + wandb_logger.log_eval_samples(results["samples"]) + """ + try: + import wandb + + assert Version(wandb.__version__) >= Version("0.13.6") + if Version(wandb.__version__) < Version("0.13.6"): + wandb.require("report-editing:v0") + except Exception as e: + logger.warning( + "To use the wandb reporting functionality please install wandb>=0.13.6.\n" + "To install the latest version of wandb run `pip install wandb --upgrade`\n" + f"{e}" + ) + + self.wandb_args: Dict[str, Any] = kwargs + + # initialize a W&B run + if wandb.run is None: + self.run = wandb.init(**self.wandb_args) + else: + self.run = wandb.run + + self.printer = get_wandb_printer() + + def post_init(self, results: Dict[str, Any]) -> None: + self.results: Dict[str, Any] = copy.deepcopy(results) + self.task_names: List[str] = list(results.get("results", {}).keys()) + self.group_names: List[str] = list(results.get("groups", {}).keys()) + + def _get_config(self) -> Dict[str, Any]: + """Get configuration parameters.""" + self.task_configs = self.results.get("configs", {}) + cli_configs = self.results.get("config", {}) + configs = { + "task_configs": self.task_configs, + "cli_configs": cli_configs, + } + + return configs + + def _sanitize_results_dict(self) -> Tuple[Dict[str, str], Dict[str, Any]]: + """Sanitize the results dictionary.""" + _results = copy.deepcopy(self.results.get("results", dict())) + + # Remove None from the metric string name + tmp_results = copy.deepcopy(_results) + for task_name in self.task_names: + task_result = tmp_results.get(task_name, dict()) + for metric_name, metric_value in task_result.items(): + _metric_name, removed = remove_none_pattern(metric_name) + if removed: + _results[task_name][_metric_name] = metric_value + _results[task_name].pop(metric_name) + + # remove string valued keys from the results dict + wandb_summary = {} + for 
task in self.task_names: + task_result = _results.get(task, dict()) + for metric_name, metric_value in task_result.items(): + if isinstance(metric_value, str): + wandb_summary[f"{task}/{metric_name}"] = metric_value + + for summary_metric, summary_value in wandb_summary.items(): + _task, _summary_metric = summary_metric.split("/") + _results[_task].pop(_summary_metric) + + tmp_results = copy.deepcopy(_results) + for task_name, task_results in tmp_results.items(): + for metric_name, metric_value in task_results.items(): + _results[f"{task_name}/{metric_name}"] = metric_value + _results[task_name].pop(metric_name) + for task in self.task_names: + _results.pop(task) + + return wandb_summary, _results + + def _log_results_as_table(self) -> None: + """Generate and log evaluation results as a table to W&B.""" + columns = [ + "Version", + "Filter", + "num_fewshot", + "Metric", + "Value", + "Stderr", + ] + + def make_table(columns: List[str], key: str = "results"): + import wandb + + table = wandb.Table(columns=columns) + results = copy.deepcopy(self.results) + + for k, dic in results.get(key).items(): + if k in self.group_names and not key == "groups": + continue + version = results.get("versions").get(k) + if version == "N/A": + version = None + n = results.get("n-shot").get(k) + + for (mf), v in dic.items(): + m, _, f = mf.partition(",") + if m.endswith("_stderr"): + continue + if m == "alias": + continue + + if m + "_stderr" + "," + f in dic: + se = dic[m + "_stderr" + "," + f] + if se != "N/A": + se = "%.4f" % se + table.add_data(*[k, version, f, n, m, str(v), str(se)]) + else: + table.add_data(*[k, version, f, n, m, str(v), ""]) + + return table + + # log the complete eval result to W&B Table + table = make_table(["Tasks"] + columns, "results") + self.run.log({"evaluation/eval_results": table}) + + if "groups" in self.results.keys(): + table = make_table(["Groups"] + columns, "groups") + self.run.log({"evaluation/group_eval_results": table}) + + def 
_log_results_as_artifact(self) -> None: + """Log results as JSON artifact to W&B.""" + import wandb + + dumped = json.dumps( + self.results, indent=2, default=_handle_non_serializable, ensure_ascii=False + ) + artifact = wandb.Artifact("results", type="eval_results") + with artifact.new_file("results.json", mode="w", encoding="utf-8") as f: + f.write(dumped) + self.run.log_artifact(artifact) + + def log_eval_result(self) -> None: + """Log evaluation results to W&B.""" + # Log configs to wandb + configs = self._get_config() + self.run.config.update(configs) + + wandb_summary, self.wandb_results = self._sanitize_results_dict() + # update wandb.run.summary with items that were removed + self.run.summary.update(wandb_summary) + # Log the evaluation metrics to wandb + self.run.log(self.wandb_results) + # Log the evaluation metrics as W&B Table + self._log_results_as_table() + # Log the results dict as json to W&B Artifacts + self._log_results_as_artifact() + + def _generate_dataset( + self, data: List[Dict[str, Any]], config: Dict[str, Any] + ) -> pd.DataFrame: + """Generate a dataset from evaluation data. + + Args: + data (List[Dict[str, Any]]): The data to generate a dataset for. + config (Dict[str, Any]): The configuration of the task. + + Returns: + pd.DataFrame: A dataframe that is ready to be uploaded to W&B. 
+ """ + ids = [x["doc_id"] for x in data] + labels = [x["target"] for x in data] + instance = [""] * len(ids) + resps = [""] * len(ids) + filtered_resps = [""] * len(ids) + model_outputs = {} + + metrics_list = config["metric_list"] + metrics = {} + for metric in metrics_list: + metric = metric.get("metric") + if metric in ["word_perplexity", "byte_perplexity", "bits_per_byte"]: + metrics[f"{metric}_loglikelihood"] = [x[metric][0] for x in data] + if metric in ["byte_perplexity", "bits_per_byte"]: + metrics[f"{metric}_bytes"] = [x[metric][1] for x in data] + else: + metrics[f"{metric}_words"] = [x[metric][1] for x in data] + else: + metrics[metric] = [x[metric] for x in data] + + if config["output_type"] == "loglikelihood": + instance = [x["arguments"][0][0] for x in data] + labels = [x["arguments"][0][1] for x in data] + resps = [ + f'log probability of continuation is {x["resps"][0][0][0]} ' + + "\n\n" + + "continuation will {} generated with greedy sampling".format( + "not be" if not x["resps"][0][0][1] else "be" + ) + for x in data + ] + filtered_resps = [ + f'log probability of continuation is {x["filtered_resps"][0][0]} ' + + "\n\n" + + "continuation will {} generated with greedy sampling".format( + "not be" if not x["filtered_resps"][0][1] else "be" + ) + for x in data + ] + elif config["output_type"] == "multiple_choice": + instance = [x["arguments"][0][0] for x in data] + choices = [ + "\n".join([f"{idx}. 
{y[1]}" for idx, y in enumerate(x["arguments"])]) + for x in data + ] + resps = [np.argmax([n[0][0] for n in x["resps"]]) for x in data] + filtered_resps = [ + np.argmax([n[0] for n in x["filtered_resps"]]) for x in data + ] + elif config["output_type"] == "loglikelihood_rolling": + instance = [x["arguments"][0][0] for x in data] + resps = [x["resps"][0][0] for x in data] + filtered_resps = [x["filtered_resps"][0] for x in data] + elif config["output_type"] == "generate_until": + instance = [x["arguments"][0][0] for x in data] + resps = [x["resps"][0][0] for x in data] + filtered_resps = [x["filtered_resps"][0] for x in data] + + model_outputs["raw_predictions"] = resps + model_outputs["filtered_predictions"] = filtered_resps + + df_data = { + "id": ids, + "data": instance, + } + if config["output_type"] == "multiple_choice": + df_data["choices"] = choices + + tmp_data = { + "input_len": [len(x) for x in instance], + "labels": labels, + "output_type": config["output_type"], + } + df_data.update(tmp_data) + df_data.update(model_outputs) + df_data.update(metrics) + + return pd.DataFrame(df_data) + + def _log_samples_as_artifact( + self, data: List[Dict[str, Any]], task_name: str + ) -> None: + import wandb + + # log the samples as an artifact + dumped = json.dumps( + data, + indent=2, + default=_handle_non_serializable, + ensure_ascii=False, + ) + artifact = wandb.Artifact(f"{task_name}", type="samples_by_task") + with artifact.new_file( + f"{task_name}_eval_samples.json", mode="w", encoding="utf-8" + ) as f: + f.write(dumped) + self.run.log_artifact(artifact) + # artifact.wait() + + def log_eval_samples(self, samples: Dict[str, List[Dict[str, Any]]]) -> None: + """Log evaluation samples to W&B. + + Args: + samples (Dict[str, List[Dict[str, Any]]]): Evaluation samples for each task. 
+ """ + task_names: List[str] = [ + x for x in self.task_names if x not in self.group_names + ] + + ungrouped_tasks = [] + tasks_by_groups = {} + + for task_name in task_names: + group_names = self.task_configs[task_name].get("group", None) + if group_names: + if isinstance(group_names, str): + group_names = [group_names] + + for group_name in group_names: + if not tasks_by_groups.get(group_name): + tasks_by_groups[group_name] = [task_name] + else: + tasks_by_groups[group_name].append(task_name) + else: + ungrouped_tasks.append(task_name) + + for task_name in ungrouped_tasks: + eval_preds = samples[task_name] + + # log the samples as a W&B Table + df = self._generate_dataset(eval_preds, self.task_configs.get(task_name)) + self.run.log({f"{task_name}_eval_results": df}) + + # log the samples as a json file as W&B Artifact + self._log_samples_as_artifact(eval_preds, task_name) + + for group, grouped_tasks in tasks_by_groups.items(): + grouped_df = pd.DataFrame() + for task_name in grouped_tasks: + eval_preds = samples[task_name] + df = self._generate_dataset( + eval_preds, self.task_configs.get(task_name) + ) + df["group"] = group + df["task"] = task_name + grouped_df = pd.concat([grouped_df, df], ignore_index=True) + + # log the samples as a json file as W&B Artifact + self._log_samples_as_artifact(eval_preds, task_name) + + self.run.log({f"{group}_eval_results": grouped_df}) diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__init__.py b/venv/lib/python3.10/site-packages/lm_eval/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..698c912f277fb1de6cca0ab4068e399bcbd29607 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/__init__.py @@ -0,0 +1,27 @@ +from . 
import ( + anthropic_llms, + dummy, + gguf, + huggingface, + mamba_lm, + nemo_lm, + neuralmagic, + neuron_optimum, + openai_completions, + optimum_lm, + textsynth, + vllm_causallms, +) + + +# TODO: implement __all__ + + +try: + # enable hf hub transfer if available + import hf_transfer # type: ignore # noqa + import huggingface_hub.constants # type: ignore + + huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True +except ImportError: + pass diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91acded69dbab4779d47e341eec0e2f828708df6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/__init__.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f658bf11b51409595ccf4d55c22d1dc2a2166440 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5faafb5db1f415b0c4e8d19812da9e1f73368b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/anthropic_llms.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/anthropic_llms.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5159049fcd6d288f24e4dfe133b4c9baee3da439 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/anthropic_llms.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/dummy.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ba98ac1371b9d187c9bab00dc96c19db9b0ae77 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/dummy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/dummy.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/dummy.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c79498f6449b736628fbd3404f4c27016e739a00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/dummy.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/gguf.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/gguf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1124fa839636359832ad88a5bd1f3743dac0be8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/gguf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/gguf.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/gguf.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44908aecb3644ca91f57a46d6eb5419ae1e44b7d Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/gguf.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/huggingface.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/huggingface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b44649fd36cd2f2a5518d23dd446e85c08bcfbd Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/huggingface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/huggingface.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/huggingface.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1eb5a1878f1a694576fb2a6007de0bbc38b7ed1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/huggingface.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..411e0d0f2588872209bb9e318432675aee5dcf85 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/mamba_lm.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/mamba_lm.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8480d8acd33833c43c93237ac37c049dc8b627a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/mamba_lm.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9f9f7809f16cafe3789f81305b38dcd3a0c52a4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/nemo_lm.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/nemo_lm.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f086abf83001d39c550a6c2f3e4637613317672b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/nemo_lm.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6b9c924934d648b4f2dd3db2c20a097f1634c5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuralmagic.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuralmagic.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96965d54390dc5b33aeda0d0b35cad279ffae106 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuralmagic.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e62dfa4f45fa1454cbfc92129452a81ece0744ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuron_optimum.cpython-312.pyc 
b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuron_optimum.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d33f5adaf0db658ebbe0956cdc3d2ef3bebfa64d Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/neuron_optimum.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0c4f589fc2097f43caee52f4939f37155bd2fd6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/openai_completions.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/openai_completions.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..690929f4b70a5eacd22fb536ea13e5b9a78c887b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/openai_completions.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17b2ea1621fd83c8ba4010adc30a5f07310da450 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/optimum_lm.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/optimum_lm.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c39278b1f2e3672bd291a169b348dc9d50642baa Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/optimum_lm.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/textsynth.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/textsynth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..570cfb84c173d4dbd30d1a7ba816eed5a1324d45 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/textsynth.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/textsynth.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/textsynth.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e2c49aaaba4ad3a824a666a3ff266471f864c99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/textsynth.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb69cc86626bb27e5946ac9c87706486c6fc70c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/utils.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b9ab454a88c87ec42e41f84aa309124f88daaf6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a2d3c2cbc81fb3f852709be532e6d42680ed6b1c Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/vllm_causallms.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/vllm_causallms.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e7f4a5b9b773a6b79ca323082d8eca98ef3c017 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/models/__pycache__/vllm_causallms.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/anthropic_llms.py b/venv/lib/python3.10/site-packages/lm_eval/models/anthropic_llms.py new file mode 100644 index 0000000000000000000000000000000000000000..d011fe074f931fb9e83b7b50baaef67ae47f9c5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/anthropic_llms.py @@ -0,0 +1,360 @@ +from typing import Any, List, Tuple + +from tqdm import tqdm + +from lm_eval import utils +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import retry_on_specific_exceptions + + +eval_logger = utils.eval_logger + + +def anthropic_completion( + client, #: anthropic.Anthropic, + model: str, + prompt: str, + max_tokens_to_sample: int, + temperature: float, + stop: List[str], + **kwargs: Any, +) -> str: + """Wrapper function around the Anthropic completion API client with exponential back-off + in case of RateLimitError. + + params: + client: anthropic.Anthropic + Anthropic API client + model: str + Anthropic model e.g. 
'claude-instant-v1', 'claude-2' + prompt: str + Prompt to feed to the model + max_tokens_to_sample: int + Maximum number of tokens to sample from the model + temperature: float + Sampling temperature + stop: List[str] + List of stop sequences + kwargs: Any + Additional model_args to pass to the API client + """ + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + def _exception_callback(e: Exception, sleep_time: float) -> None: + eval_logger.warning( + f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds" + ) + + @retry_on_specific_exceptions( + on_exceptions=[anthropic.RateLimitError], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def completion(): + response = client.completions.create( + prompt=f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}", + model=model, + # NOTE: Claude really likes to do CoT, and overly aggressive stop sequences + # (e.g. gsm8k's ":") may truncate a lot of the input. + stop_sequences=[anthropic.HUMAN_PROMPT] + stop, + max_tokens_to_sample=max_tokens_to_sample, + temperature=temperature, + **kwargs, + ) + return response.completion + + return completion() + + +def anthropic_chat( + client, #: anthropic.Anthropic, + model: str, + prompt: str, + max_tokens: int, + temperature: float, + stop: List[str], + **kwargs: Any, +) -> str: + """Wrapper function around the Anthropic completion API client with exponential back-off + in case of RateLimitError. + + params: + client: anthropic.Anthropic + Anthropic API client + model: str + Anthropic model e.g. 
'claude-3-opus-20240229', 'claude-3-sonnet-20240229' + prompt: str + Prompt to feed to the model + max_tokens: int + Maximum number of tokens to sample from the model + temperature: float + Sampling temperature + stop: List[str] + List of stop sequences + kwargs: Any + Additional model_args to pass to the API client + """ + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + def _exception_callback(e: Exception, sleep_time: float) -> None: + eval_logger.warning( + f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds" + ) + + @retry_on_specific_exceptions( + on_exceptions=[ + anthropic.RateLimitError, + anthropic.APIConnectionError, + anthropic.APIStatusError, + ], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def messages(): + response = client.messages.create( + model=model, + max_tokens=max_tokens, + temperature=temperature, + messages=[{"role": "user", "content": f"{prompt}"}], + **kwargs, + ) + return response.content[0].text + + return messages() + + +@register_model("anthropic") +class AnthropicLM(LM): + REQ_CHUNK_SIZE = 20 # TODO: not used + + def __init__( + self, + batch_size: int = 1, + model: str = "claude-2.0", + max_tokens_to_sample: int = 256, + temperature: float = 0, # defaults to 1 + **kwargs, # top_p, top_k, etc. + ) -> None: + """Anthropic API wrapper. + + :param model: str + Anthropic model e.g. 
'claude-instant-v1', 'claude-2' + :param max_tokens_to_sample: int + Maximum number of tokens to sample from the model + :param temperature: float + Sampling temperature + :param kwargs: Any + Additional model_args to pass to the API client + """ + super().__init__() + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + self.model = model + # defaults to os.environ.get("ANTHROPIC_API_KEY") + self.client = anthropic.Anthropic() + self.temperature = temperature + self.max_tokens_to_sample = max_tokens_to_sample + self.tokenizer = self.client.get_tokenizer() + self.kwargs = kwargs + + @property + def eot_token_id(self): + # Not sure but anthropic.HUMAN_PROMPT ? + raise NotImplementedError("No idea about anthropic tokenization.") + + @property + def max_length(self) -> int: + return 2048 + + @property + def max_gen_toks(self) -> int: + return self.max_tokens_to_sample + + @property + def batch_size(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError("No support for logits.") + + @property + def device(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError("No support for logits.") + + def tok_encode(self, string: str) -> List[int]: + return self.tokenizer.encode(string).ids + + def tok_decode(self, tokens: List[int]) -> str: + return self.tokenizer.decode(tokens) + + def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. 
\ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + if not requests: + return [] + + _requests: List[Tuple[str, dict]] = [req.args for req in requests] + + res = [] + for request in tqdm(_requests, disable=disable_tqdm): + try: + inp = request[0] + request_args = request[1] + # generation_kwargs + until = request_args.get("until") + max_gen_toks = request_args.get("max_gen_toks", self.max_length) + temperature = request_args.get("temperature", self.temperature) + response = anthropic_completion( + client=self.client, + model=self.model, + prompt=inp, + max_tokens_to_sample=max_gen_toks, + temperature=temperature, # TODO: implement non-greedy sampling for Anthropic + stop=until, # type: ignore + **self.kwargs, + ) + res.append(response) + + self.cache_hook.add_partial("generate_until", request, response) + except anthropic.APIConnectionError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"Server unreachable: {e.__cause__}") + break + except anthropic.APIStatusError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"API error {e.status_code}: {e.message}") + break + + return res + + def _model_call(self, inps): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def _model_generate(self, context, max_length, eos_token_id): + # Isn't used because we override generate_until + raise NotImplementedError() + + def loglikelihood(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + +@register_model("anthropic-chat", "anthropic-chat-completions") +class AnthropicChatLM(AnthropicLM): + REQ_CHUNK_SIZE = 20 # TODO: not used + + def __init__( + self, + model: str, + batch_size: int = 1, + max_tokens: int = 256, + temperature: float = 0, # defaults to 1 + **kwargs, # top_p, top_k, etc. 
+ ) -> None: + """Anthropic API wrapper. + + :param model: str + Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229' + :param max_tokens: int + Maximum number of tokens to sample from the model + :param temperature: float + Sampling temperature + :param kwargs: Any + Additional model_args to pass to the API client + """ + super().__init__() + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + self.model = model + # defaults to os.environ.get("ANTHROPIC_API_KEY") + self.client = anthropic.Anthropic() + self.temperature = temperature + self.max_tokens = max_tokens + self.tokenizer = self.client.get_tokenizer() + self.kwargs = kwargs + + @property + def max_gen_toks(self) -> int: + return self.max_tokens + + def generate_until(self, requests) -> List[str]: + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. 
\ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + if not requests: + return [] + + _requests: List[Tuple[str, dict]] = [req.args for req in requests] + + res = [] + for request in tqdm(_requests): + try: + inp = request[0] + request_args = request[1] + # generation_kwargs + until = request_args.get("until") + max_tokens = request_args.get("max_gen_toks", self.max_length) + temperature = request_args.get("temperature", self.temperature) + response = anthropic_chat( + client=self.client, + model=self.model, + prompt=inp, + max_tokens=max_tokens, + temperature=temperature, # TODO: implement non-greedy sampling for Anthropic + stop=until, # type: ignore + **self.kwargs, + ) + res.append(response) + + self.cache_hook.add_partial("generate_until", request, response) + except anthropic.APIConnectionError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"Server unreachable: {e.__cause__}") + break + except anthropic.APIStatusError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"API error {e.status_code}: {e.message}") + break + + return res diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/dummy.py b/venv/lib/python3.10/site-packages/lm_eval/models/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..83737739672724f5fd6581ad59955e555b770ec4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/dummy.py @@ -0,0 +1,41 @@ +import random + +from tqdm import tqdm + +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model + + +@register_model("dummy") +class DummyLM(LM): + def __init__(self) -> None: + super().__init__() + + @classmethod + def create_from_arg_string(cls, arg_string, additional_config=None): + return cls() + + def loglikelihood(self, requests, disable_tqdm: bool = False): + res = [] + + for _ in tqdm(requests, disable=disable_tqdm): + res.append((-random.random(), False)) + + return res + + def 
generate_until(self, requests, disable_tqdm: bool = False): + res = [] + + for ctx, _ in tqdm(requests, disable=disable_tqdm): + res.append("lol") + assert ctx.strip() != "" + + return res + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + res = [] + + for _ in tqdm(requests, disable=disable_tqdm): + res.append(-random.random()) + + return res diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/gguf.py b/venv/lib/python3.10/site-packages/lm_eval/models/gguf.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1362c6b0bedd8f831a1a4f93821b8c661f25e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/gguf.py @@ -0,0 +1,130 @@ +import logging +import time + +import requests +from requests.exceptions import RequestException +from tqdm import tqdm + +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model + + +logger = logging.getLogger(__name__) + + +def get_result(logprobs, context_length): + is_greedy = True + offsets = logprobs["text_offset"] + tokens = logprobs["tokens"] + tokens_logprobs = logprobs["token_logprobs"] + + idx = 0 + while offsets[idx] < context_length: + idx += 1 + continuation_logprobs = sum(tokens_logprobs[idx:-1]) + for i in range(idx, len(tokens)): + token = tokens[i] + top_tokens = logprobs["top_logprobs"][i] + top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x]) + if top_token != token: + is_greedy = False + break + + return continuation_logprobs, is_greedy + + +@register_model("gguf", "ggml") +class GGUFLM(LM): + def __init__(self, base_url=None, max_length=2048, **kwargs): + super().__init__() + self.base_url = base_url + assert self.base_url, "must pass `base_url` to use GGUF LM!" 
+ self.logprobs = 10 + self.temperature = 0.0 + self.max_length = max_length + + def gguf_completion( + self, context, continuation=None, stop=None, retries=3, delay=5, **kwargs + ): + for _ in range(retries): + try: + prompt = context + request = { + "prompt": prompt, + "logprobs": self.logprobs, + "temperature": self.temperature, + } + if continuation: + prompt += continuation + request.update({"prompt": prompt, "max_tokens": 1, "echo": True}) + if stop is not None: + request["stop"] = stop + response = requests.post( + f"{self.base_url}/v1/completions", json=request + ) + response.raise_for_status() + return response.json() + except RequestException as e: + logger.error(f"RequestException: {e}") + time.sleep(delay) # wait before retrying + else: + raise Exception(f"Failed to get a valid response after {retries} retries.") + + def loglikelihood(self, requests, disable_tqdm: bool = False): + if not requests: + return [] + res = [] + for context, continuation in tqdm( + [req.args for req in requests], disable=disable_tqdm + ): + response = self.gguf_completion(context=context, continuation=continuation) + if response and "choices" in response and response["choices"]: + choice = response["choices"][0] + logprobs = choice.get("logprobs") + if ( + logprobs + and "token_logprobs" in logprobs + and logprobs["token_logprobs"] + ): + logprob, is_greedy = get_result(logprobs, len(context)) + res.append((logprob, is_greedy)) + else: + logger.warning( + "Invalid logprobs data. Expected 'logprobs' to contain 'token_logprobs' list." + ) + else: + logger.error( + f"Invalid response for loglikelihood. 
Response: {response}" + ) + assert False + return res + + def generate_until(self, requests, disable_tqdm: bool = False): + if not requests: + return [] + + res = [] + for request in tqdm([req.args for req in requests], disable=disable_tqdm): + inp = request[0] + request_args = request[1] + until = request_args.get("until", [""]) + response = self.gguf_completion(context=inp, stop=until) + if response and "choices" in response and response["choices"]: + choice = response["choices"][0] + if "text" in choice: + generated_text = choice["text"].strip() + res.append(generated_text) + else: + logger.error( + f"Invalid response for greedy_until. Response: {response}" + ) + res.append(None) # Add default value in case of error + else: + logger.error(f"Invalid response for greedy_until. Response: {response}") + res.append(None) # Add default value in case of error + return res + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + raise NotImplementedError( + "loglikelihood_rolling not yet supported for GGUF models" + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/huggingface.py b/venv/lib/python3.10/site-packages/lm_eval/models/huggingface.py new file mode 100644 index 0000000000000000000000000000000000000000..8c125f71fc28d2b184aef56b9ca638706c6d9d7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/huggingface.py @@ -0,0 +1,1330 @@ +import copy +import os +from datetime import timedelta +from pathlib import Path +from typing import Dict, List, Literal, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import transformers +from accelerate import ( + Accelerator, + DistributedType, + InitProcessGroupKwargs, + find_executable_batch_size, +) +from huggingface_hub import HfApi +from packaging import version +from peft import PeftModel +from peft import __version__ as PEFT_VERSION +from tqdm import tqdm +from transformers.models.auto.modeling_auto import ( + MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + 
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, +) + +from lm_eval import utils +from lm_eval.api.instance import Instance +from lm_eval.api.model import TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import ( + Collator, + clear_torch_cache, + configure_pad_token, + get_dtype, + pad_and_concat, + stop_sequences_criteria, +) + + +eval_logger = utils.eval_logger + + +def _get_accelerate_args( + device_map_option: Optional[str] = "auto", + max_memory_per_gpu: Optional[Union[int, str]] = None, + max_cpu_memory: Optional[Union[int, str]] = None, + offload_folder: Optional[str] = "./offload", + gpus: Optional[int] = None, +) -> dict: + """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`.""" + max_memory = {} + if max_memory_per_gpu is not None: + max_memory_per_gpu_map = { + device_idx: max_memory_per_gpu for device_idx in range(gpus) + } + max_memory.update(max_memory_per_gpu_map) + if max_cpu_memory is not None: + max_memory["cpu"] = max_cpu_memory + + args = {} + if max_memory: + args["max_memory"] = max_memory + args["device_map"] = device_map_option + args["offload_folder"] = offload_folder + return args + + +@register_model("hf-auto", "hf", "huggingface") +class HFLM(TemplateLM): + """ + An abstracted Huggingface model class. Enables usage with both models of + `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes. + + Supports data-parallel multi-GPU with HF Accelerate. 
+ """ + + AUTO_MODEL_CLASS = None + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained: Union[str, transformers.PreTrainedModel], + backend: Optional[Literal["default", "causal", "seq2seq"]] = "default", + # override whether the model should be treated as decoder-only (causal) or encoder-decoder (seq2seq) + revision: Optional[str] = "main", + subfolder: Optional[str] = None, + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ] = None, + truncation: Optional[bool] = False, + logits_cache: bool = True, + max_length: Optional[int] = None, + device: Optional[str] = "cuda", + dtype: Optional[Union[str, torch.dtype]] = "auto", + batch_size: Optional[Union[int, str]] = 1, + max_batch_size: Optional[int] = 64, + trust_remote_code: Optional[bool] = False, + use_fast_tokenizer: Optional[bool] = True, + add_bos_token: Optional[bool] = False, + prefix_token_id: Optional[int] = None, + # arguments used for splitting a model across GPUs naively. + # only used if `parallelize=True`. + parallelize: Optional[bool] = False, + device_map_option: Optional[str] = "auto", + max_memory_per_gpu: Optional[Union[int, str]] = None, + max_cpu_memory: Optional[Union[int, str]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = "./offload", + # PEFT, delta weights and quantization options + peft: Optional[str] = None, + delta: Optional[str] = None, + autogptq: Optional[Union[bool, str]] = False, + **kwargs, + ) -> None: + super().__init__() + + # optionally: take in an already-initialized transformers.PreTrainedModel + if not isinstance(pretrained, str): + eval_logger.warning( + "`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way." 
+ ) + assert not parallelize, "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`" + self._model = pretrained + self._device = self._model.device + self._config = self._model.config + gpus = 0 + + if tokenizer: + assert isinstance( + tokenizer, transformers.PreTrainedTokenizer + ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast) + self.tokenizer = tokenizer + else: + # Get tokenizer + model_name = self._model.name_or_path + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + + else: + assert isinstance(device, str) + assert isinstance(pretrained, str) + assert isinstance(batch_size, (int, str)) + + gpus = torch.cuda.device_count() + accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52)) + accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs]) + if accelerator.num_processes > 1: + self.accelerator = accelerator + + if "npu" in accelerator.device.type: + gpus = torch.npu.device_count() + + if not (parallelize or accelerator.num_processes > 1): + # use user-passed device + device_list = set( + ["cuda", "cpu"] + + [f"cuda:{i}" for i in range(gpus)] + + ["mps", "mps:0"] + + [f"npu:{i}" for i in range(gpus)] + ) + if device and device in device_list: + self._device = torch.device(device) + eval_logger.info(f"Using device '{device}'") + if device in ("mps", "mps:0") and version.parse( + torch.__version__ + ) < version.parse("2.1"): + raise RuntimeError( + f"mps requires torch >= 2.1. You have {torch.__version__}" + ) + else: + eval_logger.info("Device not specified") + eval_logger.info(f"Cuda Available? 
{torch.cuda.is_available()}") + self._device = ( + torch.device("cuda") + if torch.cuda.is_available() + else torch.device("cpu") + ) + else: + if device != "cuda": + eval_logger.info( + f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model." + ) + # TODO: include in warning that `load_in_8bit` etc. affect this too + self._device = torch.device(device) + + # TODO: update this to be less of a hack once subfolder is fixed in HF + revision = revision + ("/" + subfolder if subfolder is not None else "") + + self._get_config( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + ) + + # determine which of 'causal' and 'seq2seq' backends to use + self._get_backend( + config=self.config, backend=backend, trust_remote_code=trust_remote_code + ) + + # load tokenizer so we know tokenizer vocabulary size before loading model and PEFT + self._create_tokenizer( + pretrained, + tokenizer, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast_tokenizer=use_fast_tokenizer, + ) + + # if we passed `pretrained` as a string, initialize our model now + if isinstance(pretrained, str): + self._create_model( + pretrained=pretrained, + revision=revision, + dtype=dtype, + trust_remote_code=trust_remote_code, + parallelize=parallelize, + gpus=gpus, + device_map_option=device_map_option, + max_memory_per_gpu=max_memory_per_gpu, + max_cpu_memory=max_cpu_memory, + offload_folder=offload_folder, + peft=peft, + delta=delta, + autogptq=autogptq, + **kwargs, + ) + + # access self._model through self.model property outside this method + if isinstance(self.model, torch.nn.Module): + self.model.eval() + self.model.tie_weights() + + if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"): + # TODO: can remove this whole snippet except in the mps case, perhaps? 
+ if not (parallelize or autogptq or hasattr(self, "accelerator")): + # place model onto device requested manually, + # if not using HF Accelerate or device_map + # or any other option that preloads model onto device + try: + self.model.to(self.device) + except ValueError: + eval_logger.debug( + "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore." + ) + + self.truncation = truncation + self.logits_cache = logits_cache + self.vocab_size = self.tokenizer.vocab_size + # select (or create) a pad token to use + self.tokenizer = configure_pad_token(self.tokenizer, model_config=self.config) + + # TODO: override this for Gemma + self.add_bos_token = add_bos_token + if getattr(self.config, "model_type", None) == "gemma": + self.add_bos_token = True + eval_logger.info( + f"Model type is '{self.config.model_type}', a BOS token will be used as Gemma underperforms without it." + ) + + self._max_length = max_length + self.pretrained = pretrained + self.delta = delta + self.peft = peft + self.revision = revision + self.batch_schedule = 1 + self.batch_sizes = {} + self.max_batch_size = max_batch_size + + if str(batch_size).startswith("auto"): + batch_size = batch_size.split(":") + self.batch_size_per_gpu = batch_size[0] + self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1 + else: + self.batch_size_per_gpu = int(batch_size) + + if isinstance(pretrained, str): + # multigpu data-parallel support when launched with accelerate + if gpus > 1: + if parallelize: + if accelerator.num_processes > 1: + raise RuntimeError( + "Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher." 
+ ) + else: + pass + elif accelerator.num_processes == 1: + # if we aren't launching via accelerate, ditch + self._rank = 0 + self._world_size = 1 + else: + if gpus > accelerator.num_processes: + eval_logger.warning( + "WARNING: The number of total system GPUs does not match the number of spawned processes. " + "If you would like to use data parallelism, please launch the script " + "with 'accelerate launch *script*'. " + f"Current run will proceed with {accelerator.num_processes} devices." + ) + assert ( + accelerator.distributed_type + in [ + DistributedType.FSDP, + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + ] + ), "Unsupported distributed type provided. Only DDP and FSDP are supported." + if accelerator.distributed_type == DistributedType.FSDP: + self._model = accelerator.prepare(self.model) + else: + self._model = accelerator.prepare_model( + self.model, evaluation_mode=True + ) + self._device = torch.device(f"{accelerator.device}") + self.accelerator = accelerator + + if self.accelerator.is_local_main_process: + eval_logger.info(f"Using {gpus} devices with data parallelism") + + self._rank = self.accelerator.local_process_index + self._world_size = self.accelerator.num_processes + else: + # if a PreTrainedModel was passed into HFLM, we forgo distributed setup. + eval_logger.warning( + "Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration" + ) + self._rank = 0 + self._world_size = 1 + + self.custom_prefix_token_id = prefix_token_id + if prefix_token_id is not None: + eval_logger.info( + f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}" + ) + + @property + def config(self): + # return the associated transformers.AutoConfig for the given pretrained model. 
        return self._config

    @property
    def model(self):
        # returns the model, unwrapping it if using Accelerate
        if hasattr(self, "accelerator"):
            return self.accelerator.unwrap_model(self._model)
        else:
            return self._model

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        # precedence: explicit user override > BOS > EOS
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        # maximum context length, resolved in priority order:
        # user override > model config attrs > tokenizer > class default
        if self._max_length:  # if max length manually set, return it
            return self._max_length
        seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
        for attr in seqlen_config_attrs:
            if hasattr(self.model.config, attr):
                return getattr(self.model.config, attr)
        if hasattr(self.tokenizer, "model_max_length"):
            # HF's sentinel for "no limit recorded" -- treat as unset
            if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                return self._DEFAULT_MAX_LENGTH
            return self.tokenizer.model_max_length
        return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        # default cap on newly generated tokens per request
        return 256

    @property
    def batch_size(self):
        return self.batch_size_per_gpu

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return self._rank

    @property
    def world_size(self):
        return self._world_size

    @property
    def tokenizer_name(self) -> str:
        # filesystem-safe identifier for the tokenizer (used in cache keys)
        return self.tokenizer.name_or_path.replace("/", "__")

    @property
    def chat_template(self) -> str:
        if self.tokenizer.chat_template is not None:
            return self.tokenizer.chat_template
        return self.tokenizer.default_chat_template

    def _get_backend(
        self,
        config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
        backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
        trust_remote_code: Optional[bool] = False,
    ) -> None:
        """
        Helper method during initialization.
        Determines the backend ("causal" (decoder-only) or "seq2seq" (encoder-decoder))
        model type to be used.

        Sets `self.AUTO_MODEL_CLASS` to either AutoModelForCausalLM or
        AutoModelForSeq2SeqLM; all later code branches on that attribute.
        """
        assert backend in ["default", "causal", "seq2seq"]

        if backend != "default":
            # if we've settled on non-default backend, use that manually
            if backend == "causal":
                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
            elif backend == "seq2seq":
                self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
            eval_logger.info(
                f"Overrode HF model backend type, and using type '{backend}'"
            )
        else:
            # determine and use the default HF backend for this model, based on its config + metadata.
            if (
                getattr(config, "model_type")
                in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
            ):
                # first check if model type is listed under seq2seq models, since some
                # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers.
                # these special cases should be treated as seq2seq models.
                self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
            elif (
                getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
            ):
                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
            else:
                if not trust_remote_code:
                    eval_logger.warning(
                        "HF model type is neither marked as CausalLM or Seq2SeqLM. \
                    This is expected if your model requires `trust_remote_code=True` but may be an error otherwise."
                    )
                # if model type is neither in HF transformers causal or seq2seq model registries
                # then we default to AutoModelForCausalLM
                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM

        assert self.AUTO_MODEL_CLASS in [
            transformers.AutoModelForCausalLM,
            transformers.AutoModelForSeq2SeqLM,
        ]
        return None

    def _get_config(
        self,
        pretrained: str,
        revision: str = "main",
        trust_remote_code: bool = False,
    ) -> None:
        """Fetch the model's AutoConfig from the hub/path and store it on `self._config`."""
        self._config = transformers.AutoConfig.from_pretrained(
            pretrained,
            revision=revision,
            trust_remote_code=trust_remote_code,
        )

    def _create_model(
        self,
        pretrained: str,
        revision: Optional[str] = "main",
        dtype: Optional[Union[str, torch.dtype]] = "auto",
        trust_remote_code: Optional[bool] = False,
        # arguments used for splitting a model across GPUs naively.
        # only used if `parallelize=True`.
        # (accelerate naive PP (device_map) options)
        parallelize: Optional[bool] = False,
        gpus: Optional[int] = None,
        device_map_option: Optional[str] = "auto",
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[str] = "./offload",
        # PEFT, delta weights and quantization options
        peft: Optional[str] = None,
        delta: Optional[str] = None,
        autogptq: Optional[Union[bool, str]] = False,
        **kwargs,
    ) -> None:
        """
        Initializes an HF or HF-compatible PreTrainedModel from scratch
        inside HFLM, using the kwargs passed into self.__init__().

        Also handles functionality such as AutoGPTQ usage and PEFT wrapping.

        For future similar extensions to AutoGPTQ that are not core to HF's ecosystem,
        (such as PyTorch models that are nearly, but not quite, fully mirroring
        HF's public interface relied on in this HFLM class)
        please consider subclassing HFLM and overriding this and other methods as needed.
+ """ + + model_kwargs = kwargs if kwargs else {} + + if parallelize: + model_kwargs.update( + _get_accelerate_args( + device_map_option, # TODO: phase out device_map_option? + max_memory_per_gpu, + max_cpu_memory, + offload_folder, + gpus, + ) + ) + elif "device_map" not in model_kwargs: + # set a device_map to initialize model on the right GPU. + # this is needed because it seems that the default behavior + # for quantized models now seems to be device_map="auto" + # which breaks data-parallel mode. + if hasattr(self, "accelerator"): + model_kwargs.update({"device_map": {"": f"{self.accelerator.device}"}}) + else: + model_kwargs.update({"device_map": {"": str(self.device)}}) + + if not autogptq: + if model_kwargs.get("load_in_4bit", None): + assert ( + transformers.__version__ >= "4.30.0" + ), "load_in_4bit requires transformers >= 4.30.0" + if transformers.__version__ >= "4.30.0": + if model_kwargs.get("load_in_4bit", None): + if model_kwargs.get("bnb_4bit_compute_dtype", None): + model_kwargs["bnb_4bit_compute_dtype"] = get_dtype( + model_kwargs["bnb_4bit_compute_dtype"] + ) + self._model = self.AUTO_MODEL_CLASS.from_pretrained( + pretrained, + revision=revision, + torch_dtype=get_dtype(dtype), + trust_remote_code=trust_remote_code, + **model_kwargs, + ) + else: + try: + from auto_gptq import AutoGPTQForCausalLM + except ModuleNotFoundError: + raise Exception( + "Tried to load auto_gptq, but auto-gptq is not installed ", + "please install auto-gptq via pip install lm-eval[gptq] or pip install -e .[gptq]", + ) + + self._model = AutoGPTQForCausalLM.from_quantized( + pretrained, + trust_remote_code=trust_remote_code, + model_basename=None if autogptq is True else Path(autogptq).stem, + use_safetensors=True + if autogptq is True + else autogptq.endswith(".safetensors"), + **model_kwargs, + ) + + if peft and delta: + raise ValueError( + "Cannot use both 'peft' and 'delta' options at the same time." 
+ ) + + if peft: + if model_kwargs.get("load_in_4bit", None): + if version.parse(PEFT_VERSION) < version.parse("0.4.0"): + raise AssertionError("load_in_4bit requires peft >= 0.4.0") + if self._model.config.vocab_size != len(self.tokenizer): + # resize model for LoRAs with added tokens + self._model.resize_token_embeddings(len(self.tokenizer)) + eval_logger.info( + f"Model config indicates vocab_size='{self._model.config.vocab_size}', but found tokenizer with vocab size '{len(self.tokenizer)}'. Resizing model embedding layer..." + ) + self._model = PeftModel.from_pretrained( + self._model, peft, revision=revision + ) + elif delta: + if autogptq: + eval_logger.warning( + "Delta weights might trigger unexpected behavior when used with AutoGPTQ." + ) + _model_delta = self.AUTO_MODEL_CLASS.from_pretrained( + delta, + revision=revision, + torch_dtype=get_dtype(dtype), + trust_remote_code=trust_remote_code, + **model_kwargs, + ) + for name, param in self._model.state_dict().items(): + try: + param.data += _model_delta.state_dict()[name] + except KeyError: + raise KeyError(f"Delta model is missing weights for layer: {name}") + except Exception as e: + raise RuntimeError( + f"Failed to add delta weights to layer {name}. Error: {e}" + ) + + del _model_delta + + return None + + def _create_tokenizer( + self, + pretrained: Union[str, transformers.PreTrainedModel], + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ], + revision: Optional[str] = "main", + trust_remote_code: Optional[bool] = False, + use_fast_tokenizer: Optional[bool] = True, + ) -> None: + """ + Helper method during initialization. + + Create a tokenizer object corresponding to the correct + tokenizer for value of `pretrained`, or use the pre-initialized tokenizer passed. 
+ """ + + if tokenizer: + if isinstance(tokenizer, str): + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + tokenizer, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + else: + assert isinstance( + tokenizer, transformers.PreTrainedTokenizer + ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast) + self.tokenizer = tokenizer + else: + # Get tokenizer based on 'pretrained' + if isinstance(pretrained, str): + model_name = pretrained + else: + # get the HF hub name via accessor on model + model_name = self.model.name_or_path + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + return None + + def _detect_batch_size(self, requests=None, pos: int = 0): + if requests: + _, context_enc, continuation_enc = requests[pos] + max_length = len( + (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1] + ) + max_context_enc = len(context_enc[-(self.max_length + 1) :]) + max_cont_enc = len(continuation_enc[-(self.max_length + 1) :]) + else: + max_length = self.max_length + max_context_enc = max_length + max_cont_enc = max_length + + # if OOM, then halves batch_size and tries again + @find_executable_batch_size(starting_batch_size=self.max_batch_size) + def forward_batch(batch_size): + if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + length = max(max_context_enc, max_cont_enc) + batched_conts = torch.ones( + (batch_size, length), device=self.device + ).long() + test_batch = torch.ones((batch_size, length), device=self.device).long() + call_kwargs = { + "attn_mask": test_batch, + "labels": batched_conts, + } + else: + call_kwargs = {} + test_batch = torch.ones( + (batch_size, max_length), device=self.device + ).long() + for _ in range(5): + out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1) # noqa: F841 + + return batch_size + + try: + batch_size 
= forward_batch() + except RuntimeError as e: + if "No executable batch size found" in str(e): + batch_size = 1 + else: + raise + + if self.world_size > 1: + # if multi-GPU, always take minimum over all selected batch sizes + max_rnk_bs = torch.tensor([batch_size], device=self.device) + gathered = ( + self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist() + ) + batch_size = min(gathered) + clear_torch_cache() + return batch_size + + clear_torch_cache() + return batch_size + + def tok_encode( + self, string: str, left_truncate_len=None, add_special_tokens=None + ) -> List[int]: + """ """ + # default for None - empty dict, use predefined tokenizer param + # used for all models except for CausalLM or predefined value + special_tokens_kwargs = {} + + # by default for CausalLM - false or self.add_bos_token is set + if add_special_tokens is None: + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + special_tokens_kwargs = { + "add_special_tokens": False or self.add_bos_token + } + # otherwise the method explicitly defines the value + else: + special_tokens_kwargs = {"add_special_tokens": add_special_tokens} + + encoding = self.tokenizer.encode(string, **special_tokens_kwargs) + + # left-truncate the encoded context to be at most `left_truncate_len` tokens long + if left_truncate_len: + encoding = encoding[-left_truncate_len:] + + return encoding + + def tok_batch_encode( + self, + strings: List[str], + padding_side: str = "left", + left_truncate_len: int = None, + truncation: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. 
        old_padding_side = self.tokenizer.padding_side
        self.tokenizer.padding_side = padding_side

        add_special_tokens = {}
        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            # match tok_encode: BOS only when self.add_bos_token is set
            add_special_tokens = {"add_special_tokens": False or self.add_bos_token}

        encoding = self.tokenizer(
            strings,
            truncation=truncation,
            padding="longest",
            return_tensors="pt",
            **add_special_tokens,
        )
        if left_truncate_len:
            # keep only the rightmost `left_truncate_len` tokens of each row
            encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
            encoding["attention_mask"] = encoding["attention_mask"][
                :, -left_truncate_len:
            ]
        # restore the tokenizer's previous padding side before returning
        self.tokenizer.padding_side = old_padding_side

        return encoding["input_ids"], encoding["attention_mask"]

    def tok_decode(self, tokens, skip_special_tokens=True):
        """Decode token ids back to a string (dropping special tokens by default)."""
        return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens)

    def _model_call(self, inps, attn_mask=None, labels=None):
        """
        :param inps: torch.Tensor
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape
            [batch, sequence_ctx]. the size of sequence may vary from call to call
        :param attn_mask: torch.Tensor, optional
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
        :param labels: torch.Tensor, optional
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
        :return
            A torch tensor of shape [batch, sequence, vocab] with the
        logits returned from the model's decoder
        """
        with torch.no_grad():
            if attn_mask is not None or labels is not None:
                # seq2seq path: both mask and labels must be supplied together
                assert attn_mask is not None and labels is not None
                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
                return self.model(
                    input_ids=inps, attention_mask=attn_mask, labels=labels
                ).logits
            else:
                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
                return self.model(inps).logits

    def _model_generate(self, context, max_length, stop, **generation_kwargs):
        """Run `model.generate` with stop-sequence criteria built from `stop`."""
        # temperature = 0.0 if not set
        # if do_sample is false and temp==0.0:
        # remove temperature, as do_sample=False takes care of this
        # and we don't want a warning from HF
        generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
        do_sample = generation_kwargs.get("do_sample", None)

        # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies
        if generation_kwargs.get("temperature") == 0.0 and do_sample is None:
            generation_kwargs["do_sample"] = do_sample = False

        if do_sample is False and generation_kwargs.get("temperature") == 0.0:
            generation_kwargs.pop("temperature")
        # build stopping criteria
        stopping_criteria = stop_sequences_criteria(
            self.tokenizer, stop, context.shape[1], context.shape[0]
        )
        return self.model.generate(
            input_ids=context,
            max_length=max_length,
            stopping_criteria=stopping_criteria,
            pad_token_id=self.tokenizer.pad_token_id,
            use_cache=True,
            **generation_kwargs,
        )

    def _select_cont_toks(
        self, logits: torch.Tensor, contlen: int = None, inplen: int = None
    ) -> torch.Tensor:
        """Slice out the logits corresponding to the continuation tokens.

        Causal LMs need both `inplen` and `contlen` (context is part of the
        input); seq2seq LMs need only `contlen` (logits are decoder-side only).
        """
        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            assert (
                contlen and inplen
            ), "Must pass input len and cont. len to select scored logits for causal LM"
            # discard right-padding.
            # also discard the input/context tokens. we'll only score continuations.
            logits = logits[inplen - contlen : inplen]
        elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
            assert (
                contlen and not inplen
            ), "Selecting scored logits for Seq2SeqLM requires only cont. len"
            # only discard right-padding.
            # the logits input to this fn only contain decoder-side tokens.
            logits = logits[:contlen]

        return logits

    def loglikelihood_rolling(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[float]:
        """Compute full-document log-likelihood by sliding a max-length window
        over each string and summing per-window token log-probs."""
        loglikelihoods = []

        adaptive_batch_size = None
        if self.batch_size == "auto":
            # using rolling window with maximum context
            print("Passed argument batch_size = auto. Detecting largest batch size")
            batch_size = self._detect_batch_size()
            print(f"Determined Largest batch size: {batch_size}")
            adaptive_batch_size = batch_size

        for (string,) in tqdm(
            [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0))
        ):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.prefix_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            # prepend a dummy request-string slot so windows match the
            # (cache_key, context_enc, continuation_enc) shape _loglikelihood_tokens expects
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]

            pad_amnt = 0
            if self.world_size > 1:
                # We pad out the external document-level iterator so the inner iterator doesn't hang
                mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
                gathered = (
                    self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
                )

                pad_amnt = max(gathered) - gathered[self.rank]
                if pad_amnt > 0:
                    rolling_token_windows += pad_amnt * [rolling_token_windows[0]]

            string_nll = self._loglikelihood_tokens(
                requests=rolling_token_windows,
                disable_tqdm=True,
                override_bs=adaptive_batch_size,
            )

            if (self.world_size > 1) and (pad_amnt > 0):
                # drop the padded duplicate windows added above before summing
                string_nll = [x[0] for x in string_nll[:-pad_amnt]]
            else:
                # discard is_greedy
                string_nll = [x[0] for x in string_nll]

            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)

        return loglikelihoods

    def _batch_scheduler(self, pos, n_reordered_requests):
        """Return the batch size to use at request position `pos`, re-detecting
        it `batch_schedule` times over the (length-sorted) request list."""
        # NOTE(review): int(len/batch_schedule) can be 0 for very short request
        # lists with a large schedule, which would raise ZeroDivisionError -- confirm.
        sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
        if sched in self.batch_sizes:
            return self.batch_sizes[sched]
        if (len(self.batch_sizes) > 1) and (
            self.batch_sizes[sched - 1] == self.max_batch_size
        ):
            # if previous batch size is already maximal, skip recomputation
            self.batch_sizes[sched] = self.max_batch_size
            return self.batch_sizes[sched]
        print(
            f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
        )
        self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
        print(f"Determined largest batch size: {self.batch_sizes[sched]}")
        return self.batch_sizes[sched]

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
        override_bs: int = None,
    ) -> List[Tuple[float, bool]]:
        """Score pre-tokenized (context, continuation) pairs.

        Returns, per request and in original order, a tuple of
        (sum of continuation log-probs, whether greedy decoding matches the
        continuation exactly).
        """
        # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
        res = []

        def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]):
            """Defines the key for the sorted method"""
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end

            toks = req[1] + req[2]
            return -len(toks), tuple(toks)

        def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]):
            """Defines the key to group and lookup one-token continuations"""
            # Use with group_by="contexts" (optional)"
            # allows for the creation of a lookup, so we can reuse logits in case of one-token continuations.
            # speeds up some multiple-choice tasks proportionally to the number of choices.
            # groups requests by context+continuation[:-1] and infer on one request/group.
            return req[-2] + req[-1][:-1]

        re_ord = Collator(
            requests,
            sort_fn=_collate,
            group_by="contexts"
            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
            and self.logits_cache
            else None,
            group_fn=_lookup_one_token_cont,
        )

        # automatic (variable) batch size detection for vectorization
        # pull longest context sample from request
        n_reordered_requests = len(re_ord)
        batch_size = (
            self.batch_size
            if self.batch_size != "auto"
            else override_bs
            if override_bs is not None
            else 0
        )
        batch_fn = (
            self._batch_scheduler
            if self.batch_size == "auto"
            and n_reordered_requests > 0
            and not override_bs
            else None
        )

        chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn)
        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running loglikelihood requests",
        )
        for chunk in chunks:
            inps = []
            cont_toks_list = []
            inplens = []

            conts = []
            encoder_attns = []

            padding_len_inp = None
            padding_len_cont = None
            # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
            # tensors, then we pack them together into a batch, call the model, and then pick it all apart
            # again because vectorizing is annoying

            for _, context_enc, continuation_enc in chunk:
                # sanity check
                assert len(context_enc) > 0
                assert len(continuation_enc) > 0
                assert len(continuation_enc) <= self.max_length

                # how this all works (illustrated on a causal decoder-only setup):
                #          CTX      CONT
                # inp    0 1 2 3|4 5 6 7 8 9   <- last token is deleted by inp[:, :-1]
                # model  \               \
                # logits   1 2 3|4 5 6 7 8 9   <- the ctx half gets tossed out by the
                # cont_toks      4 5 6 7 8 9      [:, -len(continuation_enc):, :self.vocab_size] slice

                # when too long to fit in context, truncate from the left
                if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                    inp = torch.tensor(
                        (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape
                elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                    inp = torch.tensor(
                        (context_enc)[-self.max_length :],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape

                    # build encoder attn masks
                    encoder_attns.append(torch.ones_like(inp))

                    cont = torch.tensor(
                        (continuation_enc)[-self.max_length :],
                        # TODO: left-shift these?
                        # TODO: our code assumes we never end up truncating conts for either model type
                        dtype=torch.long,
                        device=self.device,
                    )
                    (contlen,) = cont.shape

                    conts.append(cont)

                    padding_len_cont = (
                        max(padding_len_cont, contlen)
                        if padding_len_cont is not None
                        else contlen
                    )

                padding_len_inp = (
                    max(padding_len_inp, inplen)
                    if padding_len_inp is not None
                    else inplen
                )

                inps.append(inp)  # [1, inp_length]
                cont_toks_list.append(continuation_enc)
                inplens.append(inplen)

            # create encoder attn mask and batched conts, if seq2seq
            call_kwargs = {}
            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                batched_inps = pad_and_concat(
                    padding_len_inp, inps, padding_side="right"
                )  # [batch, padding_len_inp]
            elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                # TODO: left-pad encoder inps and mask?
                batched_inps = pad_and_concat(
                    padding_len_inp, inps
                )  # [batch, padding_len_inp]
                batched_conts = pad_and_concat(
                    padding_len_cont, conts
                )  # [batch, padding_len_cont]
                batched_encoder_mask = pad_and_concat(
                    padding_len_inp, encoder_attns
                )  # [batch, padding_len_inp]
                call_kwargs = {
                    "attn_mask": batched_encoder_mask,
                    "labels": batched_conts,
                }

            multi_logits = F.log_softmax(
                self._model_call(batched_inps, **call_kwargs), dim=-1
            )  # [batch, padding_length (inp or cont), vocab]

            for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
                chunk, multi_logits, inplens, cont_toks_list
            ):
                # Slice to original seq length
                contlen = len(cont_toks)
                # take only logits in the continuation
                # (discard context toks if decoder-only ; discard right-padding)
                # also discards + checks for "virtual tokens" in the causal LM's input window
                # from prompt/prefix tuning tokens, if applicable
                ctx_len = (
                    inplen + (logits.shape[0] - padding_len_inp)
                    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
                    else None
                )
                logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
                logits = logits.unsqueeze(0)  # [1, seq, vocab]

                # Check if per-token argmax is exactly equal to continuation
                greedy_tokens = logits.argmax(dim=-1)

                # check for one-token continuation cache hits.
                # noop in case group_by != "contexts" or no cache hit and returns the
                # original args. Otherwise, expands the logits batch dimension and yields each
                # batch along with matching continuation tokens and prompt strings.
                # logits -> [1, seq, vocab]
                for request_str, cont_toks, logits in re_ord.get_cache(
                    req_str=request_str,
                    cxt_toks=ctx_tokens,
                    cont_toks=cont_toks,
                    logits=logits,
                ):
                    cont_toks = torch.tensor(
                        cont_toks, dtype=torch.long, device=self.device
                    ).unsqueeze(0)  # [1, seq]
                    max_equal = (greedy_tokens == cont_toks).all()

                    # Obtain log-probs at the corresponding continuation token indices
                    # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
                    logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
                        -1
                    )  # [1, seq]

                    # Answer: (log prob, is-exact-match)
                    answer = (float(logits.sum()), bool(max_equal))

                    res.append(answer)

                    self.cache_hook.add_partial("loglikelihood", request_str, answer)
                    pbar.update(1)

        pbar.close()

        # un-sort results back into the caller's original request order
        return re_ord.get_original(res)

    def generate_until(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[str]:
        res = []

        def _collate(req: Tuple[str, dict]):
            """Defines the key for the sorted method"""
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
            toks = self.tok_encode(req[0])
            return -len(toks), req[0]

        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running generate_until requests",
        )
        adaptive_batch_size = None
        if self.batch_size == "auto":
            # using rolling window with maximum context
            print("Passed argument batch_size = auto. Detecting largest batch size")
            batch_size = self._detect_batch_size()
            print(f"Determined Largest batch size: {batch_size}")
            adaptive_batch_size = batch_size
        # for each different set of kwargs, we execute all requests, by batch.
        batch_size = (
            self.batch_size
            if self.batch_size != "auto"
            else adaptive_batch_size
            if adaptive_batch_size is not None
            else 0
        )
        batch_fn = (
            self._batch_scheduler
            if self.batch_size == "auto" and not adaptive_batch_size
            else None
        )

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        # group_fn=lambda x: x[1] -> x=(context, gen_kwargs)
        re_ords = Collator(
            [reg.args for reg in requests],
            sort_fn=_collate,
            group_by="gen_kwargs",
            group_fn=lambda x: x[1],
        )
        chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn)
        for chunk in chunks:
            contexts, all_gen_kwargs = zip(*chunk)
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]
            # unpack our keyword arguments.
+ until = None + if isinstance(gen_kwargs, dict): + kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1 + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [until] + elif not isinstance(until, list): + raise ValueError( + f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}" + ) + else: + raise ValueError( + f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}" + ) + # add EOS token to stop sequences + eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False) + if not until: + until = [eos] + else: + until.append(eos) + if "max_gen_toks" in kwargs.keys(): + max_gen_toks = kwargs.pop("max_gen_toks") + else: + max_gen_toks = self.max_gen_toks + + # set the max length in tokens of inputs ("context_enc") + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + # max len for inputs = max length, minus room to generate the max new tokens + max_ctx_len = self.max_length - max_gen_toks + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + # max len for inputs = encoder's whole max_length + max_ctx_len = self.max_length + + # encode, pad, and truncate contexts for this batch + context_enc, attn_masks = self.tok_batch_encode( + contexts, + left_truncate_len=max_ctx_len, + truncation=self.truncation, + ) + context_enc = context_enc.to(self.device) + attn_masks = attn_masks.to(self.device) + + if "max_length" not in kwargs: + kwargs["max_length"] = context_enc.shape[1] + max_gen_toks + + # perform batched generation + cont = self._model_generate( + context=context_enc, + attention_mask=attn_masks, + stop=until, + **kwargs, + ) + + cont_toks_list = cont.tolist() + for cont_toks, context in zip(cont_toks_list, contexts): + # discard context + left-padding toks if using causal decoder-only LM + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + cont_toks = cont_toks[context_enc.shape[1] :] + + s = self.tok_decode(cont_toks) + + # use 
secondary stop seqs to cut off should-have-been-stopped content post-hoc + for term in until: + if len(term) > 0: + # ignore '' separator, + # for seq2seq case where self.tok_decode(self.eot_token_id) = '' + s = s.split(term)[0] + + res.append(s) + + self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s) + pbar.update(1) + # reorder this group of results back to original unsorted form + res = re_ords.get_original(res) + + pbar.close() + + return res + + def apply_chat_template(self, chat_history: List[Dict[str, str]]) -> str: + """ + Method to apply a chat template to a list of chat history between user and model. + """ + return self.tokenizer.apply_chat_template( + chat_history, tokenize=False, add_generation_prompt=True + ) + + def get_model_info(self) -> dict: + """ + Method to get Hugging Face model information for experiment reproducibility. + """ + + def get_model_num_params(model) -> int: + if hasattr(model, "num_parameters"): + return model.num_parameters() + if hasattr(model, "parameters"): + return sum(p.numel() for p in model.parameters()) + else: + return -1 + + def get_model_dtype(model) -> str: + if hasattr(model, "dtype"): + return model.dtype + else: + return "" + + def get_model_sha(pretrained: str, revision: str) -> str: + try: + model_info = HfApi().model_info(repo_id=pretrained, revision=revision) + return model_info.sha + except Exception as e: + eval_logger.warn( + f"Failed to get model SHA for {pretrained} at revision {revision}. 
Error: {e}" + ) + return "" + + model_info = { + "model_num_parameters": get_model_num_params(self._model), + "model_dtype": get_model_dtype(self._model), + "model_revision": self.revision, + "model_sha": get_model_sha(self.pretrained, self.revision), + } + if self.peft: + model_info["peft_sha"] = get_model_sha(self.peft, self.revision) + if self.delta: + model_info["delta_sha"] = get_model_sha(self.delta, self.revision) + return model_info diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/mamba_lm.py b/venv/lib/python3.10/site-packages/lm_eval/models/mamba_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..cd9049836838a1dabb2baf383f8e8ce5a02e7391 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/mamba_lm.py @@ -0,0 +1,126 @@ +from typing import Optional, Union + +import torch + +import lm_eval.models.utils +from lm_eval.api.registry import register_model +from lm_eval.models.huggingface import HFLM + + +@register_model("mamba_ssm") +class MambaLMWrapper(HFLM): + def __init__( + self, + pretrained="state-spaces/mamba-130m", + **kwargs, + ) -> None: + """ + Mamba (via the `mamba_ssm` package) supports the following args: + ``` + d_model: int, + n_layer: int, + vocab_size: int, + initializer_cfg=None, + pad_vocab_size_multiple: int = 1, + ssm_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + ``` + + See https://github.com/state-spaces/mamba/blob/main/mamba_ssm/models/mixer_seq_simple.py#L175 for more info. + The above can all be passed via `--model_args` or to this __init__() directly + but we recommend placing many of these within the config.json file uploaded alongside your + Mamba model to the HF Hub instead. + All other HuggingFace from_pretrained() kwargs + such as those related to + `parallelize=True`, PEFT, autoGPTQ, + or any sub-configurations of these advanced args, + are unsupported by the `mamba_ssm` package. 
+ + The HFLM arguments + + `backend`, `tokenizer`, `truncation`, `max_length`, + `device`, `dtype`, `batch_size`, `max_batch_size`, `trust_remote_code`, `use_fast_tokenizer` + + Are all supported by Mamba where they do not conflict + with Mamba-specific restrictions such as causal LMs only. + """ + + if "backend" in kwargs: + # mamba currently only supports causal models + assert kwargs["backend"] == "causal" + + super().__init__( + pretrained=pretrained, + # set appropriate defaults for tokenizer, max length, etc + backend=kwargs.pop("backend", "causal"), + tokenizer=kwargs.pop("tokenizer", "EleutherAI/gpt-neox-20b"), + max_length=kwargs.pop("max_length", 2048), + **kwargs, + ) + + def _get_config( + self, + pretrained: str, + **kwargs, + ) -> None: + try: + from mamba_ssm.utils.hf import load_config_hf # noqa: F811 + except ModuleNotFoundError: + raise Exception( + "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \ +please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + ) + + self._config = load_config_hf(pretrained) + + def _create_model( + self, + pretrained: str, + dtype: Optional[Union[str, torch.dtype]] = "float16", + # no `parallelize=True` options + # no PEFT and quantization options + # Mamba does not support arbitrary HF from_pretrained() args + **kwargs, + ) -> None: + try: + from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel # noqa: F811 + except ModuleNotFoundError: + raise Exception( + "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. 
\ +please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + ) + + self._model = MambaLMHeadModel.from_pretrained( + pretrained, + device=self._device, + dtype=torch.float16 + if dtype == "auto" + else lm_eval.models.utils.get_dtype(dtype), + ) + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + for key in ("do_sample", "attention_mask"): + if key in generation_kwargs: + generation_kwargs.pop(key) + + # mamba's custom GenerationMixin currently does not support + # passing stopping criteria. + # for the time being, we simply generate to max length, + # then truncate (equivalent result) + # -- this should be revisited to speed up generation + # stopping_criteria = stop_sequences_criteria( + # self.tokenizer, stop, 1, context.shape[0] + # ) + + return self.model.generate( + input_ids=context, + max_length=max_length, + # stopping_criteria=stopping_criteria, + # pad_token_id=self.tokenizer.pad_token_id, + # use_cache=True, + **generation_kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/nemo_lm.py b/venv/lib/python3.10/site-packages/lm_eval/models/nemo_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1aca1eec18a05725ffb29e15f633078cab699b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/nemo_lm.py @@ -0,0 +1,537 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import importlib +import pathlib +from copy import deepcopy +from typing import List, Literal + +import filelock +import numpy as np +import torch +from tqdm import tqdm + +from lm_eval.api.instance import Instance +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import Collator +from lm_eval.utils import ( + eval_logger, + get_rolling_token_windows, + make_disjoint_window, + simple_parse_args_string, +) + + +def _patch_pretrained_cfg( + pretrained_cfg, trainer, tensor_model_parallel_size, pipeline_model_parallel_size +): + try: + import omegaconf + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + + omegaconf.OmegaConf.set_struct(pretrained_cfg, True) + with omegaconf.open_dict(pretrained_cfg): + attributes_to_update = { + "sequence_parallel": False, + "activations_checkpoint_granularity": None, + "activations_checkpoint_method": None, + "precision": trainer.precision, + "global_batch_size": None, + "tensor_model_parallel_size": tensor_model_parallel_size, + "pipeline_model_parallel_size": pipeline_model_parallel_size, + "apply_rope_fusion": False, + } + for name, value in attributes_to_update.items(): + if hasattr(pretrained_cfg, name): + pretrained_cfg[name] = value + return pretrained_cfg + + +def _get_target_from_class(target_class) -> str: + return f"{target_class.__module__}.{target_class.__name__}" + + +def load_model( + model_path: str, + trainer, + tensor_model_parallel_size: int, + pipeline_model_parallel_size: int, +) -> torch.nn.Module: + try: + from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import ( + MegatronGPTModel, + ) + from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector + 
except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + model_path = pathlib.Path(model_path) + + save_restore_connector = NLPSaveRestoreConnector() + if model_path.is_dir(): + save_restore_connector.model_extracted_dir = model_path.as_posix() + pretrained_cfg = save_restore_connector.restore_from( + None, model_path.as_posix(), return_config=True, trainer=trainer + ) + if not hasattr(pretrained_cfg, "target"): + pretrained_cfg["target"] = _get_target_from_class(MegatronGPTModel) + + pretrained_cfg = _patch_pretrained_cfg( + pretrained_cfg, + trainer, + tensor_model_parallel_size=tensor_model_parallel_size, + pipeline_model_parallel_size=pipeline_model_parallel_size, + ) + + model_to_load_path = model_path + override_config = pretrained_cfg + + module_name, class_name = override_config.target.rsplit(".", 1) + model_class = getattr(importlib.import_module(module_name), class_name) + + # monkeypatch _build_tokenizer method to be process-safe + tokenizer_lock = filelock.FileLock(f"/tmp/{model_path.name}.tokenizer.lock") + + def _synced_build_tokenizer(self): + with tokenizer_lock: + self._original_build_tokenizer() + + model_class._original_build_tokenizer = model_class._build_tokenizer + model_class._build_tokenizer = _synced_build_tokenizer + + model = model_class.restore_from( + restore_path=model_to_load_path.as_posix(), + trainer=trainer, + override_config_path=override_config, + save_restore_connector=save_restore_connector, + map_location=f"cuda:{trainer.local_rank}", + ) + + model.freeze() + model.training = False + try: + # Have to turn off activations_checkpoint_method for inference + model.model.language_model.encoder.activations_checkpoint_method = None + except AttributeError: + pass + return model + + 
+def setup_distributed_environment(trainer): + try: + from nemo.utils.app_state import AppState + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + + def dummy(): + return + + if trainer.strategy.launcher is not None: + trainer.strategy.launcher.launch(dummy, trainer=trainer) + trainer.strategy.setup_environment() + + app_state = AppState() + + return app_state + + +@register_model("nemo_lm") +class NeMoLM(LM): + def __init__( + self, + path: str, + max_length: int = 4096, + batch_size: int = 1, + max_gen_toks: int = 256, + devices: int = 1, + num_nodes: int = 1, + tensor_model_parallel_size: int = 1, + pipeline_model_parallel_size: int = 1, + precision: Literal[ + "16-mixed", + "bf16-mixed", + "32-true", + "64-true", + 64, + 32, + 16, + "64", + "32", + "16", + "bf16", + ] = "bf16", + **kwargs, + ): + try: + from nemo.collections.nlp.modules.common.text_generation_utils import ( + generate, + ) + from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy + from pytorch_lightning.trainer.trainer import Trainer + + self.generate = generate + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + + super().__init__() + + if ( + tensor_model_parallel_size == 1 + and pipeline_model_parallel_size == 1 + and devices > 1 + ): + eval_logger.info( + f"The number of data replicas for evaluation is {devices}." 
+ ) + eval_logger.info(f"The total number of devices is {devices}.") + eval_logger.info( + "No tensor parallelism or pipeline parallelism is applied." + ) + + elif tensor_model_parallel_size * pipeline_model_parallel_size == devices: + eval_logger.info( + f"Setting tensor parallelism to {tensor_model_parallel_size} and pipeline parallelism to {pipeline_model_parallel_size}." + ) + eval_logger.info(f"The total number of devices is {devices}.") + eval_logger.info("No data parallelism is applied.") + + else: + raise ValueError( + "Please set the product of tensor_model_parallel_size and pipeline_model_parallel_size" + "equal to the specified number of devices." + ) + + if num_nodes > 1: + raise ValueError( + "A number of nodes greater than 1 is not supported yet. Please set num_nodes as 1." + ) + + trainer = Trainer( + strategy=NLPDDPStrategy(), + devices=devices, + accelerator="gpu", + num_nodes=num_nodes, + precision=precision, + logger=False, + enable_checkpointing=False, + use_distributed_sampler=False, + ) + # Modify the following flags only for data replication + if ( + tensor_model_parallel_size == 1 + and pipeline_model_parallel_size == 1 + and devices > 1 + ): + self._device = torch.device(f"cuda:{trainer.global_rank}") + self._rank = trainer.global_rank + self._world_size = trainer.world_size + self.model = load_model( + path, + trainer, + tensor_model_parallel_size=tensor_model_parallel_size, + pipeline_model_parallel_size=pipeline_model_parallel_size, + ).cuda() + self.tokenizer = self.model.tokenizer + self.app_state = setup_distributed_environment(trainer) + + self._max_length = max_length + self._batch_size = int(batch_size) + self._max_gen_toks = max_gen_toks + + @classmethod + def create_from_arg_string(cls, arg_string, additional_config=None): + args = simple_parse_args_string(arg_string) + if additional_config: + args["batch_size"] = additional_config.get("batch_size", 1) + + return cls(**args) + + @property + def eot_token_id(self): + try: + return 
self.tokenizer.eos_id + except AttributeError: + return None + + @property + def max_length(self): + return self._max_length + + @property + def max_gen_toks(self): + return self._max_gen_toks + + @property + def batch_size(self): + return self._batch_size + + @property + def device(self): + return self._device + + @property + def rank(self): + return self._rank + + @property + def world_size(self): + return self._world_size + + @property + def accelerator(self): + return self._Accelerator(self.world_size) + + class _Accelerator: + def __init__(self, world_size): + self.world_size = world_size + + def wait_for_everyone(self): + torch.distributed.barrier() + + def gather(self, local_tensor): + gathered_tensors = [ + torch.zeros(1, dtype=local_tensor.dtype).cuda() + for _ in range(self.world_size) + ] + torch.distributed.all_gather(gathered_tensors, local_tensor) + return torch.cat(gathered_tensors) + + def tok_encode(self, string: str): + return self.tokenizer.text_to_ids(string) + + def tok_decode(self, tokens): + return self.tokenizer.ids_to_text(tokens) + + def _encode_pair(self, context, continuation): + n_spaces = len(context) - len(context.rstrip()) + if n_spaces > 0: + continuation = context[-n_spaces:] + continuation + context = context[:-n_spaces] + whole_enc = self.tok_encode(context + continuation) + context_enc = self.tok_encode(context) + context_enc_len = len(context_enc) + continuation_enc = whole_enc[context_enc_len:] + return context_enc, continuation_enc + + def loglikelihood(self, requests): + new_reqs = [] + for context, continuation in [req.args for req in requests]: + if context == "": + # end of text as context + context_enc, continuation_enc = ( + [self.eot_token_id], + self.tok_encode(continuation), + ) + else: + context_enc, continuation_enc = self._encode_pair(context, continuation) + + new_reqs.append(((context, continuation), context_enc, continuation_enc)) + + return self._loglikelihood_tokens(new_reqs) + + def loglikelihood_rolling( + 
self, requests: List[Instance], disable_tqdm: bool = False + ) -> List[float]: + loglikelihoods = [] + + for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm): + rolling_token_windows = list( + map( + make_disjoint_window, + get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.eot_token_id, + max_seq_len=self.max_length - 1, + context_len=1, + ), + ) + ) + + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + ) + + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + return loglikelihoods + + def _loglikelihood_tokens(self, requests, disable_tqdm=False): + res = [] + + def _collate(x): + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = Collator(requests, sort_fn=_collate) + chunks = re_ord.get_batched(n=self.batch_size, batch_fn=None) + pbar = tqdm( + total=len(requests), + disable=(disable_tqdm or (self.rank != 0)), + desc="Running loglikelihood requests", + ) + for chunk in chunks: + inps = [] + ctxlens = [] + contlens = [] + + for _, context_enc, continuation_enc in chunk: + # Leave one token for generation. Tokens_to_generate = 0 breaks NeMo. 
+ inp = (context_enc + continuation_enc)[-(self.max_length - 1) :] + + ctxlen = len(context_enc) - max( + 0, len(context_enc) + len(continuation_enc) - (self.max_length - 1) + ) + ctxlens.append(ctxlen) + contlens.append(len(continuation_enc)) + + inps.append(self.tok_decode(inp)) + + output = self.generate( + self.model, + inputs=inps, + tokens_to_generate=1, + min_tokens_to_generate=1, + compute_logprob=True, + all_probs=True, + ) + + batch_token_ids = np.asarray(output["token_ids"])[:, :-1] + batch_logprobs = output["logprob"][:, :-1] + batch_full_logprob = output["full_logprob"][:, :-1, :] + + # Compute greedy tokens for entire batch rather than calling it with proper ctxlen for each sample. + # Additional tokens for each sample will be trimmed later. + min_ctxlen = min(ctxlens) + + # Use min_ctxlen-1 instead of min_ctxlen since full_logprobs are not returns for the first token. + batch_greedy_tokens = ( + torch.argmax(batch_full_logprob[:, min_ctxlen - 1 :, :], -1) + .cpu() + .numpy() + ) + + for token_ids, greedy_tokens, logprobs, ctxlen, contlen, ( + cache_key, + _, + _, + ) in zip( + batch_token_ids, + batch_greedy_tokens, + batch_logprobs, + ctxlens, + contlens, + chunk, + ): + # Trim at contlen since shorter contexts in a batch will have more than one token generated. 
+ # Use ctxlen-1 instead of ctxlen same as for full_logprob in batch_greedy_tokens calculation + logprobs = (logprobs[ctxlen - 1 :])[:contlen] + logprob = sum(logprobs).tolist() + + continuation_tokens = (token_ids[ctxlen:])[:contlen] + len_diff = ctxlen - min_ctxlen + is_greedy = continuation_tokens == (greedy_tokens[len_diff:])[:contlen] + if not isinstance(is_greedy, bool): + is_greedy = is_greedy.all() + answer = (logprob, is_greedy) + + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + + res.append(answer) + pbar.update(1) + + pbar.close() + + return re_ord.get_original(res) + + def generate_until(self, requests): + if not requests: + return [] + res = [] + + def get_until(req_args): + until = req_args.get("until", []) + until = deepcopy(until) # prevent from modifying req_args for cache_key + if self.tokenizer.ids_to_tokens([self.eot_token_id])[0] not in until: + until.append(self.tokenizer.ids_to_tokens([self.eot_token_id])[0]) + return until + + def _collate(x): + toks = self.tok_encode(x[0]) + return len(toks), x[0] + + re_ords = Collator( + [reg.args for reg in requests], sort_fn=_collate, group_by="gen_kwargs" + ) + chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None) + for chunk in chunks: + contexts, all_gen_kwargs = zip(*chunk) + # we assume all gen kwargs in the batch are the same + # this is safe to assume because the `grouper` object ensures it. + req_args = all_gen_kwargs[0] + # unpack our keyword arguments. 
+ until = get_until(req_args) + max_gen_toks = req_args.get("max_gen_toks", self.max_gen_toks) + + remaining_length = self.max_length - max_gen_toks + contexts = [] + for context, _ in chunk: + encoded_context = self.tok_encode(context) + encoded_context = encoded_context[-remaining_length:] + contexts.append(self.tok_decode(encoded_context)) + + output = self.generate( + self.model, + inputs=contexts, + tokens_to_generate=max_gen_toks, + end_strings=until, + greedy=True, + ) + + answers = output["sentences"] + + continuations = [] + for context, answer in zip(contexts, answers): + continuations.append(answer[len(context) :]) + + for term in until: + continuations = [answer.split(term)[0] for answer in continuations] + + for request, answer in zip(chunk, continuations): + self.cache_hook.add_partial("greedy_until", request, answer) + res.append(answer) + + return re_ords.get_original(res) diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/neuralmagic.py b/venv/lib/python3.10/site-packages/lm_eval/models/neuralmagic.py new file mode 100644 index 0000000000000000000000000000000000000000..7c16b06d50b2b8117cf0b6d6b33d9d4a2b681923 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/neuralmagic.py @@ -0,0 +1,426 @@ +import copy +from typing import List, Optional, Tuple, Union + +import numpy +import transformers +from tqdm import tqdm + +import lm_eval.models.utils +from lm_eval import utils +from lm_eval.api.instance import Instance +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.huggingface import HFLM + + +eval_logger = utils.eval_logger + + +@register_model("sparseml") +class SparseMLLM(HFLM): + """ + SparseML is an open-source model optimization toolkit that enables you to create + inference-optimized sparse models using pruning, quantization, and distillation + algorithms. 
Models optimized with SparseML can then be exported to the ONNX format and + deployed with DeepSparse for GPU-class performance on CPU hardware. + + This class is a wrapper around the HuggingFace LM class to enable SparseML + integration with the lm-evaluation-harness. + """ + + def _create_model( + self, + pretrained: str, + revision: Optional[str] = "main", + dtype: Optional[str] = "auto", + trust_remote_code: Optional[bool] = False, + **kwargs, + ) -> None: + try: + from sparseml.transformers import SparseAutoModelForCausalLM + except ModuleNotFoundError: + raise Exception( + "Package `sparseml` is not installed. " + "Please install it via `pip install sparseml[transformers]`" + ) + + model_kwargs = kwargs if kwargs else {} + + if "device_map" not in model_kwargs: + # set a device_map to initialize model on the right GPU. + # this is needed because it seems that the default behavior + # for quantized models now seems to be device_map="auto" + # which breaks data-parallel mode. + if hasattr(self, "accelerator"): + model_kwargs.update( + {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}} + ) + else: + model_kwargs.update({"device_map": {"": str(self.device)}}) + + relevant_kwarg_names = [ + "offload_folder", + "device_map", + ] + relevant_kwargs = { + k: v for k, v in model_kwargs.items() if k in relevant_kwarg_names + } + + # Log the difference between model_kwargs and relevant_kwargs so we can see + # what is being ignored + ignored_kwargs = {} + for k, v in model_kwargs.items(): + if k not in relevant_kwargs.keys(): + ignored_kwargs[k] = v + eval_logger.warning( + f"The sparseml integration is ignoring the following kwargs that are specified: {ignored_kwargs}" + ) + + model = SparseAutoModelForCausalLM.from_pretrained( + pretrained, + revision=revision, + torch_dtype=lm_eval.models.utils.get_dtype(dtype), + trust_remote_code=trust_remote_code, + **relevant_kwargs, + ) + self._model = model + + def _get_config(self, pretrained: str, **kwargs) 
-> None: + try: + from sparseml.transformers import SparseAutoConfig + except ModuleNotFoundError: + raise Exception( + "Package `sparseml` is not installed. " + "Please install it via `pip install sparseml[transformers]`" + ) + + self._config = SparseAutoConfig.from_pretrained( + pretrained_model_name_or_path=pretrained, **kwargs + ) + + def _create_tokenizer( + self, + pretrained: Union[str, transformers.PreTrainedModel], + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ], + **kwargs, + ) -> None: + try: + from sparseml.transformers import SparseAutoTokenizer + except ModuleNotFoundError: + raise Exception( + "Package `sparseml` is not installed. " + "Please install it via `pip install sparseml[transformers]`" + ) + + if tokenizer: + if isinstance(tokenizer, str): + self.tokenizer = SparseAutoTokenizer.from_pretrained( + tokenizer, + **kwargs, + ) + else: + assert isinstance( + tokenizer, transformers.PreTrainedTokenizer + ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast) + self.tokenizer = tokenizer + else: + # Get tokenizer based on 'pretrained' + if isinstance(pretrained, str): + model_name = pretrained + else: + # get the HF hub name via accessor on model + model_name = self.model.name_or_path + self.tokenizer = SparseAutoTokenizer.from_pretrained( + model_name, + **kwargs, + ) + return None + + +@register_model("deepsparse") +class DeepSparseLM(LM): + """ + Wrapper around DeepSparse, a sparsity-aware deep learning + inference runtime for CPUs, to make it compatible with the + lm-evaluation-harness. 
+ """ + + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained: str, + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ] = None, + batch_size: Optional[Union[int, str]] = 1, + max_gen_toks: Optional[int] = 256, + max_length: Optional[int] = None, + ): + super().__init__() + + try: + import deepsparse + except ModuleNotFoundError: + raise Exception( + "Package `deepsparse` is not installed. " + "Please install it via `pip install deepsparse[transformers]`" + ) + + if isinstance(batch_size, str) and not batch_size.isdigit(): + eval_logger.warning( + f"batch_size={batch_size} is not valid for deepsparse because it is not an integer. " + "Ignoring and using the default of 1." + ) + batch_size = 1 + + self.batch_size = int(batch_size) + self._max_length = max_length if max_length else self._DEFAULT_MAX_LENGTH + self._max_gen_toks = max_gen_toks + self.batch_sizes = {} + + # Initialize new model and tokenizer instances + self.model = deepsparse.TextGeneration( + model_path=pretrained, + sequence_length=self._max_length, + batch_size=batch_size, + ) + self.tokenizer = tokenizer if tokenizer else self.model.tokenizer + self.config = self.model.config + + def tok_encode(self, string: str) -> List[int]: + return self.tokenizer.encode(string) + + def tok_decode(self, tokens: List[int]) -> str: + return self.tokenizer.decode(tokens) + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + if self.tokenizer.bos_token_id is not None: + return self.tokenizer.bos_token_id + return self.tokenizer.eos_token_id + + @property + def max_length(self) -> int: + return self._max_length + + @property + def max_gen_toks(self) -> int: + return self._max_gen_toks + + def loglikelihood(self, requests) -> 
List[Tuple[float, bool]]: + """ + Copied directly from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py + """ + new_reqs = [] + for context, continuation in [req.args for req in requests]: + if context == "": + raise NotImplementedError( + "Implementing empty context is not supported yet" + ) + context_enc, continuation_enc = self._encode_pair(context, continuation) + + new_reqs.append(((context, continuation), context_enc, continuation_enc)) + + return self._loglikelihood_tokens(new_reqs) + + def _loglikelihood_tokens( + self, + requests: List[Tuple[Tuple[str, str], List[int], List[int]]], + disable_tqdm: bool = False, + ) -> List[Tuple[float, bool]]: + """ + The function to compute the loglikelihood of the continuation + tokens given the context tokens. + + This function is an adapted version of the original function from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py + """ + res = [] + + def _collate(x): + """Defines the key for the sorted method""" + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = utils.Reorderer(requests, _collate) + + for chunk in tqdm( + list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)), + disable=disable_tqdm, + ): + batch_inp = [] + batch_cache_key = [] + batch_continuation_enc = [] + # len(chunk) is the batch_size + for cache_key, context_enc, continuation_enc in chunk: + # how this all works (illustrated on a causal decoder-only setup): + # CTX CONT + # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1] + # model \ \ + # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the + # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice # noqa: E501 + + inp = (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1] + + batch_inp.append(self.tokenizer.decode(inp)) + batch_cache_key.append(cache_key) + batch_continuation_enc.append(continuation_enc) + + response = 
self.model( + prompt=batch_inp, + max_new_tokens=0, + output_scores=True, + include_prompt_logits=True, + ) + + for resp, continuation_enc, cache_key in zip( + response.generations, batch_continuation_enc, batch_cache_key + ): + # (seq_len, vocab_size) + multi_scores = resp.score + + from deepsparse.utils.data import numpy_log_softmax + + # (seq_len, vocab_size) but with softmax applied + multi_logits = numpy_log_softmax(multi_scores, axis=1) + # toss out the context half of the sequence + # (cont_len, vocab_size) + continuation_multi_logits = multi_logits[-len(continuation_enc) :] + + # pick out the logits for the continuation tokens + # (cont_len,) + continuation_logits = continuation_multi_logits[ + numpy.arange(len(continuation_enc)), continuation_enc + ] + # check if the tokens generated greedly are the same + # as the expected continuation + greedy_tokens = continuation_multi_logits.argmax(axis=1) + max_equal = greedy_tokens.tolist() == continuation_enc + + # Answer: (log prob, is-exact-match) + answer = (float(continuation_logits.sum()), bool(max_equal)) + + res.append(answer) + + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + + return re_ord.get_original(res) + + def loglikelihood_rolling(self, requests: List[Instance]) -> List[float]: + raise NotImplementedError( + "The method not required by any of our current task integrations so far" + ) + + def generate_until(self, requests: List[Instance]) -> List[str]: + """ + The function to generate a certain number of new tokens + given a context. 
+ + This function is an adapted version of the original function from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/openai_completions.py + """ + if not requests: + return [] + res = [] + requests = [req.args for req in requests] + + def _collate(x): + toks = self.tok_encode(x[0]) + return len(toks), x[0] + + re_ord = utils.Reorderer(requests, _collate) + + def sameuntil_chunks(xs, size): + ret = [] + lastuntil = xs[0][1] + for x in xs: + if len(ret) >= size or x[1] != lastuntil: + yield ret, lastuntil + ret = [] + lastuntil = x[1] + ret.append(x) + + if ret: + yield ret, lastuntil + + pbar = tqdm(total=len(requests)) + for chunk, request_args in tqdm( + list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)) + ): + inps = [] + + # make a deepcopy since we are changing arguments + request_args = copy.deepcopy(request_args) + + self._max_gen_toks = request_args.pop("max_gen_toks", self.max_gen_toks) + + for context, _ in chunk: + # add context (prompts) to the list + inps.append(context) + + until = request_args.pop("until", ["<|endoftext|>"]) + request_args.pop("do_sample", None) + request_args["temperature"] = request_args.get("temperature", 0) + + # run inference (generate max_gen_toks tokens) + out = self.model( + sequences=inps, + max_new_tokens=self.max_gen_toks - 1, + stop=until, + **request_args, + ) + + for resp, (context, args_) in zip(out.generations, chunk): + text = resp.text + until_ = until + # split the text at the first occurrence of any of the until tokens + for term in until_: + if len(term) > 0: + text = text.split(term)[0] + + res.append(text) + + self.cache_hook.add_partial( + "generate_until", (context, {"until": until_}), text + ) + pbar.update(1) + + pbar.close() + + return re_ord.get_original(res) + + def _encode_pair( + self, context: str, continuation: str + ) -> Tuple[List[int], List[int]]: + """ + Copied directly from + 
https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py + """ + n_spaces = len(context) - len(context.rstrip()) + if n_spaces > 0: + continuation = context[-n_spaces:] + continuation + context = context[:-n_spaces] + whole_enc = self.tok_encode(context + continuation) + context_enc = self.tok_encode(context) + context_enc_len = len(context_enc) + continuation_enc = whole_enc[context_enc_len:] + return context_enc, continuation_enc diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/neuron_optimum.py b/venv/lib/python3.10/site-packages/lm_eval/models/neuron_optimum.py new file mode 100644 index 0000000000000000000000000000000000000000..e3f53bcf5fd8c19a7dcf517edf23ab0394064efc --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/neuron_optimum.py @@ -0,0 +1,736 @@ +import copy +import json +import logging +import subprocess +from collections import defaultdict +from typing import List, Optional, Union + +import torch +import torch.nn.functional as F +import transformers +from packaging import version +from tqdm import tqdm +from transformers import GenerationConfig +from transformers.generation import StoppingCriteriaList + +import lm_eval.models.utils +from lm_eval import utils +from lm_eval.api.model import TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import stop_sequences_criteria + + +try: + NEURON_AVAILABLE = True + from optimum.neuron import NeuronModelForCausalLM + from optimum.neuron.generation import TokenSelector + from optimum.neuron.version import __version__ as optimum_neuron_version +except ImportError: + NeuronModelForCausalLM = object + NEURON_AVAILABLE = False + + +logger = logging.getLogger(__name__) + + +def get_nc_count() -> Union[int, None]: + """Returns the number of neuron cores on the current instance.""" + try: + cmd = "neuron-ls --json-output" + result = subprocess.run(cmd, shell=True, capture_output=True) + print(f"inferring nc_count from 
`neuron-ls` {result.stdout}") + json_output = json.loads(result.stdout) + count = sum([x["nc_count"] for x in json_output]) + print(f"nc_count={count}") + return count + except Exception: + return None + + +def wrap_constant_batch_size(func): + def _decorator(self, input_ids): + """input_ids a 2D array with batch_size on dim=0 + + makes sure the func runs with self.batch_size + """ + # access a from TestSample + batch_size = input_ids.shape[0] + + if batch_size < self.batch_size: + # handle the event of input_ids.shape[0] != batch_size + # Neuron cores expect constant batch_size + input_ids = torch.concat( + ( + input_ids, + # add missing_batch_size dummy + torch.zeros( + [self.batch_size - batch_size, *input_ids.size()[1:]], + dtype=input_ids.dtype, + device=input_ids.device, + ), + ), + dim=0, + ) + elif batch_size > self.batch_size: + raise ValueError( + f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})" + ) + # return the forward pass that requires constant batch size + return func(self, input_ids)[:batch_size] + + return _decorator + + +class CustomNeuronModelForCausalLM(NeuronModelForCausalLM): + """NeuronModelForCausalLM with `stopping_criteria` in `generate`""" + + def generate( + self, + input_ids: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + stopping_criteria: Optional["StoppingCriteriaList"] = None, + generation_config: Optional["GenerationConfig"] = None, + **kwargs, + ) -> torch.LongTensor: + r""" + A streamlined generate() method overriding the transformers.GenerationMixin.generate() method. + + This method uses the same logits processors/warpers and stopping criteria as the transformers library + `generate()` method but restricts the generation to greedy search and sampling. + + It does not support transformers `generate()` advanced options. 
+ + Please refer to https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.GenerationMixin.generate + for details on generation configuration. + + Parameters: + input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. + generation_config (`~transformers.generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~transformers.generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + + Returns: + `torch.Tensor`: A `torch.FloatTensor`. 
+ """ + # The actual generation configuration is a combination of config and parameters + generation_config = copy.deepcopy( + self.generation_config if generation_config is None else generation_config + ) + model_kwargs = generation_config.update( + **kwargs + ) # All unused kwargs must be model kwargs + # Check model kwargs are actually used by either prepare_inputs_for_generation or forward + self._validate_model_kwargs(model_kwargs) + + # Instantiate a TokenSelector for the specified configuration + selector = TokenSelector.create( + input_ids, generation_config, self, self.max_length + ) + selector.stopping_criteria.append(stopping_criteria) + # Verify that the inputs are compatible with the model static input dimensions + batch_size, sequence_length = input_ids.shape + if sequence_length > self.max_length: + raise ValueError( + f"The input sequence length ({sequence_length}) exceeds the model static sequence length ({self.max_length})" + ) + padded_input_ids = input_ids + padded_attention_mask = attention_mask + if batch_size > self.batch_size: + raise ValueError( + f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})" + ) + elif batch_size < self.batch_size: + logger.warning( + "Inputs will be padded to match the model static batch size. This will increase latency." 
+ ) + padding_shape = [self.batch_size - batch_size, sequence_length] + padding = torch.full( + padding_shape, fill_value=self.config.eos_token_id, dtype=torch.int64 + ) + padded_input_ids = torch.cat([input_ids, padding]) + if attention_mask is not None: + padding = torch.zeros(padding_shape, dtype=torch.int64) + padded_attention_mask = torch.cat([attention_mask, padding]) + # Drop the current generation context and clear the Key/Value cache + self.reset_generation() + + output_ids = self.generate_tokens( + padded_input_ids, + selector, + batch_size, + attention_mask=padded_attention_mask, + **model_kwargs, + ) + return output_ids[:batch_size, :] + + +@register_model("neuronx") +class NEURON_HF(TemplateLM): + """ + Enables usage with on AWS Neuron + using the HuggingFace Transformers + Transformers neuronx library. + Tested with neuron 2.17.0 + """ + + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained: Optional[str] = "TinyLlama/TinyLlama-1.1B-Chat-v1.0", + revision: Optional[str] = "main", + tp_degree: Optional[int] = None, + subfolder: Optional[str] = None, + tokenizer: Optional[str] = None, + truncation: Optional[bool] = False, + max_length: Optional[int] = None, + dtype: Optional[Union[str, torch.dtype]] = "auto", + batch_size: Optional[int] = 1, + low_cpu_mem_usage: Optional[bool] = True, + trust_remote_code: Optional[bool] = False, + use_fast_tokenizer: Optional[bool] = True, + add_bos_token: Optional[bool] = False, + ) -> None: + if not NEURON_AVAILABLE: + raise Exception( + "Tried to load neuron model, but neuron is not installed ", + "please install neuron via pip install transformers-neuron ", + "also make sure you are running on an AWS inf2 instance", + ) + if version.parse(optimum_neuron_version) != version.parse("0.0.17"): + logger.warning( + '`optimum-neuron` model requires `pip install "optimum[neuronx]>=0.0.17" ' + "preferably using the Hugging Face Neuron Deep Learning AMI (Ubuntu 22.04) " + 
"https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2 " + f"You are using optimum-neuron={optimum_neuron_version}" + ) + super().__init__() + + assert isinstance(pretrained, str) + assert isinstance(batch_size, (int, str)) + + self.batch_size_per_gpu = int(batch_size) + batch_size = int(batch_size) + if tp_degree is None: + # execute `neuron-ls --json-output | jq '.[0].nc_count'`` + # to get the number of neuron cores on your instance + tp_degree = get_nc_count() + + assert isinstance(tp_degree, int), ( + f"model_args must include tp_degree. tp_degree must be set to an integer," + f" but is tp_degree=`{tp_degree}` with type=`{type(tp_degree)}`." + "Set it to number of neuron cores on your instance." + " For inf2.xlarge and inf2.8xlarge, set it to `2`." + " For inf2.24xlarge, set it to `12`." + " For inf2.48xlarge, set it to `24`." + ) + + # TODO: update this to be less of a hack once subfolder is fixed in HF + revision = revision + ("/" + subfolder if subfolder is not None else "") + + self._config = transformers.AutoConfig.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + ) + torch_dtype = lm_eval.models.utils.get_dtype(dtype) + + assert torch_dtype in [ + torch.float16, + torch.bfloat16, + ], "Only float16 and bfloat16 are supported" + + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + pretrained if tokenizer is None else tokenizer, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + + # Neuron specific code + if torch_dtype == torch.float16: + self.amp_dtype = "f16" + elif torch_dtype == torch.bfloat16: + self.amp_dtype = "bf16" + elif torch_dtype == torch.float32: + self.amp_dtype = "f32" + else: + raise NotImplementedError("Only float16 and bfloat16 are implemented.") + + compiler_args = {"num_cores": tp_degree, "auto_cast_type": self.amp_dtype} + input_shapes = { + "batch_size": batch_size, + "sequence_length": self._DEFAULT_MAX_LENGTH, + } + + print( 
+ f"{'='*20} \n loading model to neuron with" + f" {compiler_args}, {input_shapes}..." + ) + self.model = CustomNeuronModelForCausalLM.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + low_cpu_mem_usage=low_cpu_mem_usage, + export=True, + **compiler_args, + **input_shapes, + ) + print(f"SUCCESS: neuron model compiled. \n {'='*20}") + + self.truncation = truncation + + self.vocab_size = self.tokenizer.vocab_size + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.add_bos_token = add_bos_token + + self._max_length = max_length + + self.batch_schedule = 1 + self.batch_sizes = {} + + @property + def config(self): + # return the associated transformers.AutoConfig for the given pretrained model. + return self._config + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + return self.tokenizer.bos_token_id or self.tokenizer.eos_token_id + + @property + def max_length(self): + if self._max_length: # if max length manually set, return it + return self._max_length + seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx") + for attr in seqlen_config_attrs: + if hasattr(self.model.config, attr): + return getattr(self.model.config, attr) + if hasattr(self.tokenizer, "model_max_length"): + if self.tokenizer.model_max_length == 1000000000000000019884624838656: + return self._DEFAULT_MAX_LENGTH + return self.tokenizer.model_max_length + return self._DEFAULT_MAX_LENGTH + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + return self.batch_size_per_gpu + + @property + def device(self): + """device are neuron cores, but the created tensors are on CPU.""" + return "cpu" + + @property + def rank(self): + return 0 + + @property + def world_size(self): + return 1 
+ + def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None): + """ """ + if add_special_tokens is None: + add_special_tokens = False or self.add_bos_token + + encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens) + + # left-truncate the encoded context to be at most `left_truncate_len` tokens long + if left_truncate_len: + encoding = encoding[-left_truncate_len:] + + return encoding + + def tok_batch_encode( + self, + strings: List[str], + padding_side: str = "left", + left_truncate_len: int = None, + truncation: bool = False, + ): + # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. + old_padding_side = self.tokenizer.padding_side + self.tokenizer.padding_side = padding_side + + add_special_tokens = False or self.add_bos_token + + encoding = self.tokenizer( + strings, + truncation=truncation, + padding="longest", + return_tensors="pt", + add_special_tokens=add_special_tokens, + ) + if left_truncate_len: + encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:] + encoding["attention_mask"] = encoding["attention_mask"][ + :, -left_truncate_len: + ] + self.tokenizer.padding_side = old_padding_side + + return encoding["input_ids"], encoding["attention_mask"] + + def tok_decode(self, tokens): + return self.tokenizer.decode(tokens) + + @wrap_constant_batch_size + def _model_call(self, input_ids: torch.Tensor): + """ + get logits for the entire sequence + + :param input_ids: torch.Tensor + A torch tensor of shape [batch, sequence_cont] + the size of sequence may vary from call to call + :return + A torch tensor of shape [batch, sequence, vocab] with the + logits returned from the model's decoder-lm head + """ + _, sequence_length = input_ids.shape + + with torch.inference_mode(): + cache_ids = torch.arange(0, sequence_length, dtype=torch.int32).split(1) + input_ids_split = input_ids.split(1, dim=1) + + return torch.concat( + [ + self.model.forward( + 
input_ids=input_id, cache_ids=cache_id, return_dict=False + )[0] + for input_id, cache_id in zip(input_ids_split, cache_ids) + ], + dim=1, + ) + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + # we require users to pass do_sample=True explicitly + # for non-greedy gen. This should be reevaluated when considering beam search. + + with torch.inference_mode(): + if "do_sample" not in generation_kwargs.keys(): + generation_kwargs["do_sample"] = False + + stopping_criteria = stop_sequences_criteria( + self.tokenizer, + stop + [self.tokenizer.decode([self.config.eos_token_id])], + 1, + context.shape[0], + ) + + return self.model.generate( + input_ids=context, + max_length=max_length, + stopping_criteria=stopping_criteria, + pad_token_id=self.eot_token_id, + use_cache=True, + **generation_kwargs, + ) + + def _select_cont_toks(self, logits, contlen=None, inplen=None): + assert ( + contlen and inplen + ), "Must pass input len and cont. len to select scored logits for causal LM" + # discard right-padding. + # also discard the input/context tokens. we'll only score continuations. 
+ logits = logits[inplen - contlen : inplen] + + return logits + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + loglikelihoods = [] + + adaptive_batch_size = None + + for (string,) in tqdm( + [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0)) + ): + rolling_token_windows = list( + map( + utils.make_disjoint_window, + utils.get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.prefix_token_id, + max_seq_len=self.max_length, + context_len=1, + ), + ) + ) + + # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + pad_amnt = 0 + if self.world_size > 1: + # We pad out the external document-level iterator so the inner iterator doesn't hang + mytensor = torch.tensor(len(rolling_token_windows), device=self.device) + gathered = ( + self.accelerator.gather(mytensor).cpu().detach().numpy().tolist() + ) + + pad_amnt = max(gathered) - gathered[self.rank] + if pad_amnt > 0: + rolling_token_windows += pad_amnt * [rolling_token_windows[0]] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + disable_tqdm=True, + override_bs=adaptive_batch_size, + ) + + if (self.world_size > 1) and (pad_amnt > 0): + string_nll = [x[0] for x in string_nll[:-pad_amnt]] + else: + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + + return loglikelihoods + + def _loglikelihood_tokens( + self, requests, disable_tqdm: bool = False, override_bs=None + ): + # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context + res = [] + + def _collate(x): + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over not underestimates, which is more useful for planning + # - to know the size of 
a batch when going through the list, you know the first one is always the batch + # padded context length. this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = utils.Reorderer(requests, _collate) + + n_reordered_requests = len(re_ord.get_reordered()) # noqa + # automatic (variable) batch size detection for vectorization + # pull longest context sample from request + + chunks = lm_eval.models.utils.chunks( + re_ord.get_reordered(), + n=self.batch_size, + fn=None, + ) + + for chunk in tqdm(chunks, disable=(disable_tqdm or (self.rank != 0))): + inps = [] + cont_toks_list = [] + inplens = [] + + conts = [] # noqa + encoder_attns = [] # noqa + + padding_len_inp = None + padding_len_cont = None # noqa + # because vectorizing is annoying, we first convert each (context, continuation) pair to padded + # tensors, then we pack them together into a batch, call the model, and then pick it all apart + # again because vectorizing is annoying + + for _, context_enc, continuation_enc in chunk: + # sanity check + assert len(context_enc) > 0 + assert len(continuation_enc) > 0 + assert len(continuation_enc) <= self.max_length + + # how this all works (illustrated on a causal decoder-only setup): + # CTX CONT + # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1] + # model \ \ + # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the + # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice + + # when too long to fit in context, truncate from the left + inp = torch.tensor( + (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1], + dtype=torch.long, + device=self.device, + ) + (inplen,) = inp.shape + + padding_len_inp = ( + max(padding_len_inp, inplen) + if padding_len_inp is not None + else inplen + ) + + 
inps.append(inp) # [1, inp_length] + cont_toks_list.append(continuation_enc) + inplens.append(inplen) + + # create encoder attn mask and batched conts, if seq2seq + call_kwargs = {} + batched_inps = lm_eval.models.utils.pad_and_concat( + padding_len_inp, inps, padding_side="right" + ) # [batch, padding_len_inp] + + multi_logits = F.log_softmax( + self._model_call(batched_inps, **call_kwargs), dim=-1 + ) # [batch, padding_length (inp or cont), vocab] + + for (cache_key, _, _), logits, inplen, cont_toks in zip( + chunk, multi_logits, inplens, cont_toks_list + ): + # Slice to original seq length + contlen = len(cont_toks) + # take only logits in the continuation + # (discard context toks if decoder-only ; discard right-padding) + # also discards + checks for "virtual tokens" in the causal LM's input window + # from prompt/prefix tuning tokens, if applicable + ctx_len = inplen + (logits.shape[0] - padding_len_inp) + logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len) + logits = logits.unsqueeze(0) # [1, seq, vocab] + + # Check if per-token argmax is exactly equal to continuation + greedy_tokens = logits.argmax(dim=-1) + cont_toks = torch.tensor( + cont_toks, dtype=torch.long, device=self.device + ).unsqueeze(0) # [1, seq] + max_equal = (greedy_tokens == cont_toks).all() + + # Obtain log-probs at the corresponding continuation token indices + # last_token_slice = logits[:, -1, :].squeeze(0).tolist() + logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze( + -1 + ) # [1, seq] + + # Answer: (log prob, is-exact-match) + answer = (float(logits.sum()), bool(max_equal)) + + res.append(answer) + + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + + return re_ord.get_original(res) + + def generate_until(self, requests, disable_tqdm: bool = False): + res = defaultdict(list) + re_ords = {} + + def _collate(x): + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over 
not underestimates, which is more useful for planning + # - to know the size of a batch when going through the list, you know the first one is always the batch + # padded context length. this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + toks = self.tok_encode(x[0]) + return -len(toks), x[0] + + # we group requests by their generation_kwargs, + # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling + # in the same batch. + grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1])) + for key, reqs in grouper.get_grouped().items(): + # within each set of reqs for given kwargs, we reorder by token length, descending. + re_ords[key] = utils.Reorderer([req.args for req in reqs], _collate) + + pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0))) + + # for each different set of kwargs, we execute all requests, by batch. + for key, re_ord in re_ords.items(): + chunks = lm_eval.models.utils.chunks( + re_ord.get_reordered(), n=self.batch_size + ) + for chunk in tqdm(chunks, disable=self.rank != 0): + contexts, all_gen_kwargs = zip(*chunk) + # we assume all gen kwargs in the batch are the same + # this is safe to assume because the `grouper` object ensures it. + gen_kwargs = all_gen_kwargs[0] + # unpack our keyword arguments. 
+ until = None + if isinstance(gen_kwargs, dict): + kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1 + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [until] + elif not isinstance(until, list): + raise ValueError( + f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}" + ) + else: + raise ValueError( + f"Expected `kwargs` to be of type `dict` but got {kwargs}" + ) + # add EOS token to stop sequences + eos = self.tok_decode(self.eot_token_id) + if not until: + until = [eos] + else: + until.append(eos) + if "max_gen_toks" in kwargs.keys(): + max_gen_toks = kwargs.pop("max_gen_toks") + else: + max_gen_toks = self.max_gen_toks + # first stop sequence is used to halt generation upon encountering + primary_until = [until[0]] + + max_ctx_len = self.max_length - max_gen_toks + + # encode, pad, and truncate contexts for this batch + context_enc, attn_masks = self.tok_batch_encode( + contexts, + left_truncate_len=max_ctx_len, + truncation=self.truncation, + ) + context_enc = context_enc.to(self.device) + attn_masks = attn_masks.to(self.device) + + if "max_length" not in kwargs: + kwargs["max_length"] = context_enc.shape[1] + max_gen_toks + + # perform batched generation + cont = self._model_generate( + context=context_enc, + attention_mask=attn_masks, + stop=primary_until, + **kwargs, + ) + + cont_toks_list = cont.tolist() + for cont_toks, context in zip(cont_toks_list, contexts): + # discard context + left-padding toks if using causal decoder-only LM + cont_toks = cont_toks[context_enc.shape[1] :] + + s = self.tok_decode(cont_toks) + + # use secondary stop seqs to cut off should-have-been-stopped content post-hoc + for term in until: + if len(term) > 0: + # ignore '' separator, + # for seq2seq case where self.tok_decode(self.eot_token_id) = '' + s = s.split(term)[0] + + res[key].append(s) + + self.cache_hook.add_partial( + "generate_until", (context, gen_kwargs), s + ) + pbar.update(1) + 
# reorder this group of results back to original unsorted form + res[key] = re_ord.get_original(res[key]) + + pbar.close() + + return grouper.get_original(res) diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/openai_completions.py b/venv/lib/python3.10/site-packages/lm_eval/models/openai_completions.py new file mode 100644 index 0000000000000000000000000000000000000000..492c41fb3144c9ca0df2f0d503e2ac2a851ffc52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/openai_completions.py @@ -0,0 +1,478 @@ +import copy +import os +from collections import defaultdict +from importlib.util import find_spec +from typing import List, Literal, Optional, Tuple + +from tqdm import tqdm + +import lm_eval.models.utils +from lm_eval import utils +from lm_eval.api.model import LM, TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import retry_on_specific_exceptions +from lm_eval.utils import eval_logger + + +def get_result(response) -> Tuple[float, bool]: + """Process results from OpenAI API response. + + :param response: dict + OpenAI API Response + :return: + continuation_logprobs: np.array + Log probabilities of continuation tokens + is_greedy: bool + whether argmax matches given continuation exactly + """ + is_greedy = True + logprobs = response.logprobs.token_logprobs + continuation_logprobs = sum(logprobs) + + for i in range(len(response.logprobs.token_logprobs)): + token = response.logprobs.token_logprobs[i] + top_tokens = response.logprobs.top_logprobs[i] + top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x]) + if top_token != token: + is_greedy = False + break + + return continuation_logprobs, is_greedy + + +def oa_completion(client, chat: bool = False, **kwargs): + """Query OpenAI API for completion. 
+ + Retry with back-off until they respond + """ + if not find_spec("openai") or not find_spec("tiktoken"): + raise Exception( + "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. " + "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`" + ) + else: + import openai + + def _exception_callback(e: Exception, sleep_time: float) -> None: + import traceback + + traceback.print_exc() + + @retry_on_specific_exceptions( + on_exceptions=[openai.OpenAIError], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def completion(): + if chat: + return client.chat.completions.create(**kwargs) + else: + return client.completions.create(**kwargs) + + return completion() + + +@register_model("openai-completions", "local-completions") +class OpenaiCompletionsLM(TemplateLM): + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + model: str, + base_url: str = None, + tokenizer: Optional[str] = None, + tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken", + truncate: bool = False, + max_gen_toks: int = 256, + batch_size: int = 1, + seed: int = 1234, + max_length: Optional[int] = None, + ) -> None: + """ + + :param engine: str + OpenAI API engine (e.g. gpt-3.5-turbo-instruct) + :param truncate: bool + Truncate input if too long (if False and input is too long, throw error) + """ + super().__init__() + self.seed = seed + try: + import openai # noqa: E401 + import tiktoken + except ModuleNotFoundError: + raise Exception( + "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. 
\ + please install these via `pip install lm-eval[openai]` or `pip install -e .\"[openai]\"`", + ) + self.model = model + self.base_url = base_url + self.tokenizer_backend = tokenizer_backend + self.truncate = truncate + self._batch_size = int(batch_size) + self._max_gen_toks = max_gen_toks + self._max_length = max_length + + # if we have a local model, use HF tokenizer over tiktoken + if self.tokenizer_backend == "huggingface": + import transformers # noqa: E401 + + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + tokenizer if tokenizer else self.model + ) + self.vocab_size = self.tokenizer.vocab + self.end_of_text_token_id = self.tokenizer.eos_token + elif self.tokenizer_backend == "tiktoken": + if self.base_url: + eval_logger.warning( + f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. " + "Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken." + ) + + self.tokenizer = tiktoken.encoding_for_model(self.model) + self.vocab_size = self.tokenizer.n_vocab + self.end_of_text_token_id = self.tokenizer.eot_token + else: + raise ValueError( + f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}" + ) + + # Read from environment variable OPENAI_API_KEY + # Set to EMPTY for local + openai.api_key = os.environ["OPENAI_API_KEY"] + if self.base_url: + self.client = openai.OpenAI(base_url=self.base_url) + else: + self.client = openai.OpenAI() + + @property + def eot_token_id(self): + return self.end_of_text_token_id + + @property + def max_length(self) -> int: + if self._max_length: + return self._max_length + else: + return self._DEFAULT_MAX_LENGTH + + @property + def max_gen_toks(self) -> int: + return self._max_gen_toks + + @property + def batch_size(self) -> int: + return self._batch_size + + @property + def device(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def tok_encode(self, 
string: str, **kwargs) -> List[int]: + return self.tokenizer.encode(string) + + def tok_decode(self, tokens: List[int]) -> str: + return self.tokenizer.decode(tokens) + + def _loglikelihood_tokens( + self, requests, disable_tqdm: bool = False + ) -> List[Tuple[float, bool]]: + res = [] + + def _collate(x): + # this doesn't efficiently handle last-token differences yet, but those are kinda annoying because + # it's not guaranteed that the 100 or so logprobs we get to see actually contain all the continuations + # we care about, and so we need some kind of backup for when it isn't + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = utils.Reorderer(requests, _collate) + + for chunk in tqdm( + list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)), + disable=disable_tqdm, + ): + inps = [] + ctxlens = [] + for cache_key, context_enc, continuation_enc in chunk: + # max_length+1 because the API takes up to 2049 tokens, including the first context token + inp = (context_enc + continuation_enc)[-(self.max_length + 1) :] + # TODO: the logic is much simpler if we just look at the length of continuation tokens + ctxlen = len(context_enc) - max( + 0, len(context_enc) + len(continuation_enc) - (self.max_length + 1) + ) + + inps.append(inp) + ctxlens.append(ctxlen) + + response = oa_completion( + client=self.client, + model=self.model, + prompt=inps, + max_tokens=0, + temperature=0.0, + logprobs=10, + seed=self.seed, + ) + + for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip( + response.choices, ctxlens, chunk + ): + answer = get_result(resp) + + res.append(answer) + + # partial caching + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + return re_ord.get_original(res) + + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + if not requests: + return [] + res = [] + requests = [req.args for req in requests] + + def _collate(x): + toks = self.tok_encode(x[0]) + 
return len(toks), x[0] + + re_ord = utils.Reorderer(requests, _collate) + + def sameuntil_chunks(xs, size): + ret = [] + lastuntil = xs[0][1] + for x in xs: + if len(ret) >= size or x[1] != lastuntil: + yield ret, lastuntil + ret = [] + lastuntil = x[1] + ret.append(x) + + if ret: + yield ret, lastuntil + + # todo: more intelligent batching for heterogeneous `until` + for chunk, request_args in tqdm( + list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)), + disable=disable_tqdm, + ): + inps = [] + self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks) + for context, _ in chunk: + context_enc = self.tok_encode(context) + inp = context_enc[-(self.max_length - self.max_gen_toks) :] + inps.append(inp) + + until = request_args.get("until", ["<|endoftext|>"]) + request_args["temperature"] = request_args.get("temperature", 0) + + response = oa_completion( + client=self.client, + model=self.model, + prompt=inps, + max_tokens=self.max_gen_toks, + stop=until, + seed=self.seed, + **{ + k: v + for k, v in request_args.items() + if k not in {"do_sample", "max_gen_toks", "until"} + }, + ) + for resp, (context, args_) in zip(response.choices, chunk): + s = getattr(resp, "text") + + until_ = until + + for term in until_: + if len(term) > 0: + s = s.split(term)[0] + + # partial caching + self.cache_hook.add_partial( + "generate_until", (context, {"until": until_}), s + ) + + res.append(s) + return re_ord.get_original(res) + + def _model_call(self, inps): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def _model_generate(self, context, max_length, eos_token_id): + # Isn't used because we override generate_until + raise NotImplementedError() + + def loglikelihood_rolling( + self, requests, disable_tqdm: bool = False + ) -> List[float]: + loglikelihoods = [] + + for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm): + rolling_token_windows = list( + map( + utils.make_disjoint_window, + 
utils.get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.eot_token_id, + max_seq_len=self.max_length, + context_len=1, + ), + ) + ) + + # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + disable_tqdm=True, + ) + + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + return loglikelihoods + + +@register_model("openai-chat-completions", "local-chat-completions") +class OpenaiChatCompletionsLM(LM): + def __init__( + self, + model: str = "gpt-3.5-turbo", # GPT model or Local model using HuggingFace model paths + base_url: str = None, + truncate: bool = False, + **kwargs, + ) -> None: + """ + + :param model: str + Implements an OpenAI-style chat completion API for + accessing both OpenAI OR locally-hosted models using + HuggingFace Tokenizer + OpenAI API model (e.g. gpt-3.5-turbo) + using the **gen_kwargs passed on init + :param truncate: bool + Truncate input if too long (if False and input is too long, throw error) + """ + super().__init__() + try: + import openai # noqa: E401 + except ModuleNotFoundError: + raise Exception( + "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. 
\ + please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`", + ) + self.model = model + self.base_url = base_url + self.truncate = truncate + + # Read from environment variable OPENAI_API_KEY + # Set to EMPTY for local + if self.base_url: + self.client = openai.OpenAI(base_url=self.base_url) + else: + self.client = openai.OpenAI() # openai.AsyncOpenAI() + + @property + def max_length(self) -> int: + # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token + return 2048 + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + @property + def device(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + res = defaultdict(list) + re_ords = {} + + # we group requests by their generation_kwargs, + # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling + # in the same batch. + grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1])) + for key, reqs in grouper.get_grouped().items(): + # within each set of reqs for given kwargs, we reorder by token length, descending. + re_ords[key] = utils.Reorderer( + [req.args for req in reqs], lambda x: (-len(x[0]), x[0]) + ) + + pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0))) + for key, re_ord in re_ords.items(): + # n needs to be 1 because messages in + # chat completion are not batch but + # is regarded as a single conversation. 
+ chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1) + for chunk in chunks: + contexts, all_gen_kwargs = zip(*chunk) + inps = [{"role": "user", "content": context} for context in contexts] + + gen_kwargs = all_gen_kwargs[0] + until = None + if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict): + if "do_sample" in kwargs.keys(): + kwargs.pop("do_sample") + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [until] + elif not isinstance(until, list): + raise ValueError( + f"Expected repr(kwargs['until']) to be of type Union[str, list] but got {until}" + ) + kwargs["stop"] = until + kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks) + else: + raise ValueError( + f"Expected repr(kwargs) to be of type repr(dict) but got {kwargs}" + ) + + response = oa_completion( + client=self.client, + chat=True, + messages=inps, + model=self.model, + **kwargs, + ) + + for resp, (context, args_) in zip(response.choices, chunk): + s = resp.message.content + + if until is not None: + for term in until: + if len(term) > 0: + s = s.split(term)[0] + + res[key].append(s) + + self.cache_hook.add_partial( + "generate_until", (context, {"until": until}), s + ) + pbar.update(1) + # reorder this group of results back to original unsorted form + res[key] = re_ord.get_original(res[key]) + + pbar.close() + + return grouper.get_original(res) + + def loglikelihood(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/optimum_lm.py b/venv/lib/python3.10/site-packages/lm_eval/models/optimum_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..70d44abdaca859fa79bd1beed789c96ad2c22ca9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/optimum_lm.py @@ 
-0,0 +1,87 @@ +import json +from importlib.util import find_spec +from pathlib import Path + +from lm_eval import utils +from lm_eval.api.registry import register_model +from lm_eval.models.huggingface import HFLM + + +eval_logger = utils.eval_logger + + +@register_model("openvino") +class OptimumLM(HFLM): + """ + Optimum Intel provides a simple interface to optimize Transformer models and convert them to \ + OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \ + Intel® architectures using OpenVINO™ runtime. + + To use an OpenVINO config, use `--model_args ov_config` to point to a json file with an OpenVINO config: + `lm_eval --model openvino --model_args pretrained=gpt2,ov_config=config.json --task lambada_openai` + Example json file contents: {"INFERENCE_PRECISION_HINT": "f32", "CACHE_DIR": "model_cache"} + """ + + def __init__( + self, + device="cpu", + **kwargs, + ) -> None: + if "backend" in kwargs: + # optimum currently only supports causal models + assert ( + kwargs["backend"] == "causal" + ), "Currently, only OVModelForCausalLM is supported." + + self.openvino_device = device + + super().__init__( + device=self.openvino_device, + backend=kwargs.pop("backend", "causal"), + **kwargs, + ) + + def _create_model( + self, + pretrained: str, + revision="main", + dtype="auto", + trust_remote_code=False, + **kwargs, + ) -> None: + if not find_spec("optimum"): + raise Exception( + "package `optimum` is not installed. 
Please install it via `pip install optimum[openvino]`" + ) + else: + from optimum.intel.openvino import OVModelForCausalLM + + model_kwargs = kwargs if kwargs else {} + if "ov_config" in model_kwargs: + if not Path(model_kwargs["ov_config"]).exists(): + raise ValueError( + "ov_config should point to a .json file containing an OpenVINO config" + ) + with open(model_kwargs["ov_config"]) as f: + model_kwargs["ov_config"] = json.load(f) + eval_logger.info( + f"Using custom OpenVINO config: {model_kwargs['ov_config']}" + ) + + else: + model_kwargs["ov_config"] = {} + model_kwargs["ov_config"].setdefault("CACHE_DIR", "") + model_file = Path(pretrained) / "openvino_model.xml" + if model_file.exists(): + export = False + else: + export = True + + self._model = OVModelForCausalLM.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + export=export, + device=self.openvino_device.upper(), + **model_kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/textsynth.py b/venv/lib/python3.10/site-packages/lm_eval/models/textsynth.py new file mode 100644 index 0000000000000000000000000000000000000000..a14f6287b6f11b21cfc69ca471bcbe99a631be12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/textsynth.py @@ -0,0 +1,172 @@ +"""TextSynth API +Implementation provided by Fabrice Bellard: + https://github.com/EleutherAI/lm-evaluation-harness/issues/295 + +In order to use the API, you must have a valid TextSynth account and +enough credits. 
+ +Example usage: + + python main.py --model textsynth --model_args engine=gptj_6B --no_cache --tasks piqa + +Homepage: https://textsynth.com/index.html +""" + +import logging +import os + +import requests as _requests +from tqdm import tqdm + +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import retry_on_specific_exceptions + + +logger = logging.getLogger(__name__) + + +def textsynth_completion(**kwargs): + """Query TextSynth API for completion. + Retry with back-off until they respond. + """ + + def _exception_callback(e: Exception, sleep_time: float) -> None: + import traceback + + traceback.print_exc() + + @retry_on_specific_exceptions( + on_exceptions=[_requests.exceptions.RequestException], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def completion(): + return _requests.post(**kwargs) + + return completion() + + +@register_model("textsynth") +class TextSynthLM(LM): + def __init__(self, engine, truncate: bool = False, **kwargs) -> None: + """ + :param engine: str + TextSynth API engine (e.g. `gptj_6B`) + :param truncate: bool + Truncate input if too long (if False and input is too long, throw error) + """ + super().__init__() + + self.engine = engine + self.truncate = truncate + self.api_url = "https://api.textsynth.com" + # Read from environment variable TEXTSYNTH_API_SECRET_KEY + self.api_key = os.environ["TEXTSYNTH_API_SECRET_KEY"] + + @property + def eot_token_id(self): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + @property + def max_length(self) -> int: + # NOTE: Turn on truncation to avoid errors on long inputs. 
+ return 2048 + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + @property + def device(self): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + def tok_encode(self, string: str): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + def tok_decode(self, tokens): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + def loglikelihood(self, requests, disable_tqdm: bool = False): + res = [] + for context, continuation in tqdm(requests, disable=disable_tqdm): + response = textsynth_completion( + url=self.api_url + "/v1/engines/" + self.engine + "/logprob", + headers={"Authorization": "Bearer " + self.api_key}, + json={"context": context, "continuation": continuation}, + ) + resp = response.json() + if "logprob" in resp: + logprob = resp["logprob"] + is_greedy = resp["is_greedy"] + res.append((logprob, is_greedy)) + + self.cache_hook.add_partial( + "loglikelihood", (context, continuation), (logprob, is_greedy) + ) + else: + logger.error( + f"The following response does not contain `logprobs`. Got:\n{resp}" + ) + assert False + return res + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + # TODO: The TextSynth API does not support tokenized inputs so we cannot + # manually partition long contexts into smaller rolling windows as + # done for other models derived from `BaseLM`. Override this method + # with a windowing scheme that works for direct string inputs. + raise NotImplementedError( + "`loglikelihood_rolling` is currently not supported due to lack of " + "input tokenization support from TextSynth." 
+ ) + + def generate_until(self, requests, disable_tqdm: bool = False): + if not requests: + return [] + + res = [] + for request in tqdm(requests, disable=disable_tqdm): + inp = request[0] + request_args = request[1] + until = request_args["until"] + response = textsynth_completion( + url=self.api_url + "/v1/engines/" + self.engine + "/completions", + headers={"Authorization": "Bearer " + self.api_key}, + json={ + "prompt": inp, + "max_tokens": self.max_gen_toks, + "top_k": 1, + "stop": until, + }, + ) + resp = response.json() + if "text" in resp: + s = resp["text"] + res.append(s) + + self.cache_hook.add_partial("generate_until", (inp, request_args), s) + else: + logger.error( + "The following response does not contain generated `text`. " + "Got:\n{resp}" + ) + assert False + return res + + def _model_call(self, inps): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def _model_generate(self, context, max_length, eos_token_id): + # Isn't used because we override generate_until + raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/lm_eval/models/utils.py b/venv/lib/python3.10/site-packages/lm_eval/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8a81e5deca280f4e48b584a4eac78fb44d1feda2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/models/utils.py @@ -0,0 +1,666 @@ +import collections +import fnmatch +import gc +import itertools +import time +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Literal, + Optional, + Tuple, + Type, + Union, +) + +import torch +import transformers + +from lm_eval.utils import eval_logger + + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizerBase + from transformers.configuration_utils import PretrainedConfig + + +def chunks(iter, n: int = 0, fn=None): + """ + Divides an iterable into chunks of specified size or based on a given 
function. + Useful for batching + + Parameters: + - iter: The input iterable to be divided into chunks. + - n: An integer representing the size of each chunk. Default is 0. + - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None. + + Returns: + An iterator that yields chunks of the input iterable. + + Example usage: + ``` + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for chunk in chunks(data, 3): + print(chunk) + ``` + Output: + ``` + [1, 2, 3] + [4, 5, 6] + [7, 8, 9] + [10] + ``` + """ + arr = [] + for i, x in enumerate(iter): + arr.append(x) + if len(arr) == (fn(i, iter) if fn else n): + yield arr + arr = [] + + if arr: + yield arr + + +class MultiChoice: + def __init__(self, choices) -> None: + self.choices = choices + + # Simple wildcard support (linux filename patterns) + def __contains__(self, values) -> bool: + for value in values.split(","): + if len(fnmatch.filter(self.choices, value)) == 0: + eval_logger.info("Available tasks to choose:") + for choice in self.choices: + eval_logger.info(f" - {choice}") + raise ValueError("'{}' is not in task list".format(value)) + return True + + def __iter__(self) -> Iterator: + for choice in self.choices: + yield choice + + +class Grouper: + """ + takes an array `arr` and function `fn` and returns a dictionary + with keys fn(ob) for each ob in `arr` and with values `self.arr[key]` a list of all + objects in `arr` satisfying `key == fn(ob)`. + """ + + def __init__(self, arr, fn) -> None: + # self.orig_arr = arr + self.size = len(arr) + arr = list(enumerate(arr)) + + def group_return_dict(arr, fn): + res = collections.defaultdict(list) + + for ob in arr: + res[fn(ob)].append(ob) + return res + + arr = group_return_dict(arr, lambda x: fn(x[1])) + + # self.arr has format Dict[Tuple[int, ]] + self.arr = arr + self._grouped = None + + def get_grouped(self): + # return the contents but not indices for our grouped dict. 
+ if self._grouped: + return self._grouped + grouped = {} + for key in self.arr.keys(): + # drop the index from each element of self.arr + grouped[key] = [y[1] for y in self.arr[key]] + self._grouped = grouped + return grouped + + def get_original(self, grouped_dict): + # take in a grouped dictionary with e.g. results for each key listed + # in the same order as the instances in `self.arr`, and + # return the results in the same (single list) order as `self.orig_arr`. + res = [None] * self.size + cov = [False] * self.size + # orig = [None] * self.size + + assert grouped_dict.keys() == self.arr.keys() + + for key in grouped_dict.keys(): + for (ind, _), v in zip(self.arr[key], grouped_dict[key]): + res[ind] = v + cov[ind] = True + # orig[ind] = _ + + assert all(cov) + # assert orig == self.orig_arr + + return res + + +def pad_and_concat( + max_length: int, + tensors: List[torch.Tensor], + padding_side: Literal["right", "left"] = "right", +): + """ + Method for padding a list of tensors given the maximum tensor + length in the batch. Used for batching inputs and continuations in + seq2seq models. 
+ """ + assert ( + padding_side == "left" or padding_side == "right" + ), f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'" + + for i, tensor in enumerate(tensors): + if len(tensor.shape) == 2: + tensor = tensor.squeeze(0) # squeeze, in case passed [1, seq] size + tensor_len = tensor.shape[0] + if tensor_len < max_length: + if padding_side == "right": + # right-pad + tensors[i] = torch.cat( + [ + tensor, # [seq] + torch.zeros( + max_length - tensor_len, + dtype=torch.long, + device=tensor.device, + ), # [padding_length - seq] + ], + dim=0, + ).unsqueeze(0) + else: + # left-pad + tensors[i] = torch.cat( + [ + torch.zeros( + max_length - tensor_len, + dtype=torch.long, + device=tensor.device, + ), # [padding_length - seq] + tensor, # [seq] + ], + dim=0, + ).unsqueeze(0) + else: + tensors[i] = tensor.unsqueeze(0) + + return torch.cat(tensors, dim=0) + + +def clear_torch_cache() -> None: + gc.collect() + torch.cuda.empty_cache() + + +def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype: + """Converts `dtype` from `str` to torch.dtype when possible. 
Does not use an instantiated HF AutoConfig""" + if isinstance(dtype, str) and dtype != "auto": + # Convert `str` args torch dtype: `float16` -> `torch.float16` + _torch_dtype = getattr(torch, dtype) + else: + _torch_dtype = dtype + return _torch_dtype + + +class MultiTokenEOSCriteria(transformers.StoppingCriteria): + """Criteria to stop on the specified multi-token sequence.""" + + def __init__( + self, + sequence: str, + tokenizer: transformers.PreTrainedTokenizer, + initial_decoder_input_length: int, + batch_size: int, + ) -> None: + self.initial_decoder_input_length = initial_decoder_input_length + self.done_tracker = [False] * batch_size + self.sequence = sequence + self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False) + # print(sequence, self.sequence_ids) + # we look back for 2 more tokens than it takes to encode our stop sequence + # because tokenizers suck, and a model might generate `['\n', '\n']` but our `sequence` is `['\n\n']` + # and we don't want to mistakenly not stop a generation because our + # (string) stop sequence was output in a different tokenization + + # NOTE: there is a minor danger that this will end up looking back 2 tokens into the past, into the inputs to the model, + # and stopping generation immediately as a result. With only 2 extra tokens of lookback, this risk is minimized + # Additionally, in lookback_ids_batch we should prevent ever looking back into the inputs as described. 
+ self.sequence_id_len = len(self.sequence_ids) + 2 + self.tokenizer = tokenizer + + def __call__(self, input_ids, scores, **kwargs) -> bool: + # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence + lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :] + + lookback_ids_batch = lookback_ids_batch[:, -self.sequence_id_len :] + + lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch) + + for i, done in enumerate(self.done_tracker): + if not done: + self.done_tracker[i] = self.sequence in lookback_tokens_batch[i] + return False not in self.done_tracker + + +def stop_sequences_criteria( + tokenizer: transformers.PreTrainedTokenizer, + stop_sequences: List[str], + initial_decoder_input_length: int, + batch_size: int, +) -> transformers.StoppingCriteriaList: + return transformers.StoppingCriteriaList( + [ + *[ + MultiTokenEOSCriteria( + sequence, tokenizer, initial_decoder_input_length, batch_size + ) + for sequence in stop_sequences + ], + ] + ) + + +def undistribute(iterable): + """ + Undoes https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.distribute . 
+ + Re-interleaves results that have been split using more_itertools.distribute: + >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) + >>> list(group_1) + [1, 3, 5] + >>> list(group_2) + [2, 4, 6] + >>> undistribute([group_1, group_2]) + [1, 2, 3, 4, 5, 6] + + Handles non-uniform component lengths: + + >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) + >>> [list(c) for c in children] + [[1, 4, 7], [2, 5], [3, 6]] + >>> undistribute(children) + [1, 2, 3, 4, 5, 6, 7] + + Also handles when some iterables are empty: + + >>> children = distribute(5, [1, 2, 3]) + >>> [list(c) for c in children] + [[1], [2], [3], [], []] + >>> undistribute(children) + [1, 2, 3] + + """ + + return [ + x + for x in itertools.chain.from_iterable( + itertools.zip_longest(*[list(x) for x in iterable]) + ) + if x is not None + ] + + +def retry_on_specific_exceptions( + on_exceptions: List[Type[Exception]], + max_retries: Optional[int] = None, + backoff_time: float = 3.0, + backoff_multiplier: float = 1.5, + on_exception_callback: Optional[Callable[[Exception, float], Any]] = None, +): + """Retry on an LLM Provider's rate limit error with exponential backoff + For example, to use for OpenAI, do the following: + ``` + from openai import RateLimitError + + # Recommend specifying max_retries to avoid infinite loops! + @retry_on_specific_exceptions([RateLimitError], max_retries=3) + def completion(...): + # Wrap OpenAI completion function here + ... + ``` + """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + sleep_time = backoff_time + attempt = 0 + while max_retries is None or attempt < max_retries: + try: + return func(*args, **kwargs) + except tuple(on_exceptions) as e: + if on_exception_callback is not None: + on_exception_callback(e, sleep_time) + time.sleep(sleep_time) + sleep_time *= backoff_multiplier + attempt += 1 + + return wrapper + + return decorator + + +class Collator: + """ + A class for reordering and batching elements of an array. 
class Collator:
    """
    A class for reordering and batching elements of an array.

    This class allows for sorting an array based on a provided sorting function, grouping elements based on a grouping function, and generating batches from the sorted and grouped data.

    Objects of this class have the group_by attribute which determines the method for grouping
    the data while batching it. Three options include "gen_kwargs", "contexts", or None:
    If group_by == "gen_kwargs" then requests will be grouped by gen_kwargs
    If group_by == "contexts" then requests will be grouped by context + cont[:-1]
    If None then requests will just be reordered by length descending.

    After batched processing, get_original() restores results to the input order
    using the indices recorded during reordering.
    """

    def __init__(
        self,
        arr: List,
        sort_fn: Callable = lambda x: x,
        group_fn: Callable = lambda x: x[1],
        group_by: Union[Literal["gen_kwargs", "contexts"], None] = None,
    ) -> None:
        self._group_by = group_by
        # 0 indices are enumerated indices. Apply functions to original arr.
        # Elements are stored as (index, element), so the user-supplied
        # callables are wrapped to receive the element (position 1).
        self._sort_fn = lambda x: sort_fn(x[1])
        self._group_fn = lambda x: group_fn(x[1])
        self._reorder_indices: List = []  # original indices, in yielded order
        self._size = len(arr)
        self._arr_with_indices: Union[Dict, Tuple[Tuple[int, Any], ...]] = tuple(
            enumerate(arr)
        )  # [indices, (arr)]
        if self._group_by == "contexts":
            self._group_by_context()
        elif self._group_by == "gen_kwargs":
            self._group_by_index()

    def _group_by_index(self) -> None:
        """Group the elements of a list based on their indices (gen_kwargs mode)."""
        self._arr_with_indices = self.group(
            self._arr_with_indices, fn=self._group_fn, group_by="gen_kwargs"
        )

    def _group_by_context(self) -> None:
        """Group the array with indices by context."""
        self._arr_with_indices = self.group(
            self._arr_with_indices, fn=self._group_fn, group_by="contexts"
        )

    def get_batched(self, n: int = 1, batch_fn: Optional[Callable] = None) -> Iterator:
        """
        Generates and yields batches from the reordered array. The method of grouping and batching
        depends on the parameter `group_by`.
        If `group_by` is set to "gen_kwargs", it will batch the
        re-ordered values with same gen_kwargs for each batch.
        If `group_by` is "contexts", it caches the requests by context before batching.
        If `group_by` is neither "gen_kwargs" nor "contexts", it yields the reordered array

        Parameters:
        - n (int): The size of each batch. Defaults to 1.
        - batch_fn ([Callable[[int, Iterable], int]] | None): A function to determine the size of
          each batch. Optional, defaults to None.

        Returns:
        Iterator: An iterator over batches of reordered elements grouped as per the `group_by`
                  attribute.

        Yields:
        List of batched elements according to the `group_by` attribute.
        """
        if self._group_by == "gen_kwargs":
            # One pass of reorder + chunking per distinct gen_kwargs group.
            for (
                key,
                values,
            ) in self._arr_with_indices.items():  # type: ignore
                values = self._reorder(values)
                batch = self.get_chunks(values, n=n, fn=batch_fn)
                yield from batch
        elif self._group_by == "contexts":
            # Get one sample from each key; duplicates are recovered later
            # from the cache in get_cache().
            values = self._reorder(
                [value[0] for value in self._arr_with_indices.values()]
            )
            batch = self.get_chunks(values, n=n, fn=batch_fn)
            yield from batch
        else:
            values = self._reorder(self._arr_with_indices)  # type: ignore
            batch = self.get_chunks(values, n=n, fn=batch_fn)
            yield from batch

    def get_cache(
        self,
        req_str: Optional[Tuple[str, str]] = None,
        cxt_toks: Optional[List[int]] = None,
        cont_toks: Optional[List[int]] = None,
        logits: Optional[torch.Tensor] = None,
    ) -> Iterator[Tuple[Tuple[str, str], List[int], torch.Tensor]]:
        """
        Retrieves cached single-token continuations and their associated arguments, updating indices as necessary.

        The behavior of this function varies depending on how the `group_by` attribute is set:

        - When `group_by` is "contexts":
          The function identifies single-token continuations by checking for keys that equate to
          [context+continuation][-1] and logs the indices for re-ordering.
          In this mode, this function can work in two scenarios:

          1. Cache Hit - Single Match:
             If a single matching context-continuation pair is found in the cache,
             the function yields the original arguments.

          2. Cache Hit - Multiple Matches:
             If multiple matching context-continuation pairs are found in the cache,
             the function expands the logits batch dimension to match the number of cache hits.
             It updates the original requests and continuation tokens.

        - When `group_by` is not set to "contexts":
          This method yields the original arguments, logits and continuation tokens,
          without checking for one-token continuations.

        Parameters:
        - req_str (tuple[str, str]): Original strings used for CachingLM.
        - cxt_toks (list[int]): Full context tokens used for lookup.
        - cont_toks (list[int]): Continuation tokens for which logits were generated.
        - logits (torch.Tensor [1, seq_length, vocab_size]): Logits generated by the model given context and continuation keys.

        Yields:
        - Iterator:
          - req_str (tuple[str, str]): strings used for CachingLM.
          - cont_toks (list[int]) : continuation tokens.
          - logits (torch.Tensor [1, seq_length, vocab_size]): The original logits (repeated cache hit times)
        """
        if self._group_by == "contexts":
            # pop() consumes the cache entry: each context group is yielded once.
            cache_hit: List[
                Tuple[int, Tuple[Tuple[str, str], List[int], List[int]]]
            ] = self._arr_with_indices.pop(tuple(cxt_toks + cont_toks[:-1]))
            if (cache_size := len(cache_hit)) == 1:
                self._reorder_indices.extend(x[0] for x in cache_hit)
                yield req_str, cont_toks, logits
            else:
                # If we have matching requests then expand the batch dimension (no-op) and
                # yield each along with its corresponding args.
                multilogits = logits.expand(cache_size, -1, -1).chunk(cache_size)
                indices, req_str, cont_toks = zip(
                    *[(x[0], x[1][0], x[-1][-1]) for x in cache_hit]
                )
                self._reorder_indices.extend(indices)
                for c_key, cont_tok, logit in zip(req_str, cont_toks, multilogits):
                    yield c_key, cont_tok, logit
        else:
            yield req_str, cont_toks, logits

    def _reorder(self, arr: Union[List, Tuple[Tuple[int, Any], ...]]) -> Iterator:
        """
        Reorders the elements in the array based on the sorting function.

        Parameters:
        - arr (list | tuple[tuple[int, Any], ...]]): The array or iterable to be reordered.

        Yields:
        Iterator
        """
        arr = sorted(arr, key=self._sort_fn)
        if not self._group_by == "contexts":
            # If grouped by contexts then indices will be set in get_cache()
            self._reorder_indices.extend([x[0] for x in arr])
        yield from [x[1] for x in arr]

    def get_original(self, newarr: List) -> List:
        """
        Restores the original order of elements from the reordered list.

        Parameters:
        - newarr (list): The reordered array.

        Returns:
        list: The array with elements restored to their original order.
        """
        res = [None] * self._size
        cov = [False] * self._size

        for ind, v in zip(self._reorder_indices, newarr):
            res[ind] = v
            cov[ind] = True

        # Every original slot must have been produced exactly once.
        assert all(cov)

        return res

    def __len__(self):
        return self._size

    @staticmethod
    def group(
        arr: Iterable,
        fn: Callable,
        group_by: Literal["gen_kwargs", "contexts"] = "gen_kwargs",
    ) -> dict:
        """
        Groups elements of an iterable based on a provided function.

        The `group_by` parameter determines the method of grouping.
        If `group_by` is "contexts", the elements are grouped by [context + cont][:-1].
        If `group_by` is "gen_kwargs", the elements are grouped based on the gen_kwargs dict.

        Parameters:
        - arr (Iterable): The iterable to be grouped.
        - fn (Callable): The function to determine the grouping.
        - group_by ("gen_kwargs" | "contexts"): Grouping mode, see above.

        Returns:
        dict: Mapping of hashable group key -> list of grouped elements.
        """
        res = collections.defaultdict(list)
        for ob in arr:
            # where ob == [context + cont]
            if group_by == "contexts":
                res[tuple(fn(ob))].append(ob)
            else:
                try:
                    # Build a hashable, order-independent key from the
                    # gen_kwargs dict (tuple-izing iterable values).
                    hashable_dict = tuple(
                        (
                            key,
                            tuple(value)
                            if isinstance(value, collections.abc.Iterable)
                            else value,
                        )
                        for key, value in sorted(fn(ob).items())
                    )
                    res[hashable_dict].append(ob)
                except (TypeError, AttributeError):
                    # fn(ob) is not a dict (or not sortable); fall back to a
                    # plain tuple key.
                    res[tuple(fn(ob))].append(ob)
        return res

    @staticmethod
    def get_chunks(_iter, n: int = 0, fn=None):
        """
        Divides an iterable into chunks of specified size or based on a given function.
        Useful for batching

        Parameters:
        - _iter: The input iterable to be divided into chunks.
        - n: An integer representing the size of each chunk. Default is 0.
        - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None.

        Returns:
        An iterator that yields chunks of the input iterable.

        Example usage:
        ```
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        for chunk in chunks(data, 3):
            print(chunk)
        ```
        Output:
        ```
        [1, 2, 3]
        [4, 5, 6]
        [7, 8, 9]
        [10]
        ```
        """
        arr = []
        # Materialize once so fn() can inspect the whole iterable.
        _iter = tuple(_iter)
        for i, x in enumerate(_iter):
            arr.append(x)
            if len(arr) == (fn(i, _iter) if fn else n):
                yield arr
                arr = []

        if arr:
            # Trailing partial chunk.
            yield arr
def configure_pad_token(
    tokenizer: "PreTrainedTokenizerBase",
    model_config: Optional["PretrainedConfig"] = None,
) -> "PreTrainedTokenizerBase":
    """
    Ensure a (Hugging Face) tokenizer has a usable padding token, setting one if absent.
    Some tokenizers require special handling.

    Args:
        tokenizer: The tokenizer for which the padding token is to be handled.
        model_config: The configuration of the model. Default is None.

    Returns:
        The tokenizer after the padding token has been handled.

    Raises:
        AssertionError: If the tokenizer is of type RWKVWorldTokenizer or Rwkv5Tokenizer and the padding token id is not 0.
    """
    # Guard clauses: prefer an existing pad token, then unk, then eos.
    if tokenizer.pad_token:
        return tokenizer
    if tokenizer.unk_token:
        tokenizer.pad_token_id = tokenizer.unk_token_id
        return tokenizer
    if tokenizer.eos_token:
        tokenizer.pad_token_id = tokenizer.eos_token_id
        return tokenizer

    # No usable token at all -- handle special cases.
    if model_config and getattr(model_config, "model_type", None) == "qwen":
        # Qwen's trust_remote_code tokenizer does not allow for adding special tokens.
        tokenizer.pad_token = "<|endoftext|>"
    elif type(tokenizer).__name__ in ("RWKVWorldTokenizer", "Rwkv5Tokenizer"):
        # The RWKV world tokenizer does not allow adding special tokens / setting
        # the pad token (which is set as 0). The name check is needed because
        # rwkv4 models exist with a neox tokenizer.
        # Note: the world tokenizer class name might change in the future for the
        # final huggingface merge (https://github.com/huggingface/transformers/pull/26963).
        assert tokenizer.pad_token_id == 0
    else:
        tokenizer.add_special_tokens({"pad_token": "<|pad|>"})

    return tokenizer
    def __init__(
        self,
        pretrained: str,
        dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto",
        revision: Optional[str] = None,
        trust_remote_code: Optional[bool] = False,
        tokenizer: Optional[str] = None,
        tokenizer_mode: Literal["auto", "slow"] = "auto",
        tokenizer_revision: Optional[str] = None,
        add_bos_token: Optional[bool] = False,
        prefix_token_id: Optional[int] = None,
        tensor_parallel_size: int = 1,
        quantization: Optional[str] = None,
        max_gen_toks: int = 4096,
        swap_space: int = 4,
        batch_size: Union[str, int] = 1,
        max_batch_size=None,  # NOTE(review): accepted but never used in this body
        max_length: int = None,
        max_model_len: int = None,
        seed: int = 1234,
        gpu_memory_utilization: float = 0.8,
        device: str = "cuda",
        data_parallel_size: int = 1,
        lora_local_path: str = None,
        olmes_task: str = "dummy",
        olmes_batchsize: int = 0,
        **kwargs,
    ):
        """Build a vLLM-backed LM.

        Constructs the vLLM engine args from the given parameters, instantiates
        the engine (unless data-parallel, in which case workers are spawned
        lazily via ray in `_model_generate`), and sets up the HF tokenizer.

        Notes:
        - `max_length` and `max_model_len` are mutually exclusive aliases for
          the context window.
        - `olmes_task` / `olmes_batchsize` are forwarded into the vLLM engine
          args; presumably consumed by a patched vLLM build -- TODO confirm,
          stock vLLM does not accept these keys.
        """
        super().__init__()

        if not find_spec("vllm"):
            raise Exception(
                "attempted to use 'vllm' LM type, but package `vllm` is not installed. "
                "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
            )

        assert "cuda" in device or device is None, "vLLM only supports CUDA"
        assert (
            max_length is None or max_model_len is None
        ), "Either max_length or max_model_len may be provided, but not both"

        # Unify the two context-window aliases into one attribute.
        self._max_length = max_model_len if max_model_len is not None else max_length
        self.tensor_parallel_size = int(tensor_parallel_size)
        self.data_parallel_size = int(data_parallel_size)
        # Engine kwargs passed verbatim to vllm.LLM (and to ray workers in
        # data-parallel mode).
        self.model_args = {
            "model": pretrained,
            "gpu_memory_utilization": float(gpu_memory_utilization),
            "revision": revision,
            "dtype": dtype,
            "tokenizer": tokenizer,
            "tokenizer_mode": tokenizer_mode,
            "tokenizer_revision": tokenizer_revision,
            "trust_remote_code": trust_remote_code,
            "tensor_parallel_size": int(tensor_parallel_size),
            "max_model_len": int(self._max_length) if self._max_length else None,
            "max_num_seqs": 256,
            "swap_space": int(swap_space),
            "quantization": quantization,
            "seed": int(seed),
            "olmes_task": olmes_task,
            "olmes_batchsize": olmes_batchsize,
        }
        self.model_args.update(kwargs)
        # Any string containing "auto" selects automatic batching.
        self.batch_size = (
            "auto"
            if isinstance(batch_size, str) and "auto" in batch_size
            else batch_size
        )
        if self.data_parallel_size <= 1:
            self.model = LLM(**self.model_args)
        else:
            eval_logger.warning(
                "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached."
            )
            self.model_args["worker_use_ray"] = True
            # Data parallelism forces automatic batching.
            self.batch_size = "auto"
            eval_logger.info("Manual batching is not compatible with data parallelism.")

        from transformers import AutoConfig

        self._config = AutoConfig.from_pretrained(
            pretrained, trust_remote_code=trust_remote_code, revision=revision
        )
        self.tokenizer = get_tokenizer(
            tokenizer if tokenizer else pretrained,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            tokenizer_revision=tokenizer_revision,
        )
        # Guarantee a pad token exists (falls back to unk/eos/added token).
        self.tokenizer = configure_pad_token(self.tokenizer)
        self.add_bos_token = add_bos_token
        if "gemma" in pretrained.lower():
            self.add_bos_token = True
            eval_logger.info(
                "Found 'gemma' in model name, a BOS token will be used as Gemma underperforms without it."
            )

        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(
                f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
            )

        self._max_gen_toks = max_gen_toks

        if lora_local_path is not None:
            assert parse_version(version("vllm")) > parse_version(
                "0.3.0"
            ), "lora adapters only compatible with vllm > v0.3.0."
            self.lora_request = LoRARequest("finetuned", 1, lora_local_path)
        else:
            self.lora_request = None

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood; explicit override wins,
        # then BOS, then EOS as a last resort.
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # if max length manually set, return it
            return self._max_length
        if self.data_parallel_size <= 1:
            # Single engine: ask vLLM directly.
            return self.model.llm_engine.model_config.max_model_len
        else:
            # No live engine in data-parallel mode: fall back to the HF config,
            # then the tokenizer, then the class default.
            seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
            for attr in seqlen_config_attrs:
                if hasattr(self._config, attr):
                    return getattr(self._config, attr)
            if hasattr(self.tokenizer, "model_max_length"):
                # HF's sentinel for "unbounded" model_max_length.
                if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                    return self._DEFAULT_MAX_LENGTH
                return self.tokenizer.model_max_length
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self):
        return self._max_gen_toks
    def apply_chat_template(self, chat_history: List[Dict[str, str]]) -> str:
        """
        Method to apply a chat template to a list of chat history between user and model.
        """
        return self.tokenizer.apply_chat_template(
            chat_history, tokenize=False, add_generation_prompt=True
        )

    @property
    def chat_template(self) -> str:
        # Prefer the model's own template; fall back to the tokenizer default.
        if self.tokenizer.chat_template is not None:
            return self.tokenizer.chat_template
        return self.tokenizer.default_chat_template

    @property
    def tokenizer_name(self) -> str:
        # Filesystem-safe identifier (used for cache keys).
        return self.tokenizer.name_or_path.replace("/", "__")

    def tok_encode(
        self,
        string: Union[str, List[str]],
        left_truncate_len: int = None,
        add_special_tokens: bool = False,
        truncation: bool = False,
    ) -> Union[List[int], List[List[int]]]:
        """Tokenize a string (or batch of strings), optionally left-truncating."""
        if not add_special_tokens:
            # BOS insertion is governed by the model-level setting.
            add_special_tokens = False or self.add_bos_token
        encoding: Union[List[List[int]], List[int]] = self.tokenizer(
            string,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            return_attention_mask=False,
        ).input_ids

        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
        if left_truncate_len:
            if not isinstance(string, str):
                encoding = [enc[-left_truncate_len:] for enc in encoding]
            else:
                encoding = encoding[-left_truncate_len:]

        return encoding

    def _model_generate(
        self,
        requests: List[List[int]] = None,
        generate: bool = False,
        max_tokens: int = None,
        stop: Optional[List[str]] = None,
        **kwargs,
    ):
        """Run vLLM generation (or prompt-logprob scoring when generate=False)."""
        if generate:
            kwargs = self.modify_gen_kwargs(kwargs)
            sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
        else:
            # Scoring mode: emit one dummy token and collect prompt logprobs.
            sampling_params = SamplingParams(
                temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
            )
        if self.data_parallel_size > 1:
            # vLLM hangs if tensor_parallel > 1 and resources are set in ray.remote
            # also seems to only work with decorator and not with ray.remote() fn
            # see https://github.com/vllm-project/vllm/issues/973
            # note: this has changed on 0.3.3, and it only works now if num_gpus are set.
            # but then tensor_parallel breaks
            @ray.remote
            def run_inference_one_model(
                model_args: dict, sampling_params, requests: List[List[int]]
            ):
                llm = LLM(**model_args)
                return llm.generate(
                    prompt_token_ids=requests, sampling_params=sampling_params
                )

            # dispatch requests to all self.data_parallel_size workers, in interleaved fashion
            # interleaved important to balance context lengths across workers
            requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
            inputs = ((self.model_args, sampling_params, req) for req in requests)
            object_refs = [run_inference_one_model.remote(*x) for x in inputs]
            results = ray.get(object_refs)
            # Invoke ray.shutdown() to prevent hang-ups if subsequent calls required.
            ray.shutdown()
            # flatten results back into original request order
            return undistribute(results)

        if self.lora_request is not None:
            outputs = self.model.generate(
                prompt_token_ids=requests,
                sampling_params=sampling_params,
                use_tqdm=True if self.batch_size == "auto" else False,
                lora_request=self.lora_request,
            )
        else:
            outputs = self.model.generate(
                prompt_token_ids=requests,
                sampling_params=sampling_params,
                use_tqdm=True if self.batch_size == "auto" else False,
            )
        return outputs

    def loglikelihood_rolling(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[float]:
        """Sum per-window loglikelihoods over each full string (rolling windows)."""
        loglikelihoods = []

        for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
            rolling_token_windows = list(
                map(
                    make_disjoint_window,
                    get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length - 1,
                        context_len=1,
                    ),
                )
            )

            # Prepend a None cache key: rolling windows are not cached per-request.
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]

            string_nll = self._loglikelihood_tokens(
                rolling_token_windows,
            )

            # discard is_greedy
            string_nll = [x[0] for x in string_nll]

            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods
    def generate_until(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[str]:
        """Generate text for each request until a stop sequence (or token budget) is hit."""
        res = []

        # batch tokenize contexts
        context, all_gen_kwargs = zip(*(req.args for req in requests))
        context_encoding: List[List[int]] = self.tok_encode(
            context, add_special_tokens=self.add_bos_token
        )
        requests = [
            ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
        ]

        def _collate_gen(_requests):
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
            return -len(_requests[0][1]), _requests[0][0]

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs")
        chunks = re_ords.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running generate_until requests",
        )
        # for each different set of kwargs, we execute all requests, by batch.
        for chunk in chunks:
            context_and_encoding, all_gen_kwargs = zip(*chunk)
            context, context_encoding = zip(*context_and_encoding)
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]
            # unpack our keyword arguments.
            until = None
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
                if "until" in kwargs.keys():
                    until = kwargs.pop("until")
                    if isinstance(until, str):
                        until = [until]
                    elif not isinstance(until, list):
                        raise ValueError(
                            f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
                        )
            else:
                raise ValueError(
                    f"Expected `kwargs` to be of type `dict` but got {gen_kwargs}"
                )
            # add EOS token to stop sequences
            eos = self.tokenizer.decode(self.eot_token_id)
            if not until:
                until = [eos]
            else:
                until.append(eos)
            if "max_gen_toks" in kwargs.keys():
                max_gen_toks = kwargs.pop("max_gen_toks")
            else:
                max_gen_toks = self.max_gen_toks

            # set the max length in tokens of inputs ("context_enc")
            # max len for inputs = max length, minus room to generate the max new tokens
            max_ctx_len = self.max_length - max_gen_toks
            context_encoding = [x[-max_ctx_len:] for x in context_encoding]

            # perform batched generation
            cont = self._model_generate(
                requests=context_encoding,
                generate=True,
                max_tokens=max_gen_toks,
                stop=until,
                **kwargs,
            )

            # cache generations
            for output, context in zip(cont, context):
                generated_text = output.outputs[0].text
                res.append(generated_text)
                self.cache_hook.add_partial(
                    "generate_until", (context, gen_kwargs), generated_text
                )
                pbar.update(1)

        pbar.close()
        # reorder all group of results back to original unsorted form
        return re_ords.get_original(res)

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
    ) -> List[Tuple[float, bool]]:
        """Score (context, continuation) token pairs; returns (logprob, is_greedy) per request."""
        res = []

        def _collate(x):
            # Sort by total length descending (see _collate_gen rationale),
            # tie-broken by token content for determinism.
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        # Reorder requests by length and batch
        re_ord = Collator(requests, sort_fn=_collate)
        chunks = re_ord.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=disable_tqdm,
            desc="Running loglikelihood requests",
        )
        for chunk in chunks:
            inputs = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                # Keep at most max_length tokens, truncating the context from
                # the left; ctxlen shrinks by however much context was cut.
                inp = (context_enc + continuation_enc)[-(self.max_length) :]
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length)
                )

                inputs.append(inp)
                ctxlens.append(ctxlen)

            outputs = self._model_generate(requests=inputs, generate=False)

            for output, ctxlen, (cache_key, _, _), inp in zip(
                outputs, ctxlens, chunk, inputs
            ):
                answer = self._parse_logprobs(
                    tokens=inp,
                    outputs=output,
                    ctxlen=ctxlen,
                )

                res.append(answer)

                # partial caching
                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
                pbar.update(1)
        pbar.close()
        return re_ord.get_original(res)
+ return getattr(logprob, "logprob", logprob) + + continuation_logprobs_dicts = [ + { + token: coerce_logprob_to_num(logprob) + for token, logprob in logprob_dict.items() + } + if logprob_dict is not None + else None + for logprob_dict in continuation_logprobs_dicts + ] + + # Calculate continuation_logprobs + # assume ctxlen always >= 1 + continuation_logprobs = sum( + logprob_dict.get(token) + for token, logprob_dict in zip( + tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:] + ) + ) + + # Determine if is_greedy + is_greedy = True + for token, logprob_dict in zip( + tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:] + ): + # Get the token with the maximum log probability from the logprob_dict + if logprob_dict: # Ensure the logprob_dict is not None + top_token = max(logprob_dict, key=logprob_dict.get) + if top_token != token: + is_greedy = False + break + + return continuation_logprobs, is_greedy + + @staticmethod + def modify_gen_kwargs(kwargs: dict) -> dict: + # sampling_params + do_sample = kwargs.pop("do_sample", None) + if do_sample is False and "temperature" not in kwargs: + eval_logger.debug( + "Got `do_sample=False` and no temperature value, setting VLLM temperature to 0.0 ..." + ) + kwargs["temperature"] = 0.0 + # hf defaults + kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False) + kwargs["spaces_between_special_tokens"] = kwargs.get( + "spaces_between_special_tokens", False + ) + return kwargs diff --git a/venv/lib/python3.10/site-packages/lm_eval/prompts/__init__.py b/venv/lib/python3.10/site-packages/lm_eval/prompts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f814214de4afaabd1367854c74dc2143c346744 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/prompts/__init__.py @@ -0,0 +1,126 @@ +import ast +import os +from typing import Dict + +from lm_eval import utils +from lm_eval.utils import eval_logger + + +# Prompt library. 
# Prompt library.
# Stores prompts in a dictionary indexed by 2 levels:
# prompt category name, and prompt name.
# This allows us to access prompts
PROMPT_REGISTRY: Dict[str, Dict[str, str]] = {
    "qa-basic": {
        "question-newline-answer": "Question: {{question}}\nAnswer:",
        "q-newline-a": "Q: {{question}}\nA:",
    },
}


def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None):
    """Resolve a ``category:name`` prompt id to a prompt object.

    Three sources are supported, selected by the category part:
    - "promptsource": looked up via the promptsource library's DatasetTemplates.
    - a path containing ".yaml": loaded from that YAML file's "prompts" mapping,
      wrapped in a PromptString.
    - anything else: looked up in the in-module PROMPT_REGISTRY.
    """
    # unpack prompt name
    category_name, prompt_name = prompt_id.split(":")
    if subset_name is None:
        dataset_full_name = dataset_name
    else:
        dataset_full_name = f"{dataset_name}-{subset_name}"
    eval_logger.info(f"Loading prompt from {category_name} for {dataset_full_name}")
    if category_name == "promptsource":
        try:
            from promptsource.templates import DatasetTemplates
        except ModuleNotFoundError:
            # NOTE(review): Exception is raised with two message args here,
            # matching the module's existing style.
            raise Exception(
                "Tried to load a Promptsource template, but promptsource is not installed ",
                "please install promptsource via pip install lm-eval[promptsource] or pip install -e .[promptsource]",
            )
        try:
            if subset_name is None:
                prompts = DatasetTemplates(dataset_name=dataset_name)
            else:
                prompts = DatasetTemplates(
                    dataset_name=dataset_name, subset_name=subset_name
                )
        except Exception:
            raise ValueError(f"{dataset_name} and {subset_name} not found")
        if prompt_name in prompts.all_template_names:
            return prompts[prompt_name]
        else:
            raise ValueError(
                f"{prompt_name} not in prompt list {prompts.all_template_names}"
            )
    elif ".yaml" in category_name:
        import yaml

        with open(category_name, "rb") as file:
            prompt_yaml_file = yaml.full_load(file)

        prompt_string = prompt_yaml_file["prompts"][prompt_name]
        return PromptString(prompt_string)
    else:
        try:
            return PROMPT_REGISTRY[category_name][prompt_name]
        except Exception:
            # NOTE(review): this also fires on a plain missing-key lookup, in
            # which case the separator message below is misleading -- confirm
            # intended wording with maintainers.
            raise ValueError(
                f"expected only a single `:` as separator between \
                prompt category and name, but got `{prompt_id}` instead"
            )
def load_prompt_list(
    use_prompt: str, dataset_name=None, subset_name=None, yaml_path=None, **kwargs
):
    """Expand a ``category:name-pattern`` spec into a list of full prompt ids.

    The name part may be a wildcard pattern; it is matched against the
    available template names (promptsource) or the keys of a YAML file's
    "prompts" mapping. Returns ids in "category:prompt" form.

    NOTE(review): if `category` is neither "promptsource" nor a ".yaml" path,
    `prompt_list` is never assigned and the final return raises NameError --
    confirm whether an explicit error is intended here.
    """
    category_name, prompt_name = use_prompt.split(":")

    if category_name == "promptsource":
        from promptsource.templates import DatasetTemplates

        if subset_name is None:
            prompts = DatasetTemplates(dataset_name=dataset_name)
        else:
            prompts = DatasetTemplates(
                dataset_name=dataset_name, subset_name=subset_name
            )

        prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)

    elif ".yaml" in category_name:
        import yaml

        # Resolve the YAML path relative to the caller-supplied base directory.
        if yaml_path is not None:
            category_name = os.path.realpath(os.path.join(yaml_path, category_name))

        with open(category_name, "rb") as file:
            prompt_yaml_file = yaml.full_load(file)

        prompt_list = utils.pattern_match(
            prompt_name, prompt_yaml_file["prompts"].keys()
        )

    # category_name, *prompt_name = use_prompt.split(":")
    # TODO allow to multiple prompt naming
    # if len(prompt_name) > 1:
    #     prompt_list = []
    #     for prompt in prompt_name:
    #         prompt_list.append(utils.pattern_match(prompt_name, prompts.all_template_names))
    # else:
    #     prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)
    return [":".join([category_name, prompt]) for prompt in prompt_list]


class PromptString:
    """Minimal prompt wrapper around a YAML-defined {doc_to_text, doc_to_target} pair.

    Mirrors the promptsource template interface: apply(doc) renders both
    templates against the document and returns [text, target].
    """

    def __init__(self, prompt_string):
        # prompt_string is expected to be a mapping with "doc_to_text" and
        # "doc_to_target" keys (see apply()).
        self.prompt_string = prompt_string

    def apply(self, doc):
        doc_to_text = self.prompt_string["doc_to_text"]
        doc_to_target = self.prompt_string["doc_to_target"]

        # TODO need a way to process doc_to_choice
        if "doc_to_choice" in self.prompt_string:
            raise Exception("Not yet implemented to accept doc_to_choice")

        text_string = utils.apply_template(doc_to_text, doc)
        target_string = utils.apply_template(doc_to_target, doc)

        return [text_string, target_string]
0000000000000000000000000000000000000000..e5687b9da8d90915040905f69eee0a67fb0c12ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/prompts/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/prompts/__pycache__/__init__.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/prompts/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c90da0d74d30553f9006453a790f3d9b5fc0b26e Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/prompts/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bb04d4f279e4e63949f79dfe215a7edcc74be0d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/README.md @@ -0,0 +1,115 @@ + +# Tasks + + A list of supported tasks and task groupings can be viewed with `lm-eval --tasks list`. + + For more information, including a full list of task names and their precise meanings or sources, follow the links provided to the individual README.md files for each subfolder. + +| Task Family | Description | Language(s) | +|-------------|-------------|-------------| +| [aclue](aclue/README.md) | Tasks focusing on ancient Chinese language understanding and cultural aspects. | Ancient Chinese | +| [aexams](aexams/README.md) | Tasks in Arabic related to various academic exams covering a range of subjects. | Arabic | +| [agieval](agieval/README.md) | Tasks involving historical data or questions related to history and historical texts. | English, Chinese | +| [anli](anli/README.md) | Adversarial natural language inference tasks designed to test model robustness. | English | +| [arabicmmlu](arabicmmlu/README.md) | Localized Arabic version of MMLU with multiple-choice questions from 40 subjects. 
| Arabic | +| [arc](arc/README.md) | Tasks involving complex reasoning over a diverse set of questions. | English | +| [arithmetic](arithmetic/README.md) | Tasks involving numerical computations and arithmetic reasoning. | English | +| [asdiv](asdiv/README.md) | Tasks involving arithmetic and mathematical reasoning challenges. | English | +| [babi](babi/README.md) | Tasks designed as question and answering challenges based on simulated stories. | English | +| [basqueglue](basqueglue/README.md) | Tasks designed to evaluate language understanding in Basque language. | Basque | +| [bbh](bbh/README.md) | Tasks focused on deep semantic understanding through hypothesization and reasoning. | English, German | +| [belebele](belebele/README.md) | Language understanding tasks in a variety of languages and scripts. | Multiple (122 languages) | +| benchmarks | General benchmarking tasks that test a wide range of language understanding capabilities. | | +| [bertaqa](bertaqa/README.md) | Local Basque cultural trivia QA tests in English and Basque languages. | English, Basque, Basque (MT) | +| [bigbench](bigbench/README.md) | Broad tasks from the BIG-bench benchmark designed to push the boundaries of large models. | Multiple | +| [blimp](blimp/README.md) | Tasks testing grammatical phenomena to evaluate language model's linguistic capabilities. | English | +| [ceval](ceval/README.md) | Tasks that evaluate language understanding and reasoning in an educational context. | Chinese | +| [cmmlu](cmmlu/README.md) | Multi-subject multiple choice question tasks for comprehensive academic assessment. | Chinese | +| code_x_glue | Tasks that involve understanding and generating code across multiple programming languages. | Go, Java, JS, PHP, Python, Ruby | +| [commonsense_qa](commmonsense_qa/README.md) | CommonsenseQA, a multiple-choice QA dataset for measuring commonsense knowledge. 
| English | +| [copal_id](copal_id/README.md) | Indonesian causal commonsense reasoning dataset that captures local nuances. | Indonesian | +| [coqa](coqa/README.md) | Conversational question answering tasks to test dialog understanding. | English | +| [crows_pairs](crows_pairs/README.md) | Tasks designed to test model biases in various sociodemographic groups. | English, French | +| csatqa | Tasks related to SAT and other standardized testing questions for academic assessment. | Korean | +| [drop](drop/README.md) | Tasks requiring numerical reasoning, reading comprehension, and question answering. | English | +| [eq_bench](eq_bench/README.md) | Tasks focused on equality and ethics in question answering and decision-making. | English | +| [eus_exams](eus_exams/README.md) | Tasks based on various professional and academic exams in the Basque language. | Basque | +| [eus_proficiency](eus_proficiency/README.md) | Tasks designed to test proficiency in the Basque language across various topics. | Basque | +| [eus_reading](eus_reading/README.md) | Reading comprehension tasks specifically designed for the Basque language. | Basque | +| [eus_trivia](eus_trivia/README.md) | Trivia and knowledge testing tasks in the Basque language. | Basque | +| [fda](fda/README.md) | Tasks for extracting key-value pairs from FDA documents to test information extraction. | English | +| [fld](fld/README.md) | Tasks involving free-form and directed dialogue understanding. | English | +| [french_bench](french_bench/README.md) | Set of tasks designed to assess language model performance in French. | French| +| [glue](glue/README.md) | General Language Understanding Evaluation benchmark to test broad language abilities. | English | +| [gpqa](gpqa/README.md) | Tasks designed for general public question answering and knowledge verification. | English | +| [gsm8k](gsm8k/README.md) | A benchmark of grade school math problems aimed at evaluating reasoning capabilities. 
| English | +| [haerae](haerae/README.md) | Tasks focused on assessing detailed factual and historical knowledge. | Korean | +| [headqa](headqa/README.md) | A high-level education-based question answering dataset to test specialized knowledge. | Spanish, English | +| [hellaswag](hellaswag/README.md) | Tasks to predict the ending of stories or scenarios, testing comprehension and creativity. | English | +| [hendrycks_ethics](hendrycks_ethics/README.md) | Tasks designed to evaluate the ethical reasoning capabilities of models. | English | +| [hendrycks_math](hendrycks_math/README.md) | Mathematical problem-solving tasks to test numerical reasoning and problem-solving. | English | +| [ifeval](ifeval/README.md) | Interactive fiction evaluation tasks for narrative understanding and reasoning. | English | +| [kmmlu](kmmlu/README.md) | Knowledge-based multi-subject multiple choice questions for academic evaluation. | Korean | +| [kobest](kobest/README.md) | A collection of tasks designed to evaluate understanding in Korean language. | Korean | +| [kormedmcqa](kormedmcqa/README.md) | Medical question answering tasks in Korean to test specialized domain knowledge. | Korean | +| [lambada](lambada/README.md) | Tasks designed to predict the endings of text passages, testing language prediction skills. | English | +| [lambada_cloze](lambada_cloze/README.md) | Cloze-style LAMBADA dataset. | English | +| [lambada_multilingual](lambada_multilingual/README.md) | Multilingual LAMBADA dataset. This is a legacy version of the multilingual dataset, and users should instead use `lambada_multilingual_stablelm`. | German, English, Spanish, French, Italian | +| [lambada_multilingual_stablelm](lambada_multilingual_stablelm/README.md) | Multilingual LAMBADA dataset. Users should prefer evaluating on this version of the multilingual dataset instead of on `lambada_multilingual`. 
| German, English, Spanish, French, Italian, Dutch, Portuguese | +| [logiqa](logiqa/README.md) | Logical reasoning tasks requiring advanced inference and deduction. | English, Chinese | +| [logiqa2](logiqa2/README.md) | Large-scale logical reasoning dataset adapted from the Chinese Civil Service Examination. | English, Chinese | +| [mathqa](mathqa/README.md) | Question answering tasks involving mathematical reasoning and problem-solving. | English | +| [mc_taco](mc_taco/README.md) | Question-answer pairs that require temporal commonsense comprehension. | English | +| medmcqa | Medical multiple choice questions assessing detailed medical knowledge. | English | +| medqa | Multiple choice question answering based on the United States Medical License Exams. | | +| [mgsm](mgsm/README.md) | Benchmark of multilingual grade-school math problems. | Spanish, French, German, Russian, Chinese, Japanese, Thai, Swahili, Bengali, Telugu | +| [minerva_math](minerva_math/README.md) | Mathematics-focused tasks requiring numerical reasoning and problem-solving skills. | English | +| mmlu | Massive Multitask Language Understanding benchmark for broad domain language evaluation. Several variants are supported. | English | +| model_written_evals | Evaluation tasks auto-generated for evaluating a collection of AI Safety concerns. | | +| [mutual](mutual/README.md) | A retrieval-based dataset for multi-turn dialogue reasoning. | English | +| [nq_open](nq_open/README.md) | Open domain question answering tasks based on the Natural Questions dataset. | English | +| [okapi/arc_multilingual](okapi/arc_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (31 languages) **Machine Translated.** | +| [okapi/hellaswag_multilingual](okapi/hellaswag_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. 
| Multiple (30 languages) | +| okapi/mmlu_multilingual | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (34 languages) | +| [okapi/truthfulqa_multilingual](okapi/truthfulqa_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (31 languages) | +| [openbookqa](openbookqa/README.md) | Open-book question answering tasks that require external knowledge and reasoning. | English | +| [paloma](paloma/README.md) | Paloma is a comprehensive benchmark designed to evaluate open language models across a wide range of domains, ranging from niche artist communities to mental health forums on Reddit. | English | +| [paws-x](paws-x/README.md) | Paraphrase Adversaries from Word Scrambling, focusing on cross-lingual capabilities. | English, French, Spanish, German, Chinese, Japanese, Korean | +| [pile](pile/README.md) | Open source language modelling data set that consists of 22 smaller, high-quality datasets. | English | +| [pile_10k](pile_10k/README.md) | The first 10K elements of The Pile, useful for debugging models trained on it. | English | +| [piqa](piqa/README.md) | Physical Interaction Question Answering tasks to test physical commonsense reasoning. | English | +| [polemo2](polemo2/README.md) | Sentiment analysis and emotion detection tasks based on Polish language data. | Polish | +| [prost](prost/README.md) | Tasks requiring understanding of professional standards and ethics in various domains. | English | +| [pubmedqa](pubmedqa/README.md) | Question answering tasks based on PubMed research articles for biomedical understanding. | English | +| [qa4mre](qa4mre/README.md) | Question Answering for Machine Reading Evaluation, assessing comprehension and reasoning. | English | +| [qasper](qasper/README.md) | Question Answering dataset based on academic papers, testing in-depth scientific knowledge. 
| English | +| [race](race/README.md) | Reading comprehension assessment tasks based on English exams in China. | English | +| realtoxicityprompts | Tasks to evaluate language models for generating text with potential toxicity. | | +| [sciq](sciq/README.md) | Science Question Answering tasks to assess understanding of scientific concepts. | English | +| [scrolls](scrolls/README.md) | Tasks that involve long-form reading comprehension across various domains. | English | +| [siqa](siqa/README.md) | Social Interaction Question Answering to evaluate common sense and social reasoning. | English | +| [squad_completion](squad_completion/README.md) | A variant of the SQuAD question answering task designed for zero-shot evaluation of small LMs. | English | +| [squadv2](squadv2/README.md) | Stanford Question Answering Dataset version 2, a reading comprehension benchmark. | English | +| [storycloze](storycloze/README.md) | Tasks to predict story endings, focusing on narrative logic and coherence. | English | +| [super_glue](super_glue/README.md) | A suite of challenging tasks designed to test a range of language understanding skills. | English | +| [swag](swag/README.md) | Situations With Adversarial Generations, predicting the next event in videos. | English | +| [swde](swde/README.md) | Information extraction tasks from semi-structured web pages. | English | +| [tinyBenchmarks](tinyBenchmarks/README.md) | Evaluation of large language models with fewer examples using tiny versions of popular benchmarks. | English | +| [tmmluplus](tmmluplus/README.md) | An extended set of tasks under the TMMLU framework for broader academic assessments. | Traditional Chinese | +| [toxigen](toxigen/README.md) | Tasks designed to evaluate language models on their propensity to generate toxic content. | English | +| [translation](translation/README.md) | Tasks focused on evaluating the language translation capabilities of models. 
| Arabic, English, Spanish, Basque, Hindi, Indonesian, Burmese, Russian, Swahili, Telugu, Chinese | +| [triviaqa](triviaqa/README.md) | A large-scale dataset for trivia question answering to test general knowledge. | English | +| [truthfulqa](truthfulqa/README.md) | A QA task aimed at evaluating the truthfulness and factual accuracy of model responses. | English | +| [unitxt](unitxt/README.md) | A number of tasks implemented using the unitxt library for flexible, shareable, and reusable data preparation and evaluation for generative AI. | English | +| [unscramble](unscramble/README.md) | Tasks involving the rearrangement of scrambled sentences to test syntactic understanding. | English | +| [webqs](webqs/README.md) | Web-based question answering tasks designed to evaluate internet search and retrieval. | English | +| [wikitext](wikitext/README.md) | Tasks based on text from Wikipedia articles to assess language modeling and generation. | English | +| [winogrande](winogrande/README.md) | A large-scale dataset for coreference resolution, inspired by the Winograd Schema Challenge. | English | +| [wmdp](wmdp/README.md) | A benchmark with the objective of minimizing performance, based on potentially-sensitive multiple-choice knowledge questions. | English | +| [wmt2016](wmt2016/README.md) | Tasks from the WMT 2016 shared task, focusing on translation between multiple languages. | English, Czech, German, Finnish, Russian, Romanian, Turkish | +| [wsc273](wsc273/README.md) | The Winograd Schema Challenge, a test of commonsense reasoning and coreference resolution. | English | +| [xcopa](xcopa/README.md) | Cross-lingual Choice of Plausible Alternatives, testing reasoning in multiple languages. | Estonian, Haitian, Indonesian, Italian, Quechua, Swahili, Tamil, Thai, Turkish, Vietnamese, Chinese | +| [xnli](xnli/README.md) | Cross-Lingual Natural Language Inference to test understanding across different languages. 
| Arabic, Bulgarian, German, Greekm English, Spanish, French, Hindi, Russian, Swahili, Thai, Turkish, Urdu, Vietnamese, Chinese | +| [xnli_eu](xnli_eu/README.md) | Cross-lingual Natural Language Inference tasks in Basque. | Basque | +| [xstorycloze](xstorycloze/README.md) | Cross-lingual narrative understanding tasks to predict story endings in multiple languages. | Russian, Simplified Chinese, Spanish, Arabic, Hindi, Indonesian, Telugu, Swahili, Basque, Burmese | +| [xwinograd](xwinograd/README.md) | Cross-lingual Winograd schema tasks for coreference resolution in multiple languages. | English, French, Japanese, Portuguese, Russian, Chinese | diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/__init__.py b/venv/lib/python3.10/site-packages/lm_eval/tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1fd63cdb8110c8c86ad0a3a8c8de4e9b3403c242 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/__init__.py @@ -0,0 +1,470 @@ +import collections +import logging +import os +from functools import partial +from typing import Dict, List, Mapping, Optional, Union + +from lm_eval import utils +from lm_eval.api.task import ConfigurableTask, Task + + +class TaskManager: + """TaskManager indexes all tasks from the default `lm_eval/tasks/` + and an optional directory if provided. 
+ + """ + + def __init__( + self, + verbosity="INFO", + include_path: Optional[Union[str, List]] = None, + include_defaults: bool = True, + ) -> None: + self.verbosity = verbosity + self.include_path = include_path + self.logger = utils.eval_logger + self.logger.setLevel(getattr(logging, f"{verbosity}")) + + self._task_index = self.initialize_tasks( + include_path=include_path, include_defaults=include_defaults + ) + self._all_tasks = sorted(list(self._task_index.keys())) + + self.task_group_map = collections.defaultdict(list) + + def initialize_tasks( + self, + include_path: Optional[Union[str, List]] = None, + include_defaults: bool = True, + ): + """Creates a dictionary of tasks index. + + :param include_path: Union[str, List] = None + An additional path to be searched for tasks recursively. + Can provide more than one such path as a list. + :param include_defaults: bool = True + If set to false, default tasks (those in lm_eval/tasks/) are not indexed. + :return + Dictionary of task names as key and task metadata + """ + if include_defaults: + all_paths = [os.path.dirname(os.path.abspath(__file__)) + "/"] + else: + all_paths = [] + if include_path is not None: + if isinstance(include_path, str): + include_path = [include_path] + all_paths.extend(include_path) + + task_index = {} + for task_dir in all_paths: + tasks = self._get_task_and_group(task_dir) + task_index = {**tasks, **task_index} + + return task_index + + @property + def all_tasks(self): + return self._all_tasks + + @property + def task_index(self): + return self._task_index + + def match_tasks(self, task_list): + return utils.pattern_match(task_list, self.all_tasks) + + def _name_is_registered(self, name) -> bool: + if name in self.all_tasks: + return True + return False + + def _name_is_task(self, name) -> bool: + if self._name_is_registered(name) and ("task" in self.task_index[name]["type"]): + return True + return False + + def _name_is_group(self, name) -> bool: + if self._name_is_registered(name) 
and ( + self.task_index[name]["type"] == "group" + ): + return True + return False + + def _name_is_python_task(self, name): + if self._name_is_registered(name) and ( + self.task_index[name]["type"] == "python_task" + ): + return True + return False + + def _config_is_task(self, config) -> bool: + if ("task" in config) and isinstance(config["task"], str): + return True + return False + + def _config_is_group(self, config) -> bool: + if ("task" in config) and isinstance(config["task"], list): + return True + return False + + def _config_is_python_task(self, config) -> bool: + if "class" in config: + return True + return False + + def _get_yaml_path(self, name): + if name not in self.task_index: + raise ValueError + return self.task_index[name]["yaml_path"] + + def _get_config(self, name): + if name not in self.task_index: + raise ValueError + yaml_path = self._get_yaml_path(name) + if yaml_path == -1: + return {} + else: + return utils.load_yaml_config(yaml_path, mode="full") + + def _get_tasklist(self, name): + if self._name_is_task(name): + raise ValueError + return self.task_index[name]["task"] + + def _process_alias(self, config, group=None): + # If the group is not the same as the original + # group which the group alias was intended for, + # Set the group_alias to None instead. 
+ if ("group_alias" in config) and ("group" in config) and group is not None: + if config["group"] != group: + config["group_alias"] = None + return config + + def _load_individual_task_or_group( + self, + name_or_config: Optional[Union[str, dict]] = None, + parent_name: Optional[str] = None, + update_config: Optional[dict] = None, + yaml_path: Optional[str] = None, + ) -> Mapping: + def load_task(config, task, group=None, yaml_path=None): + if "include" in config: + if yaml_path is None: + raise ValueError + config = { + **utils.load_yaml_config( + yaml_path, + yaml_config={"include": config.pop("include")}, + mode="full", + ), + **config, + } + if self._config_is_python_task(config): + task_object = config["class"]() + else: + config = self._process_alias(config, group=group) + task_object = ConfigurableTask(config=config) + if group is not None: + task_object = (group, task_object) + return {task: task_object} + + if isinstance(name_or_config, str): + if update_config is not None: + # Process name_or_config as a dict instead + name_or_config = {"task": name_or_config, **update_config} + elif self._name_is_task(name_or_config): + task_config = self._get_config(name_or_config) + return load_task(task_config, task=name_or_config, group=parent_name) + else: + group_name = name_or_config + subtask_list = self._get_tasklist(name_or_config) + if subtask_list == -1: + group_config = self._get_config(name_or_config) + subtask_list = group_config["task"] + + # This checks if we're at the root. 
+ if parent_name is None: + group_config = self._get_config(name_or_config) + if set(group_config.keys()) > {"task", "group"}: + update_config = { + k: v + for k, v in group_config.items() + if k not in ["task", "group"] + } + yaml_path = self._get_yaml_path(group_name) + + if (update_config is not None) and ("group_alias" in update_config): + group_name = update_config["group_alias"] + update_config.pop("group_alias") + + if isinstance(name_or_config, dict): + if update_config is not None: + name_or_config = { + **name_or_config, + **update_config, + } + + if self._config_is_task(name_or_config): + name = name_or_config["task"] + # If the name is registered as a group + # if self._name_is_task(name) is False: + if self._name_is_group(name): + group_name = name + update_config = { + k: v for k, v in name_or_config.items() if k != "task" + } + subtask_list = self._get_tasklist(name) + if subtask_list == -1: + subtask_list = self._get_config(name)["task"] + else: + if self._name_is_registered(name): + base_task_config = self._get_config(name) + + # Check if this is a duplicate. 
+ if parent_name is not None: + name_or_config["group"] = parent_name + num_duplicate = len( + list( + filter( + lambda x: x.startswith(name), + self.task_group_map[parent_name], + ) + ) + ) + if num_duplicate > 0: + name = f"{name}-{num_duplicate}" + self.task_group_map[parent_name].append(name) + + task_config = { + **base_task_config, + **name_or_config, + } + else: + task_config = name_or_config + return load_task( + task_config, task=name, group=parent_name, yaml_path=yaml_path + ) + else: + group_name = name_or_config["group"] + subtask_list = name_or_config["task"] + if set(name_or_config.keys()) > {"task", "group"}: + update_config = { + k: v + for k, v in name_or_config.items() + if k not in ["task", "group"] + } + + all_subtasks = {} + if parent_name is not None: + all_subtasks = {group_name: (parent_name, None)} + + fn = partial( + self._load_individual_task_or_group, + parent_name=group_name, + update_config=update_config, + yaml_path=yaml_path, + ) + all_subtasks = { + **all_subtasks, + **dict(collections.ChainMap(*map(fn, subtask_list))), + } + return all_subtasks + + def load_task_or_group(self, task_list: Optional[Union[str, list]] = None) -> dict: + """Loads a dictionary of task objects from a list + + :param task_list: Union[str, list] = None + Single string or list of string of task names to be loaded + + :return + Dictionary of task objects + """ + if isinstance(task_list, str): + task_list = [task_list] + + all_loaded_tasks = dict( + collections.ChainMap(*map(self._load_individual_task_or_group, task_list)) + ) + return all_loaded_tasks + + def load_config(self, config: Dict): + return self._load_individual_task_or_group(config) + + def _get_task_and_group(self, task_dir: str): + """Creates a dictionary of tasks index with the following metadata, + - `type`, that can be either `task`, `python_task`, or `group`. + `task` refer to regular task configs, `python_task` are special + yaml files that only consists of `task` and `class` parameters. 
+ `group` are group configs. + - `yaml_path`, path to the yaml file. If the entry is a `group` that + was configured through a task config, the yaml_path will be -1 + and all subtasks will be listed in `task` (see below) + - `task`, reserved for entries with `type` as `group`. This will list + all subtasks. When a group config is created (as opposed to task + config having `group` parameter set), this will be set to -1 to + avoid recursive indexing. The whole list of subtasks will be loaded + at evaluation. + + :param task_dir: str + A directory to check for tasks + + :return + Dictionary of task names as key and task metadata + """ + ignore_dirs = [ + "__pycache__", + ".ipynb_checkpoints", + ] + tasks_and_groups = collections.defaultdict() + for root, dirs, file_list in os.walk(task_dir): + dirs[:] = [d for d in dirs if d not in ignore_dirs] + for f in file_list: + if f.endswith(".yaml"): + yaml_path = os.path.join(root, f) + config = utils.load_yaml_config(yaml_path, mode="simple") + if self._config_is_python_task(config): + # This is a python class config + tasks_and_groups[config["task"]] = { + "type": "python_task", + "yaml_path": yaml_path, + } + elif self._config_is_group(config): + # This is a group config + tasks_and_groups[config["group"]] = { + "type": "group", + "task": -1, # This signals that + # we don't need to know + # the task list for indexing + # as it can be loaded + # when called. 
+ "yaml_path": yaml_path, + } + + # # Registered the level 1 tasks from a group config + # for config in config["task"]: + # if isinstance(config, dict) and self._config_is_task(config): + # task = config["task"] + # tasks_and_groups[task] = { + # "type": "task", + # "yaml_path": yaml_path, + # } + + elif self._config_is_task(config): + # This is a task config + task = config["task"] + tasks_and_groups[task] = { + "type": "task", + "yaml_path": yaml_path, + } + + if "group" in config: + groups = config["group"] + if isinstance(config["group"], str): + groups = [groups] + + for group in groups: + if group not in tasks_and_groups: + tasks_and_groups[group] = { + "type": "group", + "task": [task], + "yaml_path": -1, + } + else: + tasks_and_groups[group]["task"].append(task) + else: + self.logger.debug(f"File {f} in {root} could not be loaded") + + return tasks_and_groups + + +def get_task_name_from_config(task_config: Dict[str, str]) -> str: + if "task" in task_config: + return task_config["task"] + if "dataset_name" in task_config: + return "{dataset_path}_{dataset_name}".format(**task_config) + else: + return "{dataset_path}".format(**task_config) + + +def get_task_name_from_object(task_object): + if hasattr(task_object, "config"): + return task_object._config["task"] + + # TODO: scrap this + # this gives a mechanism for non-registered tasks to have a custom name anyways when reporting + return ( + task_object.EVAL_HARNESS_NAME + if hasattr(task_object, "EVAL_HARNESS_NAME") + else type(task_object).__name__ + ) + + +def get_task_dict( + task_name_list: Union[str, List[Union[str, Dict, Task]]], + task_manager: Optional[TaskManager] = None, +): + """Creates a dictionary of task objects from either a name of task, config, or prepared Task object. + + :param task_name_list: List[Union[str, Dict, Task]] + Name of model or LM object, see lm_eval.models.get_model + :param task_manager: TaskManager = None + A TaskManager object that stores indexed tasks. 
If not set, + task_manager will load one. This should be set by the user + if there are additional paths that want to be included + via `include_path` + + :return + Dictionary of task objects + """ + task_name_from_string_dict = {} + task_name_from_config_dict = {} + task_name_from_object_dict = {} + + if isinstance(task_name_list, str): + task_name_list = [task_name_list] + elif isinstance(task_name_list, list): + if not all([isinstance(task, (str, dict, Task)) for task in task_name_list]): + raise TypeError( + "Expected all list items to be of types 'str', 'dict', or 'Task', but at least one entry did not match." + ) + else: + raise TypeError( + f"Expected a 'str' or 'list' but received {type(task_name_list)}." + ) + + string_task_name_list = [task for task in task_name_list if isinstance(task, str)] + others_task_name_list = [ + task for task in task_name_list if not isinstance(task, str) + ] + if len(string_task_name_list) > 0: + if task_manager is None: + task_manager = TaskManager() + + task_name_from_string_dict = task_manager.load_task_or_group( + string_task_name_list + ) + + for task_element in others_task_name_list: + if isinstance(task_element, dict): + task_name_from_config_dict = { + **task_name_from_config_dict, + **task_manager.load_config(config=task_element), + } + + elif isinstance(task_element, Task): + task_name_from_object_dict = { + **task_name_from_object_dict, + get_task_name_from_object(task_element): task_element, + } + + if not set(task_name_from_string_dict.keys()).isdisjoint( + set(task_name_from_object_dict.keys()) + ): + raise ValueError + + return { + **task_name_from_string_dict, + **task_name_from_config_dict, + **task_name_from_object_dict, + } diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..46b4b5c543731ce54dc8403ff9fd115616ab070b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/__pycache__/__init__.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/tasks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da499f2fb13c83a98dc4e8ebe54c235bbd21759b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/tasks/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d8707c01bf555d54f14d825501f54428c33cbe89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/README.md @@ -0,0 +1,50 @@ +# ACLUE + +### Paper + +Can Large Language Model Comprehend Ancient Chinese? A Preliminary Test on ACLUE +https://arxiv.org/abs/2310.09550 + +The Ancient Chinese Language Understanding Evaluation (ACLUE) is an evaluation benchmark focused on ancient Chinese language comprehension. It aims to assess the performance of large-scale language models on understanding ancient Chinese. The benchmark comprises 15 tasks spanning various domains, including lexical, syntactic, semantic, inference, and knowledge. ACLUE's tasks are derived from a combination of manually curated questions from publicly available resources, and automatically +generated questions from classical Chinese language corpora. The range of questions span from the Xia dynasty (2070 BCE) to the Ming dynasty (1368 CE). ACLUE adopts a multiple-choice question format for all tasks. 
+ +Homepage: https://github.com/isen-zhang/ACLUE + +### Citation + +```bibtex +@inproceedings{zhang-li-2023-large, + title = "Can Large Langauge Model Comprehend {A}ncient {C}hinese? A Preliminary Test on {ACLUE}", + author = "Zhang, Yixuan and Li, Haonan", + booktitle = "Proceedings of the Ancient Language Processing Workshop", + month = sep, + year = "2023", + address = "Varna, Bulgaria", + publisher = "INCOMA Ltd., Shoumen, Bulgaria", + url = "https://aclanthology.org/2023.alp-1.9", + pages = "80--87" +} +``` + +### Groups and Tasks + +#### Groups + +- `aclue`: All 15 subjects of the ACLUE dataset, evaluated following the methodology in CMMLU's original implementation. + +#### Tasks + +The following tasks evaluate subjects in the ACLUE dataset using loglikelihood-based multiple-choice scoring: +- `aclue_{subject_english}` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? + * [x] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/__pycache__/_generate_configs.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/__pycache__/_generate_configs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9fd50c9eb6cdcff25c14ff940a985045fb94e98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/__pycache__/_generate_configs.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/_default_template_yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..d93ce382d57e8a449868deb79ab544551f7e605b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/_default_template_yaml @@ -0,0 +1,19 @@ +group: aclue +dataset_path: tyouisen/aclue +test_split: test +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. 
{{D}}\n答案:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/_generate_configs.py b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd1792ae3d200b422c6f804ef7d89252591b2a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/_generate_configs.py @@ -0,0 +1,82 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" + +import argparse +import os + +import yaml +from tqdm import tqdm + +from lm_eval.utils import eval_logger + + +SUBJECTS = { + "古文单字多义": "polysemy_resolution", + "诗词情感分类": "poetry_sentiment_analysis", + "古汉语命名体识别": "named_entity_recognition", + "古汉语知识": "basic_ancient_chinese", + "古诗词上下句预测": "poetry_context_prediction", + "古文断句": "sentence_segmentation", + "对联": "couplet_prediction", + "古诗词曲鉴赏": "poetry_appreciate", + "国学常识": "ancient_chinese_culture", + "古音学": "ancient_phonetics", + "通假字": "homographic_character_resolution", + "古代文学知识": "ancient_literature", + "医古文": "ancient_medical", + "古诗词质量评估": "poetry_quality_assessment", + "古文阅读理解": "reading_comprehension", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="aclue") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. 
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path, encoding="utf-8") as f: + cot_file = json.load(f) + + for subject_zh, subject_eng in tqdm(SUBJECTS.items()): + if args.cot_prompt_path is not None: + description = cot_file[subject_eng] + else: + description = ( + f"以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n" + ) + + yaml_dict = { + "include": base_yaml_name, + "task": f"aclue_{args.task_prefix}_{subject_eng}" + if args.task_prefix != "" + else f"aclue_{subject_eng}", + "dataset_name": subject_eng, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml" + eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9f52077dedd24ce500247a4b606eea83fac6320 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ancient_chinese_culture" +"description": "以下是关于国学常识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_ancient_chinese_culture" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_literature.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_literature.yaml new file mode 100644 index 0000000000000000000000000000000000000000..641befa3aa1920d8dca1c7007a4fe8cd24ab8e77 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_literature.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ancient_literature" +"description": "以下是关于古代文学知识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_ancient_literature" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_medical.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_medical.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bceaa702c53a1526fc84cf8f5141570352581a44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_medical.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ancient_medical" +"description": "以下是关于医古文的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_ancient_medical" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fe908e531a07466a66f58f2f5009d5111d5a02d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ancient_phonetics" +"description": "以下是关于古音学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_ancient_phonetics" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5afb88be88b8778fde06cff3a2084bce14397174 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml @@ -0,0 +1,4 @@ +"dataset_name": "basic_ancient_chinese" +"description": "以下是关于古汉语知识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_basic_ancient_chinese" diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..63124eed8eb2c2987e7145ee4633e010407641be --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml @@ -0,0 +1,4 @@ +"dataset_name": "couplet_prediction" +"description": "以下是关于对联的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_couplet_prediction" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7d50e35d5f31badfc13b1815fffa487b3fc64c82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml @@ -0,0 +1,4 @@ +"dataset_name": "homographic_character_resolution" +"description": "以下是关于通假字的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_homographic_character_resolution" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..566e93019b994528bb003f46fb458ed725ef8af1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml @@ -0,0 +1,4 @@ +"dataset_name": "named_entity_recognition" +"description": "以下是关于古汉语命名体识别的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_named_entity_recognition" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml 
new file mode 100644 index 0000000000000000000000000000000000000000..4642992674a1f159fe101859dead4509df6c8166 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml @@ -0,0 +1,4 @@ +"dataset_name": "poetry_appreciate" +"description": "以下是关于古诗词曲鉴赏的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_poetry_appreciate" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b408b659657b4677e056f93c59f2a59ef60cb95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml @@ -0,0 +1,4 @@ +"dataset_name": "poetry_context_prediction" +"description": "以下是关于古诗词上下句预测的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_poetry_context_prediction" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7a7bee2c4ca59e0dc7b2f3fdc08371a9a585d42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml @@ -0,0 +1,4 @@ +"dataset_name": "poetry_quality_assessment" +"description": "以下是关于古诗词质量评估的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_poetry_quality_assessment" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e1367f8043d7e1e9ebcd01dfbaacfbdeb0f9fec --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml @@ -0,0 +1,4 @@ +"dataset_name": "poetry_sentiment_analysis" +"description": "以下是关于诗词情感分类的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_poetry_sentiment_analysis" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee0deea16f6bcb6906fd68e2e65bf72ea276e74a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml @@ -0,0 +1,4 @@ +"dataset_name": "polysemy_resolution" +"description": "以下是关于古文单字多义的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_polysemy_resolution" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92f2455d8089bcc3b7d1ff8b99c03144b5b7d61d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml @@ -0,0 +1,4 @@ +"dataset_name": "reading_comprehension" +"description": "以下是关于古文阅读理解的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_reading_comprehension" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d81c3fe6eae35a6adc888d9c73430aa891bfe86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sentence_segmentation" +"description": "以下是关于古文断句的单项选择题,请直接给出正确答案的选项。\n\n" +"include": 
"_default_template_yaml" +"task": "aclue_sentence_segmentation" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/README.md new file mode 100644 index 0000000000000000000000000000000000000000..799c6c1ff90b9b38d71c92a30e787b073e139073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/README.md @@ -0,0 +1,53 @@ +# Arabic EXAMS + +### Paper + +EXAMS: a resource specialized in multilingual high school exam questions. +The original paper [EXAMS](https://aclanthology.org/2020.emnlp-main.438/) + +The Arabic EXAMS dataset includes five subjects + + - Islamic studies + - Biology + - Physics + - Science + - Social + +The original dataset [EXAMS-QA](https://github.com/mhardalov/exams-qa) + +EXAMS is a benchmark dataset for cross-lingual and multilingual question answering for high school examinations. +With 24,000 high-quality high school exam questions in 16 languages, covering 8 language families and 24 school subjects from Natural Sciences and Social Sciences, among others. +EXAMS offers unique fine-grained evaluation framework across multiple languages and subjects + +Homepage for Arabic EXAMS: [EXAMS Arabic Homepage](https://github.com/FreedomIntelligence/AceGPT/tree/main/eval/benchmark_eval/benchmarks/EXAMS_Arabic) + +### Citation + + +### Groups and Tasks + +#### Groups + +- `EXAMS Arabic`: include IslamicStudies, Biology, Science, Physics, Social. + +#### Tasks + + +The following tasks evaluate subjects in Arabic EXAMS dataset using loglikelihood-based multiple-choice scoring: +- `aexams_IslamicStudies` +- `aexams_Biology` +- `aexams_Science` +- `aexams_Physics` +- `aexams_Social` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? 
+ * [x] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/_default_template_yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..12438deb7f3ec3ffddf44ec85a4dc5150b9c6411 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/_default_template_yaml @@ -0,0 +1,19 @@ +group: aexams +dataset_path: Hennara/aexams +test_split: test +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nالجواب:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Biology.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ee2e33b5844ef438da4ac51bfd916af04cb53e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "Biology" +"description": "قم بالإجابة على مايلي في مجال العلوم الحيوية\n\n" +"include": "_default_template_yaml" +"task": "aexams_Biology" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_IslamicStudies.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_IslamicStudies.yaml 
new file mode 100644 index 0000000000000000000000000000000000000000..831afc376ec25fdeddbb18bf5e4063d2e3c17ebf --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_IslamicStudies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "IslamicStudies" +"description": "قم بالإجابة على مايلي في مجال العلوم الإسلامية \n\n" +"include": "_default_template_yaml" +"task": "aexams_IslamicStudies" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Physics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2764a06ef2680a1c81ccca0e76dcbcf1ba52672 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "Physics" +"description": "قم بالإجابة على مايلي في مجال الفيزياء \n\n" +"include": "_default_template_yaml" +"task": "aexams_Physics" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Science.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c89dc8c8ca6d32b922483f48ee8da427e027a92b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "Science" +"description": "قم بالإجابة على مايلي في مجال العلوم \n\n" +"include": "_default_template_yaml" +"task": "aexams_Science" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Social.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Social.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3042a419e6e3902ddd0090028fc4b875a148a213 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/aexams/aexams_Social.yaml @@ -0,0 +1,4 @@ +"dataset_name": "Social" +"description": "قم بالإجابة على مايلي في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" 
+"task": "aexams_Social" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/README.md new file mode 100644 index 0000000000000000000000000000000000000000..faaf47b6beab877c7ee341a8dc2fc3e14a04b021 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/README.md @@ -0,0 +1,114 @@ +# AGIEval + +### Paper + +Title: AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models + +Abstract: https://arxiv.org/abs/2304.06364.pdf + +AGIEval is a human-centric benchmark specifically designed to evaluate the general abilities of foundation models in tasks pertinent to human cognition and problem-solving. +This benchmark is derived from 20 official, public, and high-standard admission and qualification exams intended for general human test-takers, such as general college admission tests (e.g., Chinese College Entrance Exam (Gaokao) and American SAT), law school admission tests, math competitions, lawyer qualification tests, and national civil service exams. + +Homepage: https://github.com/ruixiangcui/AGIEval + +### Citation + +``` +@misc{zhong2023agieval, + title={AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models}, + author={Wanjun Zhong and Ruixiang Cui and Yiduo Guo and Yaobo Liang and Shuai Lu and Yanlin Wang and Amin Saied and Weizhu Chen and Nan Duan}, + year={2023}, + eprint={2304.06364}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +Please make sure to cite all the individual datasets in your paper when you use them. 
We provide the relevant citation information below: + +``` +@inproceedings{ling-etal-2017-program, + title = "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems", + author = "Ling, Wang and + Yogatama, Dani and + Dyer, Chris and + Blunsom, Phil", + booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", + month = jul, + year = "2017", + address = "Vancouver, Canada", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/P17-1015", + doi = "10.18653/v1/P17-1015", + pages = "158--167", + abstract = "Solving algebraic word problems requires executing a series of arithmetic operations{---}a program{---}to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. 
Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.", +} + +@inproceedings{hendrycksmath2021, + title={Measuring Mathematical Problem Solving With the MATH Dataset}, + author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, + journal={NeurIPS}, + year={2021} +} + +@inproceedings{Liu2020LogiQAAC, + title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning}, + author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang}, + booktitle={International Joint Conference on Artificial Intelligence}, + year={2020} +} + +@inproceedings{zhong2019jec, + title={JEC-QA: A Legal-Domain Question Answering Dataset}, + author={Zhong, Haoxi and Xiao, Chaojun and Tu, Cunchao and Zhang, Tianyang and Liu, Zhiyuan and Sun, Maosong}, + booktitle={Proceedings of AAAI}, + year={2020}, +} + +@article{Wang2021FromLT, + title={From LSAT: The Progress and Challenges of Complex Reasoning}, + author={Siyuan Wang and Zhongkun Liu and Wanjun Zhong and Ming Zhou and Zhongyu Wei and Zhumin Chen and Nan Duan}, + journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing}, + year={2021}, + volume={30}, + pages={2201-2216} +} +``` + +### Groups and Tasks + +#### Groups + +- `agieval`: Evaluates all tasks listed below. 
+ +- `agieval_en`: Evaluates all English subtasks: `agieval_aqua_rat`, `agieval_gaokao_english`, `agieval_logiqa_en`, `agieval_lsat_*`, `agieval_sat_*`, `agieval_math` + +- `agieval_cn`: Evaluates all Chinese subtasks: +`agieval_gaokao_biology`, `agieval_gaokao_chemistry`, `agieval_gaokao_chinese`, `agieval_gaokao_geography`, +`agieval_gaokao_history`, `agieval_gaokao_mathqa`, `agieval_gaokao_mathcloze`, `agieval_gaokao_physics`, `agieval_jec_qa_ca`, `agieval_jec_qa_kd`, `agieval_logiqa_zh` + +- `agieval_nous`: Evaluates a specific subset of AGIEval tasks (multiple-choice and english-only), namely those in https://github.com/teknium1/LLM-Benchmark-Logs/blob/main/benchmark-logs/Mistral-7B-Base.md + +#### Tasks + +- `agieval_aqua_rat` +- `agieval_gaokao_biology` +- `agieval_gaokao_chemistry` +- `agieval_gaokao_chinese` +- `agieval_gaokao_english` +- `agieval_gaokao_geography` +- `agieval_gaokao_history` +- `agieval_gaokao_mathqa` +- `agieval_gaokao_mathcloze` +- `agieval_gaokao_physics` +- `agieval_jec_qa_ca` +- `agieval_jec_qa_kd` +- `agieval_logiqa_en` +- `agieval_logiqa_zh` +- `agieval_lsat_ar` +- `agieval_lsat_lr` +- `agieval_lsat_rc` +- `agieval_sat_en` +- `agieval_sat_en_without_passage` +- `agieval_sat_math` +- `agieval_math` diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/__pycache__/utils.cpython-312.pyc b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9555a607ce187799c965aad255ee1aa02e65265b Binary files /dev/null and b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/aqua-rat.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/aqua-rat.yaml new file mode 100644 index 0000000000000000000000000000000000000000..babebf638edcf0e9c5a2432adb6a2fdaf4793c1d --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/aqua-rat.yaml @@ -0,0 +1,24 @@ +group: + - agieval + - agieval_en + - agieval_nous +task: agieval_aqua_rat +dataset_path: hails/agieval-aqua-rat +dataset_name: null +output_type: multiple_choice +training_split: null +validation_split: null +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{gold}}" +doc_to_choice: "{{choices}}" +process_results: !function utils.process_results_mcqa +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-biology.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36c44cbbeeb730f05c9d425c20f02c78acc81563 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-biology.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_biology +dataset_path: hails/agieval-gaokao-biology diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-chemistry.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69810122eb274cdcb285232330a19807886ee50d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-chemistry.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_chemistry +dataset_path: hails/agieval-gaokao-chemistry diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-chinese.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-chinese.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30d249b9d5544a3441e50284929aac6f081d6b76 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-chinese.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_chinese +dataset_path: hails/agieval-gaokao-chinese diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-english.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-english.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a540fcf25f503be64d3f5810be7b037a2e7c0504 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-english.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_en # categorizing as EN because the AGIEval codebase lists this as in `english_qa_tasks` +task: agieval_gaokao_english +dataset_path: hails/agieval-gaokao-english diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-geography.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fe43bfd2cb620328dfb28ba4a4e9e6d6d093c07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-geography.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_geography +dataset_path: hails/agieval-gaokao-geography diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-history.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9c9c630fa2c843da5c8311b1e0570bb1cc267f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-history.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_history +dataset_path: hails/agieval-gaokao-history diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-mathcloze.yaml 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-mathcloze.yaml new file mode 100644 index 0000000000000000000000000000000000000000..74cbad1c0325c4fb9fe78df83304741553c06134 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-mathcloze.yaml @@ -0,0 +1,25 @@ +group: + - agieval + - agieval_cn +task: agieval_gaokao_mathcloze +dataset_path: hails/agieval-gaokao-mathcloze +dataset_name: null +output_type: generate_until +training_split: null +validation_split: null +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{answer}}" +process_results: !function utils.process_results +generation_kwargs: + max_gen_toks: 32 + do_sample: False + temperature: 0.0 + until: + - "Q:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-mathqa.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-mathqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa94e8eec85a931e5acbdb843730b58e8c1506e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-mathqa.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_mathqa +dataset_path: hails/agieval-gaokao-mathqa diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-physics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..175dd6cca03fab93107e0bab827ea356ceb127eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/gaokao-physics.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_physics +dataset_path: hails/agieval-gaokao-physics diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/jec-qa-ca.yaml 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/jec-qa-ca.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f93b47a5b1418d839933b71e71b523fd38696691 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/jec-qa-ca.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_jec_qa_ca +dataset_path: hails/agieval-jec-qa-ca diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/jec-qa-kd.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/jec-qa-kd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0458eb7ea8356df569ac6c3b50af0bd4097ea857 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/jec-qa-kd.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_jec_qa_kd +dataset_path: hails/agieval-jec-qa-kd diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/logiqa-en.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/logiqa-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7112418659c4478c4e59f9bdcdebb6d64e7b9bb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/logiqa-en.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_logiqa_en +dataset_path: hails/agieval-logiqa-en diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/logiqa-zh.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/logiqa-zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..82e688006b8272e015a74b01412ad35cfe33561e --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/logiqa-zh.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_logiqa_zh +dataset_path: hails/agieval-logiqa-zh diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-ar.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..302f9b519ee268831c1725fb96322d6628b9fdf9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-ar.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_lsat_ar +dataset_path: hails/agieval-lsat-ar diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-lr.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-lr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62158e5cec196c0c7887a7236e1020ba2946da26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-lr.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_lsat_lr +dataset_path: hails/agieval-lsat-lr diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-rc.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-rc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de155af78aa8d5ad3b14849d8a2807a7194f6744 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/lsat-rc.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_lsat_rc +dataset_path: hails/agieval-lsat-rc diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/math.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8ec9eec608c4eaced456c36dcb5dc9047ccd84e --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/math.yaml @@ -0,0 +1,25 @@ +group: + - agieval + - agieval_en +task: agieval_math +dataset_path: hails/agieval-math +dataset_name: null +output_type: generate_until 
+training_split: null +validation_split: null +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{answer}}" +process_results: !function utils.process_results +generation_kwargs: + max_gen_toks: 32 + do_sample: False + temperature: 0.0 + until: + - "Q:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-en-without-passage.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-en-without-passage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01490d9ee10aba867a1863e9d6a74b678f4f5588 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-en-without-passage.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_sat_en_without_passage +dataset_path: hails/agieval-sat-en-without-passage diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-en.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a45dba1507a562ace2f56f9a0096ff25f767f1e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-en.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_sat_en +dataset_path: hails/agieval-sat-en diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-math.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5b644ee062975dbdb74870428d71189e297343a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/sat-math.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_sat_math +dataset_path: hails/agieval-sat-math diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/utils.py b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aa6e544f1a7e15e853b99be2fe01502baadefcee --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/agieval/utils.py @@ -0,0 +1,274 @@ +# Answer parsing and normalization code, from +# https://github.com/ruixiangcui/AGIEval/blob/main/src/ +# math_equivalence.py and post_process.py +import re +from typing import Dict, List + +import numpy as np + + +def parse_math_answer(raw_string): + def remove_boxed(s): + left = "\\boxed{" + try: + assert s[: len(left)] == left + assert s[-1] == "}" + answer = s[len(left) : -1] + if "=" in answer: + answer = answer.split("=")[-1].lstrip(" ") + return answer + except Exception: + return None + + def last_boxed_only_string(string): + idx = string.rfind("\\boxed") + if idx < 0: + idx = string.rfind("\\fbox") + if idx < 0: + return None + i = idx + right_brace_idx = None + num_left_braces_open = 0 + while i < len(string): + if string[i] == "{": + num_left_braces_open += 1 + if string[i] == "}": + num_left_braces_open -= 1 + if num_left_braces_open == 0: + right_brace_idx = i + break + i += 1 + + if right_brace_idx is None: + retval = None + else: + retval = string[idx : right_brace_idx + 1] + + return retval + + def get_answer_with_dollar_sign(s): + first_pattern = "\$(.*)\$" + last_match = None + matches = re.findall(first_pattern, s) + if matches: + last_match = matches[-1] + if "=" in last_match: + last_match = last_match.split("=")[-1].lstrip(" ") + return last_match + + def get_answer_without_dollar_sign(s): + last_match = None + if "=" in s: + last_match = s.split("=")[-1].lstrip(" ").rstrip(".") + if "\\n" in last_match: + last_match = last_match.split("\\n")[0] + else: + pattern = "(?:\\$)?\d+(?:\.\d+)?(?![\w\d])" + matches = re.findall(pattern, s) + if matches: + last_match = matches[-1] + return last_match + + if 
"\\boxed" in raw_string: + answer = remove_boxed(last_boxed_only_string(raw_string)) + else: + answer = get_answer_with_dollar_sign(raw_string) + if not answer: + answer = get_answer_without_dollar_sign(raw_string) + return answer + + +# code from https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py +def _fix_fracs(string): + substrs = string.split("\\frac") + new_str = substrs[0] + if len(substrs) > 1: + substrs = substrs[1:] + for substr in substrs: + new_str += "\\frac" + if substr[0] == "{": + new_str += substr + else: + try: + assert len(substr) >= 2 + except Exception: + return string + a = substr[0] + b = substr[1] + if b != "{": + if len(substr) > 2: + post_substr = substr[2:] + new_str += "{" + a + "}{" + b + "}" + post_substr + else: + new_str += "{" + a + "}{" + b + "}" + else: + if len(substr) > 2: + post_substr = substr[2:] + new_str += "{" + a + "}" + b + post_substr + else: + new_str += "{" + a + "}" + b + string = new_str + return string + + +def _fix_a_slash_b(string): + if len(string.split("/")) != 2: + return string + a = string.split("/")[0] + b = string.split("/")[1] + try: + a = int(a) + b = int(b) + assert string == "{}/{}".format(a, b) + new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" + return new_string + except Exception: + return string + + +def _remove_right_units(string): + # "\\text{ " only ever occurs (at least in the val set) when describing units + if "\\text{ " in string: + splits = string.split("\\text{ ") + assert len(splits) == 2 + return splits[0] + else: + return string + + +def _fix_sqrt(string): + if "\\sqrt" not in string: + return string + splits = string.split("\\sqrt") + new_string = splits[0] + for split in splits[1:]: + if split[0] != "{": + a = split[0] + new_substr = "\\sqrt{" + a + "}" + split[1:] + else: + new_substr = "\\sqrt" + split + new_string += new_substr + return new_string + + +def _strip_string(string): + # linebreaks + string = string.replace("\n", "") + # print(string) + + # 
remove inverse spaces + string = string.replace("\\!", "") + # print(string) + + # replace \\ with \ + string = string.replace("\\\\", "\\") + # print(string) + + # replace tfrac and dfrac with frac + string = string.replace("tfrac", "frac") + string = string.replace("dfrac", "frac") + # print(string) + + # remove \left and \right + string = string.replace("\\left", "") + string = string.replace("\\right", "") + # print(string) + + # Remove circ (degrees) + string = string.replace("^{\\circ}", "") + string = string.replace("^\\circ", "") + + # remove dollar signs + string = string.replace("\\$", "") + + # remove units (on the right) + string = _remove_right_units(string) + + # remove percentage + string = string.replace("\\%", "") + string = string.replace("\%", "") + + # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string + string = string.replace(" .", " 0.") + string = string.replace("{.", "{0.") + # if empty, return empty string + if len(string) == 0: + return string + if string[0] == ".": + string = "0" + string + + # to consider: get rid of e.g. "k = " or "q = " at beginning + if len(string.split("=")) == 2: + if len(string.split("=")[0]) <= 2: + string = string.split("=")[1] + + # fix sqrt3 --> sqrt{3} + string = _fix_sqrt(string) + + # remove spaces + string = string.replace(" ", "") + + # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). 
Also does a/b --> \\frac{a}{b} + string = _fix_fracs(string) + + # manually change 0.5 --> \frac{1}{2} + if string == "0.5": + string = "\\frac{1}{2}" + + # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y + string = _fix_a_slash_b(string) + + return string + + +def is_equiv(str1, str2, verbose=False): + if str1 is None and str2 is None: + print("WARNING: Both None") + return True + if str1 is None or str2 is None: + return False + + str1, str2 = parse_math_answer(str1), parse_math_answer(str2) + + try: + ss1 = _strip_string(str1) + ss2 = _strip_string(str2) + if verbose: + print(ss1, ss2) + return ss1 == ss2 + except Exception: + return str1 == str2 + + +def process_results(doc: dict, results: List[str]) -> Dict[str, int]: + candidate = results[0] + + gold = doc["answer"] + + if not gold: + print(doc, candidate, gold) + if is_equiv(candidate, gold): + retval = 1 + else: + retval = 0 + + results = { + "acc": retval, + } + return results + + +# use a custom process_results() function, because AGIEval can have multiple valid answers +def process_results_mcqa(doc, results): + results = [result[0] for result in results] + + gold = doc["gold"] + + acc = 1.0 if int(np.argmax(results)) in gold else 0.0 + completion_len = np.array([float(len(i)) for i in doc["choices"]]) + acc_norm = 1.0 if int(np.argmax(results / completion_len)) in gold else 0.0 + + return { + "acc": acc, + "acc_norm": acc_norm, + } diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/copa_ar/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/copa_ar/README.md new file mode 100644 index 0000000000000000000000000000000000000000..972acb9f7431d34c216bbd27fee35f7ca138dcf5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/copa_ar/README.md @@ -0,0 +1,40 @@ +#Arabic COPA + +### Paper + +Original Title: `COPA` + + + +The Choice Of Plausible Alternatives (COPA) evaluation provides researchers with a 
tool for assessing progress in open-domain commonsense causal reasoning. + +[Homepage](https://people.ict.usc.edu/~gordon/copa.html) + +AlGhafa has translated this dataset to Arabic[AlGafa](https://aclanthology.org/2023.arabicnlp-1.21.pdf) + +The link to the Arabic version of the dataset [PICA](https://gitlab.com/tiiuae/alghafa/-/tree/main/arabic-eval/copa_ar) + +### Citation + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `copa_ar` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/copa_ar/copa_ar.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/copa_ar/copa_ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e35d1688babf0b5386f70f563fa923242540d0d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/copa_ar/copa_ar.yaml @@ -0,0 +1,21 @@ +task: copa_ar +dataset_path: Hennara/copa_ar +dataset_name: null +output_type: multiple_choice +training_split: null +validation_split: null +test_split: test +doc_to_text: "السؤال: {{query}}\nالجواب:" +doc_to_choice: "{{[sol1, sol2]}}" +doc_to_target: label +should_decontaminate: true +doc_to_decontamination_query: query +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/piqa_ar/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/piqa_ar/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e1b71e93da4c00104c38c03b9d4486966e8ad567 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/piqa_ar/README.md @@ -0,0 +1,43 @@ +#Arabic PIQA + +### Paper + +Original Title: `PIQA: Reasoning about Physical Commonsense in Natural Language` + +Original paper: [PICA](https://arxiv.org/abs/1911.11641) + +Physical Interaction: Question Answering (PIQA) is a physical commonsense +reasoning and a corresponding benchmark dataset. PIQA was designed to investigate +the physical knowledge of existing models. To what extent are current approaches +actually learning about the world? 
+ +[Homepage](https://yonatanbisk.com/piqa) + +AlGhafa has translated this dataset to Arabic[AlGafa](https://aclanthology.org/2023.arabicnlp-1.21.pdf) + +The link to the Arabic version of the dataset [PICA](https://gitlab.com/tiiuae/alghafa/-/tree/main/arabic-eval/pica_ar) + +### Citation + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `piqa_ar` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/piqa_ar/piqa_ar.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/piqa_ar/piqa_ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..19dfaee0c609f409d3bd6e37163054c2e80af37a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/alghafa/piqa_ar/piqa_ar.yaml @@ -0,0 +1,21 @@ +task: piqa_ar +dataset_path: Hennara/pica_ar +dataset_name: null +output_type: multiple_choice +training_split: null +validation_split: null +test_split: test +doc_to_text: "السؤال: {{goal}}\nالجواب:" +doc_to_choice: "{{[sol1, sol2]}}" +doc_to_target: label +should_decontaminate: true +doc_to_decontamination_query: goal +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ba3f99d4826f0604f583772a2b48fe676a6f3e06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/README.md @@ -0,0 +1,56 @@ +# ANLI + +### Paper + +Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding` + +Paper Link: https://arxiv.org/abs/1910.14599 + +Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial +human-and-model-in-the-loop procedure. It consists of three rounds that progressively +increase in difficulty and complexity, and each question-answer includes annotator- +provided explanations. 
+ +Homepage: https://github.com/facebookresearch/anli + +### Citation + +``` +@inproceedings{nie-etal-2020-adversarial, + title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding", + author = "Nie, Yixin and + Williams, Adina and + Dinan, Emily and + Bansal, Mohit and + Weston, Jason and + Kiela, Douwe", + booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", + year = "2020", + publisher = "Association for Computational Linguistics", +} +``` + +### Groups and Tasks + +#### Groups + +* `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3` + +#### Tasks +* `anli_r1`: The data collected adversarially in the first round. +* `anli_r2`: The data collected adversarially in the second round, after training on the previous round's data. +* `anli_r3`: The data collected adversarially in the third round, after training on the previous multiple rounds of data. + + +### Checklist + +For adding novel benchmarks/datasets to the library: + * [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r1.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcf7674ee1bfc91f35e1566a6ddc5dc946c0ba72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r1.yaml @@ -0,0 +1,26 @@ +group: + - anli +task: anli_r1 +dataset_path: anli +dataset_name: null +output_type: multiple_choice +training_split: train_r1 +validation_split: dev_r1 +test_split: test_r1 +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:" +# True = entailment +# False = contradiction +# Neither = neutral +doc_to_target: "{{['True', 'Neither', 'False'][label]}}" +doc_to_choice: + - "True" + - "Neither" + - "False" +should_decontaminate: true +doc_to_decontamination_query: premise +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r2.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85f28d67cf230fa36cd38dd8d6a345f6e679c53e --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r2.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r2 +training_split: train_r2 +validation_split: dev_r2 +test_split: test_r2 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r3.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b9f98a867f7d03b90e84a425dc8b044b4cc96fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/anli/anli_r3.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r3 +training_split: train_r3 +validation_split: dev_r3 +test_split: test_r3 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/README.md 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..90de14b7fc6fb5295b7c597379a3d120abbb5ad7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/README.md @@ -0,0 +1,40 @@ +# ArabicMMLU + +### Paper + +Title: ArabicMMLU: Assessing Massive Multitask Language Understanding in Arabic + +Abstract: https://arxiv.org/abs/2402.12840 + +The focus of language model evaluation has +transitioned towards reasoning and knowledge intensive tasks, driven by advancements in pretraining large models. While state-of-the-art models are partially trained on large Arabic texts, evaluating their performance in Arabic remains challenging due to the limited availability of relevant datasets. To bridge this gap, we present ArabicMMLU, the first multi-task language understanding benchmark for Arabic language, sourced from school exams across diverse educational levels in different countries spanning North Africa, the Levant, and the Gulf regions. Our data comprises 40 tasks and 14,575 multiple-choice questions in Modern Standard Arabic (MSA), and is carefully constructed by collaborating with native speakers in the region. Our comprehensive evaluations of 35 models reveal substantial room for improvement, particularly among the best open-source models. Notably, BLOOMZ, mT0, LLama2, and Falcon struggle to achieve a score of 50%, while even the top-performing Arabic centric model only achieves a score of 62.3%. + +The authors of the paper conducted studies by varying the language of the initial prompt and answer keys between English and Arabic. However, they set English initial prompts and answer keys as the standard, which is the version implemented in this task. 
+ +Homepage: https://github.com/mbzuai-nlp/ArabicMMLU + + +### Citation + +``` +@misc{koto2024arabicmmlu, + title={ArabicMMLU: Assessing Massive Multitask Language Understanding in Arabic}, + author={Fajri Koto and Haonan Li and Sara Shatnawi and Jad Doughman and Abdelrahman Boda Sadallah and Aisha Alraeesi and Khalid Almubarak and Zaid Alyafeai and Neha Sengupta and Shady Shehata and Nizar Habash and Preslav Nakov and Timothy Baldwin}, + year={2024}, + eprint={2402.12840}, + archivePrefix={arXiv}, + primaryClass={id='cs.CL' full_name='Computation and Language' is_active=True alt_name='cmp-lg' in_archive='cs' is_general=False description='Covers natural language processing. Roughly includes material in ACM Subject Class I.2.7. Note that work on artificial languages (programming languages, logics, formal systems) that does not explicitly address natural-language issues broadly construed (natural-language processing, computational linguistics, speech, text retrieval, etc.) is not appropriate for this area.'} +} +``` + +### Groups and Tasks + +#### Groups + +* `arabicmmlu`: evaluates all ArabicMMLU tasks. + +* `arabicmmlu_stem`: evaluates STEM ArabicMMLU tasks. +* `arabicmmlu_stem_social_science`: evaluates social science ArabicMMLU tasks. +* `arabicmmlu_stem_humanities`: evaluates humanities ArabicMMLU tasks. +* `arabicmmlu_stem_language`: evaluates Arabic language ArabicMMLU tasks. +* `arabicmmlu_stem_other`: evaluates other ArabicMMLU tasks. 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/_default_template_yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..9af55ba7e7066b6d73e8ef806514bd5f6259483f --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/_default_template_yaml @@ -0,0 +1,13 @@ +dataset_path: yazeed7/ArabicMMLU +test_split: test +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: !function utils.doc_to_text +doc_to_choice: !function utils.doc_to_choice +doc_to_target: "Answer Key" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/_generate_configs.py b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..83e6594e50cf7de81bbc30302889ed7339e36232 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/_generate_configs.py @@ -0,0 +1,119 @@ +""" +Take in a YAML, and output all "other" splits with this YAML +""" + +import argparse +import logging +import os + +import yaml +from tqdm import tqdm + + +eval_logger = logging.getLogger("lm-eval") + + +SUBJECTS = { + "Driving Test": "other", + "High Geography": "social_science", + "High History": "humanities", + "Islamic Studies": "humanities", + "Univ Accounting": "social_science", + "Primary General Knowledge": "other", + "Univ Political Science": "social_science", + "Primary Math": "stem", + "Middle General Knowledge": "other", + "High Biology": "stem", + "Primary Natural Science": "stem", + "High Economics": "social_science", + "Middle Natural Science": "stem", + "Middle Geography": "social_science", + "Primary Social Science": "social_science", + "Middle Computer Science": "stem", + "Middle Islamic Studies": "humanities", + "Primary 
Computer Science": "stem", + "High Physics": "stem", + "Middle Social Science": "social_science", + "Middle Civics": "social_science", + "High Computer Science": "stem", + "General Knowledge": "other", + "High Civics": "social_science", + "Prof Law": "humanities", + "High Islamic Studies": "humanities", + "Primary Arabic Language": "language", + "High Arabic Language": "language", + "Arabic Language (Grammar)": "language", + "Primary History": "humanities", + "Middle History": "humanities", + "Univ Economics": "social_science", + "Arabic Language (General)": "language", + "Univ Computer Science": "stem", + "Primary Islamic Studies": "humanities", + "Primary Geography": "social_science", + "High Philosophy": "humanities", + "Middle Arabic Language": "language", + "Middle Economics": "social_science", + "Univ Management": "other", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", default="_default_template_yaml") + parser.add_argument("--save_prefix_path", default="arabicmmlu") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs. 
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + ALL_CATEGORIES = [] + for subject, category in tqdm(SUBJECTS.items()): + if category not in ALL_CATEGORIES: + ALL_CATEGORIES.append(category) + + # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n" + + yaml_dict = { + "include": base_yaml_name, + "group": f"arabicmmlu_{category}", + "group_alias": category.replace("_", " "), + "task": f"arabicmmlu_{subject.lower().replace(' ', '_')}", + "task_alias": subject, + "dataset_name": subject, + # "description": description, + } + + file_save_path = ( + args.save_prefix_path + + f"_{subject.lower().replace(' ', '_').replace('(', '').replace(')', '')}.yaml" + ) + eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + allow_unicode=True, + default_style='"', + ) + + arabicmmlu_subcategories = [f"arabicmmlu_{category}" for category in ALL_CATEGORIES] + + file_save_path = args.save_prefix_path + ".yaml" + + eval_logger.info(f"Saving benchmark config to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + { + "group": "arabicmmlu", + "task": arabicmmlu_subcategories, + }, + yaml_file, + indent=4, + default_flow_style=False, + ) diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89e9b3d598188616b651004df8dc8f785423d96a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu.yaml @@ -0,0 +1,7 @@ +group: arabicmmlu +task: +- arabicmmlu_other +- arabicmmlu_social_science +- arabicmmlu_humanities +- arabicmmlu_stem +- arabicmmlu_language 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_general.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_general.yaml new file mode 100644 index 0000000000000000000000000000000000000000..393109aa0b606ce94881894fa7d030883357f6cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_general.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Arabic Language (General)" +"group": "arabicmmlu_language" +"group_alias": "language" +"include": "_default_template_yaml" +"task": "arabicmmlu_arabic_language_(general)" +"task_alias": "Arabic Language (General)" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_grammar.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_grammar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b077a398cc214ccc3e63c5f3fd20f34c623fe437 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_grammar.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Arabic Language (Grammar)" +"group": "arabicmmlu_language" +"group_alias": "language" +"include": "_default_template_yaml" +"task": "arabicmmlu_arabic_language_(grammar)" +"task_alias": "Arabic Language (Grammar)" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_driving_test.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_driving_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..488633fd3088d0b79c9b395a80e00f368e77be2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_driving_test.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Driving Test" +"group": "arabicmmlu_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "arabicmmlu_driving_test" +"task_alias": "Driving Test" diff 
--git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_general_knowledge.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_general_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b328b4f918c90298750a5864099ce43c676c88be --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_general_knowledge.yaml @@ -0,0 +1,6 @@ +"dataset_name": "General Knowledge" +"group": "arabicmmlu_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "arabicmmlu_general_knowledge" +"task_alias": "General Knowledge" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_arabic_language.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_arabic_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3d0005a216fba6c413b29d86d81fba9189122bfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_arabic_language.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Arabic Language" +"group": "arabicmmlu_language" +"group_alias": "language" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_arabic_language" +"task_alias": "High Arabic Language" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_biology.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1fc0b6a50c990d1ec4300d84f58106999e719904 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_biology.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Biology" +"group": "arabicmmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_biology" +"task_alias": "High Biology" diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_civics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_civics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3bb13d55fb8d80dcae6fd0860dc9ad590462514 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_civics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Civics" +"group": "arabicmmlu_social_science" +"group_alias": "social science" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_civics" +"task_alias": "High Civics" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_computer_science.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d85f2f8aa854c14c75715ccca781634b390e66ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_computer_science.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Computer Science" +"group": "arabicmmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_computer_science" +"task_alias": "High Computer Science" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_economics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da2519d9b5eb3525563d1e20f42594778e730d8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_economics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Economics" +"group": "arabicmmlu_social_science" +"group_alias": "social science" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_economics" +"task_alias": "High Economics" diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_geography.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88ec5a02034867e00178719bad2d7a91bd8137e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_geography.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Geography" +"group": "arabicmmlu_social_science" +"group_alias": "social science" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_geography" +"task_alias": "High Geography" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_history.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6cdfdefe1590b6b785bc48f3fd444a9326fcb374 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_history.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High History" +"group": "arabicmmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_history" +"task_alias": "High History" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_islamic_studies.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_islamic_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23a7834fd81795c7e38c55c8a3191aa513c3c2f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_islamic_studies.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Islamic Studies" +"group": "arabicmmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_islamic_studies" +"task_alias": "High Islamic Studies" diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_philosophy.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_philosophy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f61e3c7ffce23bc34f4cc6f46e2873e94a70465 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_philosophy.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Philosophy" +"group": "arabicmmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_philosophy" +"task_alias": "High Philosophy" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_physics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e510b0de6f8929776bcd1bede2631f833ad8f97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_high_physics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "High Physics" +"group": "arabicmmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "arabicmmlu_high_physics" +"task_alias": "High Physics" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_islamic_studies.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_islamic_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d97bbdccc9df86dc9b63c222c8c6dcb4154ffbfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_islamic_studies.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Islamic Studies" +"group": "arabicmmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "arabicmmlu_islamic_studies" +"task_alias": "Islamic Studies" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_arabic_language.yaml 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_arabic_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..efb6dd42a753dc37a98d2455ef7fd55c999e79d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_arabic_language.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Middle Arabic Language" +"group": "arabicmmlu_language" +"group_alias": "language" +"include": "_default_template_yaml" +"task": "arabicmmlu_middle_arabic_language" +"task_alias": "Middle Arabic Language" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_civics.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_civics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9de4060b3ee5f32ce9c85c7cd060a0bd84e97af9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_civics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Middle Civics" +"group": "arabicmmlu_social_science" +"group_alias": "social science" +"include": "_default_template_yaml" +"task": "arabicmmlu_middle_civics" +"task_alias": "Middle Civics" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_computer_science.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..635d09f65d88c18086adf865e00bdbd35480d8ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_computer_science.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Middle Computer Science" +"group": "arabicmmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "arabicmmlu_middle_computer_science" +"task_alias": "Middle Computer Science" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_economics.yaml 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef55891e6f4e4bfb7d5e9c94e568af2930ee5846 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_economics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Middle Economics" +"group": "arabicmmlu_social_science" +"group_alias": "social science" +"include": "_default_template_yaml" +"task": "arabicmmlu_middle_economics" +"task_alias": "Middle Economics" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_general_knowledge.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_general_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..993fd832d7b98690863fb02472af215112d2493a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_general_knowledge.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Middle General Knowledge" +"group": "arabicmmlu_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "arabicmmlu_middle_general_knowledge" +"task_alias": "Middle General Knowledge" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_geography.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c84a47bdebfbb7f94a8329c837decf124f9525e --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_geography.yaml @@ -0,0 +1,6 @@ +"dataset_name": "Middle Geography" +"group": "arabicmmlu_social_science" +"group_alias": "social science" +"include": "_default_template_yaml" +"task": "arabicmmlu_middle_geography" +"task_alias": "Middle Geography" diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/README.md 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b3eea45b45f4ba80e437446011fbde62642bc636 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/README.md @@ -0,0 +1,54 @@ +# ARC + +### Paper + +Title: Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge + +Abstract: https://arxiv.org/abs/1803.05457 + +The ARC dataset consists of 7,787 science exam questions drawn from a variety +of sources, including science questions provided under license by a research +partner affiliated with AI2. These are text-only, English language exam questions +that span several grade levels as indicated in the files. Each question has a +multiple choice structure (typically 4 answer options). The questions are sorted +into a Challenge Set of 2,590 “hard” questions (those that both a retrieval and +a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions. + +Homepage: https://allenai.org/data/arc + + +### Citation + +``` +@article{Clark2018ThinkYH, + title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge}, + author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord}, + journal={ArXiv}, + year={2018}, + volume={abs/1803.05457} +} +``` + +### Groups and Tasks + +#### Groups + +* `ai2_arc`: Evaluates `arc_easy` and `arc_challenge` + +#### Tasks + +* `arc_easy` +* `arc_challenge` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? 
+ + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/arc_challenge.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/arc_challenge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ad5149095e17711073606124968aa174af4c55a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/arc_challenge.yaml @@ -0,0 +1,3 @@ +include: arc_easy.yaml +task: arc_challenge +dataset_name: ARC-Challenge diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/arc_easy.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/arc_easy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b07629d331eaf91a272da31e2bf1e7cad273130a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc/arc_easy.yaml @@ -0,0 +1,23 @@ +group: + - ai2_arc +task: arc_easy +dataset_path: allenai/ai2_arc +dataset_name: ARC-Easy +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{choices.label.index(answerKey)}}" +doc_to_choice: "{{choices.text}}" +should_decontaminate: true +doc_to_decontamination_query: "Question: {{question}}\nAnswer:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5e1c6e401ab2b9b5ad112b0e5488a6b4178303a1 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/README.md @@ -0,0 +1,12 @@ +# arc mt + +arc mt is an implementation of tasks to support machine translated arc +challenge evals, to improve eval support across a number of additional +languages. + +The main page for the effort is +[here](https://huggingface.co/datasets/LumiOpen/arc_challenge_mt) and we will +include more data and analysis there. + +Initial datasets include a number of European languages, and we plan to expand +more in the future. diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_da.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f3efdc4ccafc6b2d710b445151dd21bc15649d62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_da.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_da +dataset_name: da diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_de.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36fdf7be9653d8b9c4441c8eb975075d4c93f447 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_de.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_de +dataset_name: de diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_el.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_el.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d97580b09e1b49855d2aa2a83192e7b01a06eadc --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_el.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_el +dataset_name: el diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_es.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7dffc6c7b976c84c71fb9f1468d6af65c2d00d20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_es.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_es +dataset_name: es diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_fi.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_fi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d2032e34a4f25b3e83371aac1dad1e59ac6a176d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_fi.yaml @@ -0,0 +1,23 @@ +group: + - arc_challenge_mt +task: arc_challenge_mt_fi +dataset_path: LumiOpen/arc_challenge_mt +dataset_name: fi +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{choices.label.index(answerKey)}}" +doc_to_choice: "{{choices.text}}" +should_decontaminate: true +doc_to_decontamination_query: "Question: {{question}}\nAnswer:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_hu.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_hu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..03d5ac1725ca425bd25790d1910a986648dbd442 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_hu.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_hu +dataset_name: hu diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_is.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_is.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1591d7eb8f55d5b80597d1a059c5a76eb98192b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_is.yaml @@ -0,0 +1,22 @@ +group: + - arc_challenge_mt +task: arc_challenge_mt_is +dataset_path: mideind/icelandic-arc-challenge +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{choices.label.index(answerKey)}}" +doc_to_choice: "{{choices.text}}" +should_decontaminate: true +doc_to_decontamination_query: "Question: {{question}}\nAnswer:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_it.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_it.yaml new file mode 100644 index 0000000000000000000000000000000000000000..995f7a3dc944279b760c8433c552f0ecee78367a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_it.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_it +dataset_name: it diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_nb.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_nb.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aceaa14b5f4dc28d13a49f1e2a932f82a32e264e --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arc_mt/arc_challenge_mt_nb.yaml @@ -0,0 +1,3 @@ +include: arc_challenge_mt_fi.yaml +task: arc_challenge_mt_nb +dataset_name: nb diff --git 
a/venv/lib/python3.10/site-packages/lm_eval/tasks/arithmetic/README.md b/venv/lib/python3.10/site-packages/lm_eval/tasks/arithmetic/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e7d7f89efbbd3af29e5e1c28b1af1adb93073569 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arithmetic/README.md @@ -0,0 +1,60 @@ +# Arithmetic + +### Paper + +Title: `Language Models are Few-Shot Learners` +Abstract: https://arxiv.org/abs/2005.14165 + +A small battery of 10 tests that involve asking language models a simple arithmetic +problem in natural language. + +Homepage: https://github.com/openai/gpt-3/tree/master/data + + +### Citation + +``` +@inproceedings{NEURIPS2020_1457c0d6, + author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. 
Lin}, + pages = {1877--1901}, + publisher = {Curran Associates, Inc.}, + title = {Language Models are Few-Shot Learners}, + url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf}, + volume = {33}, + year = {2020} +} +``` + +### Groups and Tasks + +#### Groups + +* `arithmetic`: Evaluates `1dc` to `5ds` + +#### Tasks + +* `arithmetic_1dc` +* `arithmetic_2da` +* `arithmetic_2dm` +* `arithmetic_2ds` +* `arithmetic_3da` +* `arithmetic_3ds` +* `arithmetic_4da` +* `arithmetic_4ds` +* `arithmetic_5da` +* `arithmetic_5ds` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/venv/lib/python3.10/site-packages/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml b/venv/lib/python3.10/site-packages/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e8d414a60c1f9df7c635fafd34b7a2f39a36865 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml @@ -0,0 +1,18 @@ +group: + - arithmetic +task: arithmetic_1dc +dataset_path: EleutherAI/arithmetic +dataset_name: arithmetic_1dc +output_type: loglikelihood +validation_split: validation +test_split: null +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/venv/lib/python3.10/site-packages/lm_eval/utils.py b/venv/lib/python3.10/site-packages/lm_eval/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..486f960fd851745805597fb6deea2ed032140742 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lm_eval/utils.py @@ -0,0 +1,487 @@ +import collections +import fnmatch +import functools +import hashlib +import importlib.util +import inspect +import json +import logging +import os +import re +from dataclasses import asdict, is_dataclass +from itertools import islice +from typing import Any, Callable, List + +import numpy as np +import yaml +from jinja2 import BaseLoader, Environment, StrictUndefined + + +logging.basicConfig( + format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", + datefmt="%Y-%m-%d:%H:%M:%S", + level=logging.INFO, +) +eval_logger = logging.getLogger("lm-eval") + +SPACING = " " * 47 + +HIGHER_IS_BETTER_SYMBOLS = { + True: "↑", + False: "↓", +} + + +def hash_string(string: str) -> str: + return hashlib.sha256(string.encode("utf-8")).hexdigest() + + +def escaped_split(text, sep_char, maxsplit=-1): + """Split text into a list on occurrences of the given 
separation + character `sep_char`. The separation character may be escaped by a + backslash to avoid splitting at that location. + + The separation character must be a string of size 1. + + If `maxsplit` is given, at most `maxsplit` splits are done (thus, + the list will have at most `maxsplit + 1` elements). If `maxsplit` + is not specified or less than 0, then there is no limit on the + number of splits (all possible splits are made). + """ + assert ( + len(sep_char) == 1 + ), "separation string must be a single character for escaped splitting" + + if maxsplit == 0: + return text + maxsplit = max(0, maxsplit) + + return re.split(r"(? str: + """ + Given the sample results filenames, extracts and returns the task name. + """ + return filename[filename.find("_") + 1 : filename.rfind("_")] + + +def get_file_datetime(filename: str) -> str: + """ + Given the results and sample results filenames, extracts and returns the datetime. + """ + return filename[filename.rfind("_") + 1 :].replace(".json", "") + + +def sanitize_model_name(model_name: str) -> str: + """ + Given the model name, returns a sanitized version of it. + """ + return re.sub(r"[\"<>:/\|\\?\*\[\]]+", "__", model_name) + + +def sanitize_task_name(task_name: str) -> str: + """ + Given the task name, returns a sanitized version of it. + """ + return re.sub(r"\W", "_", task_name) + + +def get_latest_filename(filenames: List[str]) -> str: + """ + Given a list of filenames, returns the filename with the latest datetime. + """ + return max(filenames, key=lambda f: get_file_datetime(f)) + + +def get_results_filenames(filenames: List[str]) -> List[str]: + """ + Extracts filenames that correspond to aggregated results. + """ + return [f for f in filenames if "/results_" in f and ".json" in f] + + +def get_sample_results_filenames(filenames: List[str]) -> List[str]: + """ + Extracts filenames that correspond to sample results. 
+ """ + return [f for f in filenames if "/samples_" in f and ".json" in f] + + +def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len): + """ + - context_len allows for a rolling window context, allowing each prediction window to potentially + condition on some context + + :param token_list: list + List of tokens to be PREDICTED + :param max_seq_len: int + max_seq_len of model (or max_seq_len we want to use) + :param context_len: int + Amount of desired token context for prediction. Needs to be at least 1. + :param prefix_token: token + Dummy token like so the first token has something to condition on + :return: generator + Generator of tuples + (input_tokens, pred_tokens) + Note: Score only the last len(pred_tokens) logits of the LM + """ + assert 1 <= context_len <= max_seq_len + if not token_list: + return + # +1 offset, going from input->preds + pred_len = max_seq_len - context_len + 1 + predicted = 0 + + # Special handling for first window: predict all tokens + first_seq_len = min(max_seq_len, len(token_list)) + yield ([prefix_token] + token_list[: first_seq_len - 1], token_list[:first_seq_len]) + predicted += first_seq_len + + while predicted < len(token_list): + window_pred_len = min(len(token_list) - predicted, pred_len) + window_end = predicted + window_pred_len + + yield ( + token_list[window_end - max_seq_len - 1 : window_end - 1], + token_list[window_end - window_pred_len : window_end], + ) + predicted += window_pred_len + + +def make_disjoint_window(pair): + """Takes output from get_rolling_token_windows and makes the context not overlap with the continuation""" + a, b = pair + return a[: len(a) - (len(b) - 1)], b + + +class EnhancedJSONEncoder(json.JSONEncoder): + """ + Provides a proper json encoding for the loggers and trackers json dumps. + Notably manages the json encoding of dataclasses. 
+ """ + + def default(self, o): + if is_dataclass(o): + return asdict(o) + return super().default(o) + + +class Reorderer: + def __init__(self, arr: List[Any], fn: Callable) -> None: + """Reorder an array according to some function + + Args: + arr (List[Any]): The initial array + fn (Callable[[Any], Any]): A function to determine the priority of elements + """ + self.size = len(arr) + arr = list(enumerate(arr)) + arr = group(arr, lambda x: fn(x[1])) + # arr = [([y[0] for y in x], x[0][1]) for x in arr] + # TODO: overhaul reorderer. It currently grouped requests by content but we don't want this + arr = [([y[0]], x[0][1]) for x in arr for y in x] + arr.sort(key=lambda x: fn(x[1])) + + self.arr = arr + + def get_reordered(self): + """Gets the reordered array + + Returns: + List[Any]: The reordered array + """ + return [x[1] for x in self.arr] + + def get_original(self, newarr): + """Restores the original order of a new array based on the old array's order + + Args: + newarr (List[Any]): The array to be restored + + Returns: + List[Any]: The array restored to the original order + """ + res = [None] * self.size + cov = [False] * self.size + + for (inds, _), v in zip(self.arr, newarr): + for ind in inds: + res[ind] = v + cov[ind] = True + + assert all(cov) + + return res + + +def make_table(result_dict, column: str = "results", sort_results: bool = True): + """Generate table of results.""" + from pytablewriter import LatexTableWriter, MarkdownTableWriter + + if column == "results": + column_name = "Tasks" + elif column == "groups": + column_name = "Groups" + + all_headers = [ + column_name, + "Version", + "Filter", + "n-shot", + "Metric", + "", + "Value", + "", + "Stderr", + ] + + md_writer = MarkdownTableWriter() + latex_writer = LatexTableWriter() + md_writer.headers = all_headers + latex_writer.headers = all_headers + + values = [] + + keys = result_dict[column].keys() + if sort_results: + # sort entries alphabetically + keys = sorted(keys) + for k in keys: + dic = 
result_dict[column][k] + version = result_dict["versions"].get(k, "N/A") + n = str(result_dict["n-shot"][k]) + higher_is_better = result_dict.get("higher_is_better", {}).get(k, {}) + + if "alias" in dic: + k = dic.pop("alias") + + metric_items = dic.items() + if sort_results: + metric_items = sorted(metric_items) + + for (mf), v in metric_items: + m, _, f = mf.partition(",") + if m.endswith("_stderr"): + continue + + hib = HIGHER_IS_BETTER_SYMBOLS.get(higher_is_better.get(m), "") + + if m + "_stderr" + "," + f in dic: + se = dic[m + "_stderr" + "," + f] + if se != "N/A": + se = "%.4f" % se + values.append([k, version, f, n, m, hib, "%.4f" % v, "±", se]) + else: + values.append([k, version, f, n, m, hib, "%.4f" % v, "", ""]) + k = "" + version = "" + md_writer.value_matrix = values + latex_writer.value_matrix = values + + # todo: make latex table look good + # print(latex_writer.dumps()) + + return md_writer.dumps() + + +def positional_deprecated(fn): + """ + A decorator to nudge users into passing only keyword args (`kwargs`) to the + wrapped function, `fn`. + """ + + @functools.wraps(fn) + def _wrapper(*args, **kwargs): + if len(args) != 1 if inspect.ismethod(fn) else 0: + print( + f"WARNING: using {fn.__name__} with positional arguments is " + "deprecated and will be disallowed in a future version of " + "lm-evaluation-harness!" 
+ ) + return fn(*args, **kwargs) + + return _wrapper + + +def ignore_constructor(loader, node): + return node + + +def import_function(loader, node): + function_name = loader.construct_scalar(node) + yaml_path = os.path.dirname(loader.name) + + *module_name, function_name = function_name.split(".") + if isinstance(module_name, list): + module_name = ".".join(module_name) + module_path = os.path.normpath(os.path.join(yaml_path, "{}.py".format(module_name))) + + spec = importlib.util.spec_from_file_location(module_name, module_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + function = getattr(module, function_name) + return function + + +def load_yaml_config(yaml_path=None, yaml_config=None, yaml_dir=None, mode="full"): + if mode == "simple": + constructor_fn = ignore_constructor + elif mode == "full": + constructor_fn = import_function + + # Add the import_function constructor to the YAML loader + yaml.add_constructor("!function", constructor_fn) + if yaml_config is None: + with open(yaml_path, "rb") as file: + yaml_config = yaml.full_load(file) + + if yaml_dir is None: + yaml_dir = os.path.dirname(yaml_path) + + assert yaml_dir is not None + + if "include" in yaml_config: + include_path = yaml_config["include"] + del yaml_config["include"] + + if isinstance(include_path, str): + include_path = [include_path] + + # Load from the last one first + include_path.reverse() + final_yaml_config = {} + for path in include_path: + # Assumes that path is a full path. 
+ # If not found, assume the included yaml + # is in the same dir as the original yaml + if not os.path.isfile(path): + path = os.path.join(yaml_dir, path) + + try: + included_yaml_config = load_yaml_config(yaml_path=path, mode=mode) + final_yaml_config.update(included_yaml_config) + except Exception as ex: + # If failed to load, ignore + raise ex + + final_yaml_config.update(yaml_config) + return final_yaml_config + return yaml_config + + +def regex_replace(string, pattern, repl, count: int = 0): + """Implements the `re.sub` function as a custom Jinja filter.""" + return re.sub(pattern, repl, string, count=count) + + +env = Environment(loader=BaseLoader, undefined=StrictUndefined) +env.filters["regex_replace"] = regex_replace + + +def apply_template(template: str, doc: dict) -> str: + rtemplate = env.from_string(template) + return rtemplate.render(**doc) + + +def create_iterator(raw_iterator, *, rank=0, world_size=1, limit=None): + """ + Method for creating a (potentially) sliced and limited + iterator from a raw document iterator. 
Used for splitting data + among ranks in multigpu setting or only pulling a sample of documents + """ + return islice(raw_iterator, rank, limit, world_size) diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/LICENSE b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c77c54e2000bce9d89c581402ef4ec0074aabd6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2024 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/METADATA b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3cea7ed85609bef162a0798cdc89ab7dc7e1507a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/METADATA @@ -0,0 +1,103 @@ +Metadata-Version: 2.1 +Name: tabledata +Version: 1.3.4 +Summary: tabledata is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc. +Home-page: https://github.com/thombashi/tabledata +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Changelog, https://github.com/thombashi/tabledata/releases +Project-URL: Documentation, https://tabledata.rtfd.io/ +Project-URL: Source, https://github.com/thombashi/tabledata +Project-URL: Tracker, https://github.com/thombashi/tabledata/issues +Keywords: table +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: 
DataProperty<2,>=1.0.1 +Requires-Dist: typepy<2,>=1.2.0 +Provides-Extra: logging +Requires-Dist: loguru<1,>=0.4.1; extra == "logging" +Provides-Extra: test +Requires-Dist: pytablewriter>=0.46; extra == "test" +Requires-Dist: pytest; extra == "test" + +.. contents:: **tabledata** + :backlinks: top + :depth: 2 + +Summary +--------- +`tabledata `__ is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc. + +.. image:: https://badge.fury.io/py/tabledata.svg + :target: https://badge.fury.io/py/tabledata + :alt: PyPI package version + +.. image:: https://img.shields.io/pypi/pyversions/tabledata.svg + :target: https://pypi.org/project/tabledata + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/tabledata.svg + :target: https://pypi.org/project/tabledata + :alt: Supported Python implementations + +.. image:: https://github.com/thombashi/tabledata/actions/workflows/ci.yml/badge.svg + :target: https://github.com/thombashi/tabledata/actions/workflows/ci.yml + :alt: Linux/macOS/Windows CI status + +.. 
image:: https://coveralls.io/repos/github/thombashi/tabledata/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/tabledata?branch=master + :alt: Test coverage + +Installation +============ + +Install from PyPI +------------------------------ +:: + + pip install tabledata + +Install from PPA (for Ubuntu) +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-tabledata + + +Dependencies +============ +- Python 3.9+ +- `Mandatory Python package dependencies (automatically installed) `__ + +Optional Python packages +------------------------------------------------ +- `loguru `__ + - Used for logging if the package installed +- `pandas `__ + - required to get table data as a pandas data frame + +Documentation +=============== +https://tabledata.rtfd.io/ + diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/RECORD b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..8d31b42423bbe2a1868aa62e912ae0a9397b5f51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/RECORD @@ -0,0 +1,29 @@ +tabledata-1.3.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tabledata-1.3.4.dist-info/LICENSE,sha256=RDTzU6Et0CNqmeXT_Qf-OzpFP5Fk_hDFehHWyoa1DAg,1079 +tabledata-1.3.4.dist-info/METADATA,sha256=GHr_vEAX4m5otWMVEi6wMhYRBqvG04p9y9HKZmwVMYc,3681 +tabledata-1.3.4.dist-info/RECORD,, +tabledata-1.3.4.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91 +tabledata-1.3.4.dist-info/top_level.txt,sha256=wPYCjph2PxB5odPJWPADX_65iL1gAIjMQFlAyZi80iI,10 +tabledata/__init__.py,sha256=eudQG2H25UnOXi6v_OmSJOZ58y0vS9_xxGxAeNnN-Xg,762 +tabledata/__pycache__/__init__.cpython-310.pyc,, +tabledata/__pycache__/__version__.cpython-310.pyc,, +tabledata/__pycache__/_common.cpython-310.pyc,, +tabledata/__pycache__/_constant.cpython-310.pyc,, 
+tabledata/__pycache__/_converter.cpython-310.pyc,, +tabledata/__pycache__/_core.cpython-310.pyc,, +tabledata/__pycache__/error.cpython-310.pyc,, +tabledata/__pycache__/normalizer.cpython-310.pyc,, +tabledata/__version__.py,sha256=NyM2-aHThDM1QiBneU95Obvzpd74UpMKvROJkrqgH-A,268 +tabledata/_common.py,sha256=eB3xHflvbF5p5hz1f5D9xNHQCujy6Uk91NLPTy5fFHY,274 +tabledata/_constant.py,sha256=I763_Fx-9IT_ZQTTncxi04WsXd6tK78z2VBYZ3up5Aw,154 +tabledata/_converter.py,sha256=ud75_BoQAzhuzkHCZJK8QvkZoo26kCroUXCoUyCxdwA,909 +tabledata/_core.py,sha256=tT5R2MOR-LiYjqBfHzXl_nFuAgLIGa8OUAk3_AiuRLs,14554 +tabledata/_logger/__init__.py,sha256=Bvpnv_2rgv0Pj4CIiKiLRC-tPhQjGEckjfX408iTXvk,93 +tabledata/_logger/__pycache__/__init__.cpython-310.pyc,, +tabledata/_logger/__pycache__/_logger.cpython-310.pyc,, +tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc,, +tabledata/_logger/_logger.py,sha256=2OlnpGVK0jGOs-at8Xgpue1zKm9Hmd62KG1D9Y5BUwA,815 +tabledata/_logger/_null_logger.py,sha256=QJuaErUIV_x6NjQ9qNX9eNSi_GB_9CrO7lKeXYZnuaw,1088 +tabledata/error.py,sha256=UGGJm3_9oLQi9GBWZz4cqp1dnzc5Kbu37c6CsiWozME,526 +tabledata/normalizer.py,sha256=MumDxG0LievF1r-D-YMoW8sc5-jV4RmT8qwCkPwSn70,6484 +tabledata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..ae527e7d64811439e61b93aa375defb30e06edfe --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..43aaf2f47a3d66da5f27d0be87b84007bd70669a --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/tabledata-1.3.4.dist-info/top_level.txt @@ -0,0 +1 @@ +tabledata diff --git a/venv/lib/python3.10/site-packages/tabledata/__init__.py b/venv/lib/python3.10/site-packages/tabledata/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d393e4239a70d6d37b6ac2c5e6bf66287300a3e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/__init__.py @@ -0,0 +1,29 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._common import convert_idx_to_alphabet +from ._constant import PatternMatch +from ._converter import to_value_matrix +from ._core import TableData +from ._logger import set_logger +from .error import DataError, InvalidHeaderNameError, InvalidTableNameError, NameValidationError + + +__all__ = ( + "__author__", + "__copyright__", + "__email__", + "__license__", + "__version__", + "convert_idx_to_alphabet", + "set_logger", + "to_value_matrix", + "PatternMatch", + "TableData", + "DataError", + "InvalidHeaderNameError", + "InvalidTableNameError", + "NameValidationError", +) diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e652ce18b360c7ce5cf870fea9aa3dac0b518dc6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/__version__.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/__version__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0f7965cafe8bfca6e2614368f98741e38af2a52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/__version__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..335d3f72698a9734131e5b675f8e09365fe72991 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..449d3d5a57ee49d8c6fc8c87673cea8133ef843a Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_converter.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea16b594cab2ef31a18f8bbea36b455226758f35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_converter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_core.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f69a5e1a2b045f24299e575198e4ecec778d27a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/error.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d862a3531cf4c2d084b36508e27454e4d867129 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/error.cpython-310.pyc differ 
diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/normalizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/normalizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0317ec0c9396d34761aed793a712cf008fcaf08a Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/normalizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__version__.py b/venv/lib/python3.10/site-packages/tabledata/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..9f0d87e2e79191b913fb35adc9bdbc8c7166df15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/__version__.py @@ -0,0 +1,9 @@ +from typing import Final + + +__author__: Final = "Tsuyoshi Hombashi" +__copyright__: Final = f"Copyright 2017-2024, {__author__}" +__license__: Final = "MIT License" +__version__ = "1.3.4" +__maintainer__: Final = __author__ +__email__: Final = "tsuyoshi.hombashi@gmail.com" diff --git a/venv/lib/python3.10/site-packages/tabledata/_common.py b/venv/lib/python3.10/site-packages/tabledata/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..944e9474385d5ac4cace526f532564308a1fd13a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_common.py @@ -0,0 +1,12 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + + +def convert_idx_to_alphabet(idx: int) -> str: + if idx < 26: + return chr(65 + idx) + + div, mod = divmod(idx, 26) + + return convert_idx_to_alphabet(div - 1) + convert_idx_to_alphabet(mod) diff --git a/venv/lib/python3.10/site-packages/tabledata/_constant.py b/venv/lib/python3.10/site-packages/tabledata/_constant.py new file mode 100644 index 0000000000000000000000000000000000000000..722f1372ff8416da2a9c5733c11d8351e87c792f --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_constant.py @@ -0,0 +1,11 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import enum + + +@enum.unique +class PatternMatch(enum.Enum): + OR = 0 + AND = 1 diff --git a/venv/lib/python3.10/site-packages/tabledata/_converter.py b/venv/lib/python3.10/site-packages/tabledata/_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..a8032d0e7037d8e46f367bb37eeca3c6273fb3b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_converter.py @@ -0,0 +1,36 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from collections.abc import Sequence +from typing import Any + +from .error import DataError + + +Row = tuple[int, Any] + + +def to_value_matrix(headers: Sequence[str], value_matrix: Sequence[Any]) -> list[Row]: + if not value_matrix: + return [] + + return [_to_row(headers, values, row_idx)[1] for row_idx, values in enumerate(value_matrix)] + + +def _to_row(headers: Sequence[str], values: Any, row_idx: int) -> Row: + if headers: + try: + values = values._asdict() + except AttributeError: + pass + + try: + return (row_idx, [values.get(header) for header in headers]) + except (TypeError, AttributeError): + pass + + if not isinstance(values, (tuple, list)): + raise DataError(f"row must be a list or tuple: actual={type(values)}") + + return (row_idx, values) diff --git a/venv/lib/python3.10/site-packages/tabledata/_core.py b/venv/lib/python3.10/site-packages/tabledata/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca07c8b3f4f7090f433179f3a2f6ee6b222c0a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_core.py @@ -0,0 +1,511 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import re +from collections import OrderedDict, namedtuple +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Any, Optional, Union + +import dataproperty as dp +import typepy +from dataproperty import DataPropertyMatrix +from dataproperty.typing import TypeHint +from typepy import Nan + +from ._constant import PatternMatch +from ._converter import to_value_matrix +from ._logger import logger # type: ignore + + +if TYPE_CHECKING: + import pandas + + +class TableData: + """ + Class to represent a table data structure. + + :param table_name: Name of the table. + :param headers: Table header names. + :param rows: Data of the table. + """ + + def __init__( + self, + table_name: Optional[str], + headers: Sequence[str], + rows: Sequence, + dp_extractor: Optional[dp.DataPropertyExtractor] = None, + type_hints: Optional[Sequence[Union[str, TypeHint]]] = None, + max_workers: Optional[int] = None, + max_precision: Optional[int] = None, + ) -> None: + self.__table_name = table_name + self.__value_matrix: list[list[Any]] = [] + self.__value_dp_matrix: Optional[DataPropertyMatrix] = None + + if rows: + self.__rows = rows + else: + self.__rows = [] + + if dp_extractor: + self.__dp_extractor = copy.deepcopy(dp_extractor) + else: + self.__dp_extractor = dp.DataPropertyExtractor(max_precision=max_precision) + + if type_hints: + self.__dp_extractor.column_type_hints = type_hints + + self.__dp_extractor.strip_str_header = '"' + + if max_workers: + self.__dp_extractor.max_workers = max_workers + + if not headers: + self.__dp_extractor.headers = [] + else: + self.__dp_extractor.headers = headers + + def __repr__(self) -> str: + element_list = [f"table_name={self.table_name}"] + + try: + element_list.append("headers=[{}]".format(", ".join(self.headers))) + except TypeError: + element_list.append("headers=None") + + element_list.extend([f"cols={self.num_columns}", f"rows={self.num_rows}"]) + + return ", 
".join(element_list) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, TableData): + return False + + return self.equals(other, cmp_by_dp=False) + + def __ne__(self, other: Any) -> bool: + if not isinstance(other, TableData): + return True + + return not self.equals(other, cmp_by_dp=False) + + @property + def table_name(self) -> Optional[str]: + """str: Name of the table.""" + + return self.__table_name + + @table_name.setter + def table_name(self, value: Optional[str]) -> None: + self.__table_name = value + + @property + def headers(self) -> Sequence[str]: + """Sequence[str]: Table header names.""" + + return self.__dp_extractor.headers + + @property + def rows(self) -> Sequence: + """Sequence: Original rows of tabular data.""" + + return self.__rows + + @property + def value_matrix(self) -> DataPropertyMatrix: + """DataPropertyMatrix: Converted rows of tabular data.""" + + if self.__value_matrix: + return self.__value_matrix + + self.__value_matrix = [ + [value_dp.data for value_dp in value_dp_list] for value_dp_list in self.value_dp_matrix + ] + + return self.__value_matrix + + @property + def has_value_dp_matrix(self) -> bool: + return self.__value_dp_matrix is not None + + @property + def max_workers(self) -> int: + return self.__dp_extractor.max_workers + + @max_workers.setter + def max_workers(self, value: Optional[int]) -> None: + self.__dp_extractor.max_workers = value + + @property + def num_rows(self) -> Optional[int]: + """Optional[int]: + Number of rows in the tabular data. + |None| if the ``rows`` is neither list nor tuple. 
+ """ + + try: + return len(self.rows) + except TypeError: + return None + + @property + def num_columns(self) -> Optional[int]: + if typepy.is_not_empty_sequence(self.headers): + return len(self.headers) + + try: + return len(self.rows[0]) + except TypeError: + return None + except IndexError: + return 0 + + @property + def value_dp_matrix(self) -> DataPropertyMatrix: + """DataPropertyMatrix: DataProperty for table data.""" + + if self.__value_dp_matrix is None: + self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix( + to_value_matrix(self.headers, self.rows) + ) + + return self.__value_dp_matrix + + @property + def header_dp_list(self) -> list[dp.DataProperty]: + return self.__dp_extractor.to_header_dp_list() + + @property + def column_dp_list(self) -> list[dp.ColumnDataProperty]: + return self.__dp_extractor.to_column_dp_list(self.value_dp_matrix) + + @property + def dp_extractor(self) -> dp.DataPropertyExtractor: + return self.__dp_extractor + + def is_empty_header(self) -> bool: + """bool: |True| if the data :py:attr:`.headers` is empty.""" + + return typepy.is_empty_sequence(self.headers) + + def is_empty_rows(self) -> bool: + """ + :return: |True| if the tabular data has no rows. + :rtype: bool + """ + + return self.num_rows == 0 + + def is_empty(self) -> bool: + """ + :return: + |True| if the data :py:attr:`.headers` or + :py:attr:`.value_matrix` is empty. 
+ :rtype: bool + """ + + return any([self.is_empty_header(), self.is_empty_rows()]) + + def equals(self, other: "TableData", cmp_by_dp: bool = True) -> bool: + if cmp_by_dp: + return self.__equals_dp(other) + + return self.__equals_raw(other) + + def __equals_base(self, other: "TableData") -> bool: + compare_item_list = [self.table_name == other.table_name] + + if self.num_rows is not None: + compare_item_list.append(self.num_rows == other.num_rows) + + return all(compare_item_list) + + def __equals_raw(self, other: "TableData") -> bool: + if not self.__equals_base(other): + return False + + if self.headers != other.headers: + return False + + for lhs_row, rhs_row in zip(self.rows, other.rows): + if len(lhs_row) != len(rhs_row): + return False + + if not all( + [ + lhs == rhs + for lhs, rhs in zip(lhs_row, rhs_row) + if not Nan(lhs).is_type() and not Nan(rhs).is_type() + ] + ): + return False + + return True + + def __equals_dp(self, other: "TableData") -> bool: + if not self.__equals_base(other): + return False + + if self.header_dp_list != other.header_dp_list: + return False + + if self.value_dp_matrix is None or other.value_dp_matrix is None: + return False + + for lhs_list, rhs_list in zip(self.value_dp_matrix, other.value_dp_matrix): + if len(lhs_list) != len(rhs_list): + return False + + if any([lhs != rhs for lhs, rhs in zip(lhs_list, rhs_list)]): + return False + + return True + + def in_tabledata_list(self, other: Sequence["TableData"], cmp_by_dp: bool = True) -> bool: + for table_data in other: + if self.equals(table_data, cmp_by_dp=cmp_by_dp): + return True + + return False + + def validate_rows(self) -> None: + """ + :raises ValueError: + """ + + invalid_row_idx_list = [] + + for row_idx, row in enumerate(self.rows): + if isinstance(row, (list, tuple)) and len(self.headers) != len(row): + invalid_row_idx_list.append(row_idx) + + if isinstance(row, dict): + if not all([header in row for header in self.headers]): + invalid_row_idx_list.append(row_idx) + 
+ if not invalid_row_idx_list: + return + + for invalid_row_idx in invalid_row_idx_list: + logger.debug(f"invalid row (line={invalid_row_idx}): {self.rows[invalid_row_idx]}") + + raise ValueError( + "table header length and row length are mismatch:\n" + + f" header(len={len(self.headers)}): {self.headers}\n" + + " # of miss match rows: {} ouf of {}\n".format( + len(invalid_row_idx_list), self.num_rows + ) + ) + + def as_dict(self, default_key: str = "table") -> dict[str, list["OrderedDict[str, Any]"]]: + """ + Args: + default_key: + Key of a returning dictionary when the ``table_name`` is empty. + + Returns: + dict: Table data as a |dict| instance. + + Sample Code: + .. code:: python + + from tabledata import TableData + + TableData( + "sample", + ["a", "b"], + [[1, 2], [3.3, 4.4]] + ).as_dict() + + Output: + .. code:: json + + {'sample': [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3.3), ('b', 4.4)])]} + """ # noqa + + dict_body = [] + for row in self.value_matrix: + if not row: + continue + + values = [ + (header, value) for header, value in zip(self.headers, row) if value is not None + ] + + if not values: + continue + + dict_body.append(OrderedDict(values)) + + table_name = self.table_name + if not table_name: + table_name = default_key + + return {table_name: dict_body} + + def as_tuple(self) -> Iterator[tuple]: + """ + :return: Rows of the tuple. + :rtype: list of |namedtuple| + + :Sample Code: + .. code:: python + + from tabledata import TableData + + records = TableData( + "sample", + ["a", "b"], + [[1, 2], [3.3, 4.4]] + ).as_tuple() + for record in records: + print(record) + + :Output: + .. 
code-block:: none + + Row(a=1, b=2) + Row(a=Decimal('3.3'), b=Decimal('4.4')) + """ + + Row = namedtuple("Row", self.headers) # type: ignore + + for value_dp_list in self.value_dp_matrix: + if typepy.is_empty_sequence(value_dp_list): + continue + + row = Row(*(value_dp.data for value_dp in value_dp_list)) + + yield row + + def as_dataframe(self) -> "pandas.DataFrame": + """ + :return: Table data as a ``pandas.DataFrame`` instance. + :rtype: pandas.DataFrame + + :Sample Code: + .. code-block:: python + + from tabledata import TableData + + TableData( + "sample", + ["a", "b"], + [[1, 2], [3.3, 4.4]] + ).as_dataframe() + + :Output: + .. code-block:: none + + a b + 0 1 2 + 1 3.3 4.4 + + :Dependency Packages: + - `pandas `__ + """ + + try: + from pandas import DataFrame + except ImportError: + raise RuntimeError("required 'pandas' package to execute as_dataframe method") + + dataframe = DataFrame(self.value_matrix) + if not self.is_empty_header(): + dataframe.columns = self.headers + + return dataframe + + def transpose(self) -> "TableData": + return TableData( + self.table_name, + self.headers, + [row for row in zip(*self.rows)], + max_workers=self.max_workers, + ) + + def filter_column( + self, + patterns: Optional[str] = None, + is_invert_match: bool = False, + is_re_match: bool = False, + pattern_match: PatternMatch = PatternMatch.OR, + ) -> "TableData": + logger.debug( + "filter_column: patterns={}, is_invert_match={}, " + "is_re_match={}, pattern_match={}".format( + patterns, is_invert_match, is_re_match, pattern_match + ) + ) + + if not patterns: + return self + + match_header_list = [] + match_column_matrix = [] + + if pattern_match == PatternMatch.OR: + match_method = any + elif pattern_match == PatternMatch.AND: + match_method = all + else: + raise ValueError(f"unknown matching: {pattern_match}") + + for header, column in zip(self.headers, zip(*self.rows)): + is_match_list = [] + for pattern in patterns: + is_match = self.__is_match(header, pattern, 
is_re_match) + + is_match_list.append( + any([is_match and not is_invert_match, not is_match and is_invert_match]) + ) + + if match_method(is_match_list): + match_header_list.append(header) + match_column_matrix.append(column) + + logger.debug( + "filter_column: table={}, match_header_list={}".format( + self.table_name, match_header_list + ) + ) + + return TableData( + self.table_name, + match_header_list, + list(zip(*match_column_matrix)), + max_workers=self.max_workers, + ) + + @staticmethod + def from_dataframe( + dataframe: "pandas.DataFrame", + table_name: str = "", + type_hints: Optional[Sequence[TypeHint]] = None, + max_workers: Optional[int] = None, + ) -> "TableData": + """ + Initialize TableData instance from a pandas.DataFrame instance. + + :param pandas.DataFrame dataframe: + :param str table_name: Table name to create. + """ + + return TableData( + table_name, + list(dataframe.columns.values), + dataframe.values.tolist(), + type_hints=type_hints, + max_workers=max_workers, + ) + + @staticmethod + def __is_match(header: str, pattern: str, is_re_match: bool) -> bool: + if is_re_match: + return re.search(pattern, header) is not None + + return header == pattern diff --git a/venv/lib/python3.10/site-packages/tabledata/_logger/__init__.py b/venv/lib/python3.10/site-packages/tabledata/_logger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67f8939fdd2388cdf51aa53861884a9bb337c862 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_logger/__init__.py @@ -0,0 +1,4 @@ +from ._logger import logger, set_logger # type: ignore + + +__all__ = ("logger", "set_logger") diff --git a/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62229b21d582a5c48a833ef002dbefe15724c4ae Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17e6e09af6beb113b7cd80b3e8fa4b37b96265d5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8142025911e43600205254c119a7829b52a2e8a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/_logger/_logger.py b/venv/lib/python3.10/site-packages/tabledata/_logger/_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..4891489e38c78a19fffbde8dfc1d208b03014ef9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_logger/_logger.py @@ -0,0 +1,40 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import warnings +from typing import Final + +import dataproperty + +from ._null_logger import NullLogger # type: ignore + + +MODULE_NAME: Final = "tabledata" + +try: + from loguru import logger + + logger.disable(MODULE_NAME) +except ImportError: + logger = NullLogger() + + +def set_logger(is_enable: bool, propagation_depth: int = 1) -> None: + if is_enable: + logger.enable(MODULE_NAME) + else: + logger.disable(MODULE_NAME) + + if propagation_depth <= 0: + return + + dataproperty.set_logger(is_enable, propagation_depth - 1) + + +def set_log_level(log_level): # type: ignore + warnings.warn( + "'set_log_level' method is deprecated and will be removed in the future. ", + DeprecationWarning, + ) + return diff --git a/venv/lib/python3.10/site-packages/tabledata/_logger/_null_logger.py b/venv/lib/python3.10/site-packages/tabledata/_logger/_null_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..1de427a45ad4986de92d71eea6a657aceb095766 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_logger/_null_logger.py @@ -0,0 +1,44 @@ +# type: ignore + + +class NullLogger: + level_name = None + + def remove(self, handler_id=None): # pragma: no cover + pass + + def add(self, sink, **kwargs): # pragma: no cover + pass + + def disable(self, name): # pragma: no cover + pass + + def enable(self, name): # pragma: no cover + pass + + def critical(self, __message, *args, **kwargs): # pragma: no cover + pass + + def debug(self, __message, *args, **kwargs): # pragma: no cover + pass + + def error(self, __message, *args, **kwargs): # pragma: no cover + pass + + def exception(self, __message, *args, **kwargs): # pragma: no cover + pass + + def info(self, __message, *args, **kwargs): # pragma: no cover + pass + + def log(self, __level, __message, *args, **kwargs): # pragma: no cover + pass + + def success(self, __message, *args, **kwargs): # pragma: no cover + pass + + def trace(self, __message, *args, **kwargs): # 
pragma: no cover + pass + + def warning(self, __message, *args, **kwargs): # pragma: no cover + pass diff --git a/venv/lib/python3.10/site-packages/tabledata/error.py b/venv/lib/python3.10/site-packages/tabledata/error.py new file mode 100644 index 0000000000000000000000000000000000000000..35084f8b1af8fa41a12f4fcaf5f0710771019f41 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/error.py @@ -0,0 +1,27 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + + +class NameValidationError(ValueError): + """ + Exception raised when a name is invalid. + """ + + +class InvalidTableNameError(NameValidationError): + """ + Exception raised when a table name is invalid. + """ + + +class InvalidHeaderNameError(NameValidationError): + """ + Exception raised when a table header name is invalid. + """ + + +class DataError(ValueError): + """ + Exception raised when data is invalid as tabular data. + """ diff --git a/venv/lib/python3.10/site-packages/tabledata/normalizer.py b/venv/lib/python3.10/site-packages/tabledata/normalizer.py new file mode 100644 index 0000000000000000000000000000000000000000..990ae65bb98073d04936bb9651259fc6cc19563e --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/normalizer.py @@ -0,0 +1,207 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import abc +import warnings +from collections.abc import Sequence + +import typepy +from dataproperty.typing import TypeHint + +from ._core import TableData +from ._logger import logger # type: ignore +from .error import InvalidHeaderNameError, InvalidTableNameError + + +class TableDataNormalizerInterface(metaclass=abc.ABCMeta): + """ + Interface class to validate and normalize data of |TableData|. 
+ """ + + @abc.abstractmethod + def validate(self) -> None: # pragma: no cover + pass + + @abc.abstractmethod + def normalize(self) -> TableData: # pragma: no cover + pass + + +class AbstractTableDataNormalizer(TableDataNormalizerInterface): + @property + def _type_hints(self) -> list[TypeHint]: + return self._tabledata.dp_extractor.column_type_hints + + def __init__(self, tabledata: TableData) -> None: + self._tabledata = tabledata + + def validate(self) -> None: + if not self._tabledata.table_name: + raise ValueError("table_name must not be empty") + + self._validate_table_name(self._tabledata.table_name) + self._validate_headers() + + def sanitize(self): # type: ignore + warnings.warn( + "'sanitize' method is deprecated and will be removed in the future." + " use 'normalize' method instead.", + DeprecationWarning, + ) + + return self.normalize() + + def normalize(self) -> TableData: + """ + :return: Sanitized table data. + :rtype: tabledata.TableData + """ + + logger.debug(f"normalize: {type(self).__name__}") + + normalize_headers = self._normalize_headers() + + return TableData( + self.__normalize_table_name(), + normalize_headers, + self._normalize_rows(normalize_headers), + dp_extractor=self._tabledata.dp_extractor, + type_hints=self._type_hints, + max_workers=self._tabledata.max_workers, + ) + + @abc.abstractmethod + def _preprocess_table_name(self) -> str: + """ + This method is always called before table name validation. + You must return preprocessed table name. + """ + + @abc.abstractmethod + def _validate_table_name(self, table_name: str) -> None: + """ + Must raise :py:class:`~.InvalidTableNameError` + when you consider the table name invalid. + + :param str header: Table name to validate. + :raises tabledata.InvalidTableNameError: + If the table name is invalid. + |raises_validate_table_name| + """ + + @abc.abstractmethod + def _normalize_table_name(self, table_name: str) -> str: + """ + Must return a valid table name. 
+ The table name must be considered to be a valid name by + :py:meth:`~._validate_table_name` method. + + This method called when :py:meth:`~._validate_table_name` method raise + :py:class:`~.InvalidTableNameError`. + + :param str table_name: Table name to normalize. + :return: Sanitized table name. + :rtype: str + """ + + @abc.abstractmethod + def _preprocess_header(self, col_idx: int, header: str) -> str: + """ + This method is always called before a header validation. + You must return preprocessed header. + """ + + @abc.abstractmethod + def _validate_header(self, header: str) -> None: + """ + No operation. + + This method called for each table header. Override this method + in a subclass if you want to detect invalid table header elements. + Raise :py:class:`~.InvalidHeaderNameError` if an invalid + header element found. + + :param str header: Table header name. + :raises tabledata.InvalidHeaderNameError: + If the ``header`` is invalid. + """ + + @abc.abstractmethod + def _normalize_header(self, header: str) -> str: + """ + Must return a valid header name. + This method called when :py:meth:`~._validate_header` method raise + :py:class:`~.InvalidHeaderNameError`. + Override this method in subclass if you want to rename invalid + table header element. + + :param str header: Header name to normalize. + :return: Renamed header name. 
+ :rtype: str + """ + + def _normalize_rows(self, normalize_headers: Sequence[str]) -> list: + return list(self._tabledata.rows) + + def _validate_headers(self) -> None: + for header in self._tabledata.headers: + self._validate_header(header) + + def __normalize_table_name(self) -> str: + preprocessed_table_name = self._preprocess_table_name() + + try: + self._validate_table_name(preprocessed_table_name) + new_table_name = preprocessed_table_name + except InvalidTableNameError: + new_table_name = self._normalize_table_name(preprocessed_table_name) + self._validate_table_name(new_table_name) + + return new_table_name + + def _normalize_headers(self) -> list[str]: + new_header_list = [] + + for col_idx, header in enumerate(self._tabledata.headers): + header = self._preprocess_header(col_idx, header) + + try: + self._validate_header(header) + new_header = header + except InvalidHeaderNameError: + new_header = self._normalize_header(header) + self._validate_header(new_header) + + new_header_list.append(new_header) + + return new_header_list + + +class TableDataNormalizer(AbstractTableDataNormalizer): + def _preprocess_table_name(self) -> str: + if not self._tabledata.table_name: + return "" + + return self._tabledata.table_name + + def _validate_table_name(self, table_name: str) -> None: + try: + typepy.String(table_name).validate() + except TypeError as e: + raise InvalidTableNameError(e) + + def _normalize_table_name(self, table_name: str) -> str: + return str(typepy.String(table_name).force_convert()) + + def _preprocess_header(self, col_idx: int, header: str) -> str: + return header + + def _validate_header(self, header: str) -> None: + try: + typepy.String(header).validate() + except TypeError as e: + raise InvalidHeaderNameError(e) + + def _normalize_header(self, header: str) -> str: + return str(typepy.String(header).force_convert()) diff --git a/venv/lib/python3.10/site-packages/tabledata/py.typed b/venv/lib/python3.10/site-packages/tabledata/py.typed new file 
mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3708e7dd5f34d07a3150e5dc4c6aef15096baaa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/METADATA @@ -0,0 +1,846 @@ +Metadata-Version: 2.4 +Name: transformers +Version: 4.55.1 +Summary: State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow +Home-page: https://github.com/huggingface/transformers +Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors) +Author-email: transformers@huggingface.co +License: Apache 2.0 License +Keywords: NLP vision speech deep learning transformer pytorch tensorflow jax BERT GPT-2 Wav2Vec2 ViT +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 
3.13 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.9.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: filelock +Requires-Dist: huggingface-hub<1.0,>=0.34.0 +Requires-Dist: numpy>=1.17 +Requires-Dist: packaging>=20.0 +Requires-Dist: pyyaml>=5.1 +Requires-Dist: regex!=2019.12.17 +Requires-Dist: requests +Requires-Dist: tokenizers<0.22,>=0.21 +Requires-Dist: safetensors>=0.4.3 +Requires-Dist: tqdm>=4.27 +Provides-Extra: ja +Requires-Dist: fugashi>=1.0; extra == "ja" +Requires-Dist: ipadic<2.0,>=1.0.0; extra == "ja" +Requires-Dist: unidic_lite>=1.0.7; extra == "ja" +Requires-Dist: unidic>=1.0.2; extra == "ja" +Requires-Dist: sudachipy>=0.6.6; extra == "ja" +Requires-Dist: sudachidict_core>=20220729; extra == "ja" +Requires-Dist: rhoknp<1.3.1,>=1.1.0; extra == "ja" +Provides-Extra: sklearn +Requires-Dist: scikit-learn; extra == "sklearn" +Provides-Extra: tf +Requires-Dist: tensorflow<2.16,>2.9; extra == "tf" +Requires-Dist: onnxconverter-common; extra == "tf" +Requires-Dist: tf2onnx; extra == "tf" +Requires-Dist: tensorflow-text<2.16; extra == "tf" +Requires-Dist: keras-nlp<0.14.0,>=0.3.1; extra == "tf" +Provides-Extra: tf-cpu +Requires-Dist: keras<2.16,>2.9; extra == "tf-cpu" +Requires-Dist: tensorflow-cpu<2.16,>2.9; extra == "tf-cpu" +Requires-Dist: onnxconverter-common; extra == "tf-cpu" +Requires-Dist: tf2onnx; extra == "tf-cpu" +Requires-Dist: tensorflow-text<2.16; extra == "tf-cpu" +Requires-Dist: keras-nlp<0.14.0,>=0.3.1; extra == "tf-cpu" +Requires-Dist: tensorflow-probability<0.24; extra == "tf-cpu" +Provides-Extra: torch +Requires-Dist: torch>=2.1; extra == "torch" +Requires-Dist: accelerate>=0.26.0; extra == "torch" +Provides-Extra: accelerate +Requires-Dist: accelerate>=0.26.0; extra == "accelerate" +Provides-Extra: hf-xet +Requires-Dist: hf_xet; extra == "hf-xet" +Provides-Extra: retrieval +Requires-Dist: faiss-cpu; extra == "retrieval" +Requires-Dist: datasets>=2.15.0; extra 
== "retrieval" +Provides-Extra: flax +Requires-Dist: jax<=0.4.13,>=0.4.1; extra == "flax" +Requires-Dist: jaxlib<=0.4.13,>=0.4.1; extra == "flax" +Requires-Dist: flax<=0.7.0,>=0.4.1; extra == "flax" +Requires-Dist: optax<=0.1.4,>=0.0.8; extra == "flax" +Requires-Dist: scipy<1.13.0; extra == "flax" +Provides-Extra: tokenizers +Requires-Dist: tokenizers<0.22,>=0.21; extra == "tokenizers" +Provides-Extra: ftfy +Requires-Dist: ftfy; extra == "ftfy" +Provides-Extra: onnxruntime +Requires-Dist: onnxruntime>=1.4.0; extra == "onnxruntime" +Requires-Dist: onnxruntime-tools>=1.4.2; extra == "onnxruntime" +Provides-Extra: onnx +Requires-Dist: onnxconverter-common; extra == "onnx" +Requires-Dist: tf2onnx; extra == "onnx" +Requires-Dist: onnxruntime>=1.4.0; extra == "onnx" +Requires-Dist: onnxruntime-tools>=1.4.2; extra == "onnx" +Provides-Extra: modelcreation +Requires-Dist: cookiecutter==1.7.3; extra == "modelcreation" +Provides-Extra: sagemaker +Requires-Dist: sagemaker>=2.31.0; extra == "sagemaker" +Provides-Extra: deepspeed +Requires-Dist: deepspeed>=0.9.3; extra == "deepspeed" +Requires-Dist: accelerate>=0.26.0; extra == "deepspeed" +Provides-Extra: optuna +Requires-Dist: optuna; extra == "optuna" +Provides-Extra: ray +Requires-Dist: ray[tune]>=2.7.0; extra == "ray" +Provides-Extra: sigopt +Requires-Dist: sigopt; extra == "sigopt" +Provides-Extra: hub-kernels +Requires-Dist: kernels<=0.9,>=0.6.1; extra == "hub-kernels" +Provides-Extra: integrations +Requires-Dist: kernels<=0.9,>=0.6.1; extra == "integrations" +Requires-Dist: optuna; extra == "integrations" +Requires-Dist: ray[tune]>=2.7.0; extra == "integrations" +Requires-Dist: sigopt; extra == "integrations" +Provides-Extra: serving +Requires-Dist: openai>=1.98.0; extra == "serving" +Requires-Dist: pydantic>=2; extra == "serving" +Requires-Dist: uvicorn; extra == "serving" +Requires-Dist: fastapi; extra == "serving" +Requires-Dist: starlette; extra == "serving" +Requires-Dist: torch>=2.1; extra == "serving" 
+Requires-Dist: accelerate>=0.26.0; extra == "serving" +Provides-Extra: audio +Requires-Dist: librosa; extra == "audio" +Requires-Dist: pyctcdecode>=0.4.0; extra == "audio" +Requires-Dist: phonemizer; extra == "audio" +Requires-Dist: kenlm; extra == "audio" +Provides-Extra: speech +Requires-Dist: torchaudio; extra == "speech" +Requires-Dist: librosa; extra == "speech" +Requires-Dist: pyctcdecode>=0.4.0; extra == "speech" +Requires-Dist: phonemizer; extra == "speech" +Requires-Dist: kenlm; extra == "speech" +Provides-Extra: torch-speech +Requires-Dist: torchaudio; extra == "torch-speech" +Requires-Dist: librosa; extra == "torch-speech" +Requires-Dist: pyctcdecode>=0.4.0; extra == "torch-speech" +Requires-Dist: phonemizer; extra == "torch-speech" +Requires-Dist: kenlm; extra == "torch-speech" +Provides-Extra: tf-speech +Requires-Dist: librosa; extra == "tf-speech" +Requires-Dist: pyctcdecode>=0.4.0; extra == "tf-speech" +Requires-Dist: phonemizer; extra == "tf-speech" +Requires-Dist: kenlm; extra == "tf-speech" +Provides-Extra: flax-speech +Requires-Dist: librosa; extra == "flax-speech" +Requires-Dist: pyctcdecode>=0.4.0; extra == "flax-speech" +Requires-Dist: phonemizer; extra == "flax-speech" +Requires-Dist: kenlm; extra == "flax-speech" +Provides-Extra: vision +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "vision" +Provides-Extra: timm +Requires-Dist: timm!=1.0.18,<=1.0.19; extra == "timm" +Provides-Extra: torch-vision +Requires-Dist: torchvision; extra == "torch-vision" +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "torch-vision" +Provides-Extra: natten +Requires-Dist: natten<0.15.0,>=0.14.6; extra == "natten" +Provides-Extra: codecarbon +Requires-Dist: codecarbon>=2.8.1; extra == "codecarbon" +Provides-Extra: video +Requires-Dist: av; extra == "video" +Provides-Extra: num2words +Requires-Dist: num2words; extra == "num2words" +Provides-Extra: sentencepiece +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "sentencepiece" +Requires-Dist: protobuf; extra 
== "sentencepiece" +Provides-Extra: tiktoken +Requires-Dist: tiktoken; extra == "tiktoken" +Requires-Dist: blobfile; extra == "tiktoken" +Provides-Extra: mistral-common +Requires-Dist: mistral-common[opencv]>=1.6.3; extra == "mistral-common" +Provides-Extra: testing +Requires-Dist: pytest>=7.2.0; extra == "testing" +Requires-Dist: pytest-asyncio; extra == "testing" +Requires-Dist: pytest-rich; extra == "testing" +Requires-Dist: pytest-xdist; extra == "testing" +Requires-Dist: pytest-order; extra == "testing" +Requires-Dist: pytest-rerunfailures; extra == "testing" +Requires-Dist: timeout-decorator; extra == "testing" +Requires-Dist: parameterized>=0.9; extra == "testing" +Requires-Dist: psutil; extra == "testing" +Requires-Dist: datasets>=2.15.0; extra == "testing" +Requires-Dist: dill<0.3.5; extra == "testing" +Requires-Dist: evaluate>=0.2.0; extra == "testing" +Requires-Dist: pytest-timeout; extra == "testing" +Requires-Dist: ruff==0.11.2; extra == "testing" +Requires-Dist: rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1; extra == "testing" +Requires-Dist: nltk<=3.8.1; extra == "testing" +Requires-Dist: GitPython<3.1.19; extra == "testing" +Requires-Dist: sacremoses; extra == "testing" +Requires-Dist: rjieba; extra == "testing" +Requires-Dist: beautifulsoup4; extra == "testing" +Requires-Dist: tensorboard; extra == "testing" +Requires-Dist: pydantic>=2; extra == "testing" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "testing" +Requires-Dist: sacrebleu<2.0.0,>=1.4.12; extra == "testing" +Requires-Dist: libcst; extra == "testing" +Requires-Dist: faiss-cpu; extra == "testing" +Requires-Dist: datasets>=2.15.0; extra == "testing" +Requires-Dist: cookiecutter==1.7.3; extra == "testing" +Requires-Dist: mistral-common[opencv]>=1.6.3; extra == "testing" +Provides-Extra: deepspeed-testing +Requires-Dist: deepspeed>=0.9.3; extra == "deepspeed-testing" +Requires-Dist: accelerate>=0.26.0; extra == "deepspeed-testing" +Requires-Dist: pytest>=7.2.0; extra == 
"deepspeed-testing" +Requires-Dist: pytest-asyncio; extra == "deepspeed-testing" +Requires-Dist: pytest-rich; extra == "deepspeed-testing" +Requires-Dist: pytest-xdist; extra == "deepspeed-testing" +Requires-Dist: pytest-order; extra == "deepspeed-testing" +Requires-Dist: pytest-rerunfailures; extra == "deepspeed-testing" +Requires-Dist: timeout-decorator; extra == "deepspeed-testing" +Requires-Dist: parameterized>=0.9; extra == "deepspeed-testing" +Requires-Dist: psutil; extra == "deepspeed-testing" +Requires-Dist: datasets>=2.15.0; extra == "deepspeed-testing" +Requires-Dist: dill<0.3.5; extra == "deepspeed-testing" +Requires-Dist: evaluate>=0.2.0; extra == "deepspeed-testing" +Requires-Dist: pytest-timeout; extra == "deepspeed-testing" +Requires-Dist: ruff==0.11.2; extra == "deepspeed-testing" +Requires-Dist: rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1; extra == "deepspeed-testing" +Requires-Dist: nltk<=3.8.1; extra == "deepspeed-testing" +Requires-Dist: GitPython<3.1.19; extra == "deepspeed-testing" +Requires-Dist: sacremoses; extra == "deepspeed-testing" +Requires-Dist: rjieba; extra == "deepspeed-testing" +Requires-Dist: beautifulsoup4; extra == "deepspeed-testing" +Requires-Dist: tensorboard; extra == "deepspeed-testing" +Requires-Dist: pydantic>=2; extra == "deepspeed-testing" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "deepspeed-testing" +Requires-Dist: sacrebleu<2.0.0,>=1.4.12; extra == "deepspeed-testing" +Requires-Dist: libcst; extra == "deepspeed-testing" +Requires-Dist: faiss-cpu; extra == "deepspeed-testing" +Requires-Dist: datasets>=2.15.0; extra == "deepspeed-testing" +Requires-Dist: cookiecutter==1.7.3; extra == "deepspeed-testing" +Requires-Dist: mistral-common[opencv]>=1.6.3; extra == "deepspeed-testing" +Requires-Dist: optuna; extra == "deepspeed-testing" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "deepspeed-testing" +Requires-Dist: protobuf; extra == "deepspeed-testing" +Provides-Extra: ruff +Requires-Dist: 
ruff==0.11.2; extra == "ruff" +Provides-Extra: quality +Requires-Dist: datasets>=2.15.0; extra == "quality" +Requires-Dist: ruff==0.11.2; extra == "quality" +Requires-Dist: GitPython<3.1.19; extra == "quality" +Requires-Dist: urllib3<2.0.0; extra == "quality" +Requires-Dist: libcst; extra == "quality" +Requires-Dist: rich; extra == "quality" +Requires-Dist: pandas<2.3.0; extra == "quality" +Provides-Extra: all +Requires-Dist: tensorflow<2.16,>2.9; extra == "all" +Requires-Dist: onnxconverter-common; extra == "all" +Requires-Dist: tf2onnx; extra == "all" +Requires-Dist: tensorflow-text<2.16; extra == "all" +Requires-Dist: keras-nlp<0.14.0,>=0.3.1; extra == "all" +Requires-Dist: torch>=2.1; extra == "all" +Requires-Dist: accelerate>=0.26.0; extra == "all" +Requires-Dist: jax<=0.4.13,>=0.4.1; extra == "all" +Requires-Dist: jaxlib<=0.4.13,>=0.4.1; extra == "all" +Requires-Dist: flax<=0.7.0,>=0.4.1; extra == "all" +Requires-Dist: optax<=0.1.4,>=0.0.8; extra == "all" +Requires-Dist: scipy<1.13.0; extra == "all" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "all" +Requires-Dist: protobuf; extra == "all" +Requires-Dist: tokenizers<0.22,>=0.21; extra == "all" +Requires-Dist: torchaudio; extra == "all" +Requires-Dist: librosa; extra == "all" +Requires-Dist: pyctcdecode>=0.4.0; extra == "all" +Requires-Dist: phonemizer; extra == "all" +Requires-Dist: kenlm; extra == "all" +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "all" +Requires-Dist: kernels<=0.9,>=0.6.1; extra == "all" +Requires-Dist: optuna; extra == "all" +Requires-Dist: ray[tune]>=2.7.0; extra == "all" +Requires-Dist: sigopt; extra == "all" +Requires-Dist: timm!=1.0.18,<=1.0.19; extra == "all" +Requires-Dist: torchvision; extra == "all" +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "all" +Requires-Dist: codecarbon>=2.8.1; extra == "all" +Requires-Dist: accelerate>=0.26.0; extra == "all" +Requires-Dist: av; extra == "all" +Requires-Dist: num2words; extra == "all" +Requires-Dist: 
mistral-common[opencv]>=1.6.3; extra == "all" +Provides-Extra: dev-torch +Requires-Dist: pytest>=7.2.0; extra == "dev-torch" +Requires-Dist: pytest-asyncio; extra == "dev-torch" +Requires-Dist: pytest-rich; extra == "dev-torch" +Requires-Dist: pytest-xdist; extra == "dev-torch" +Requires-Dist: pytest-order; extra == "dev-torch" +Requires-Dist: pytest-rerunfailures; extra == "dev-torch" +Requires-Dist: timeout-decorator; extra == "dev-torch" +Requires-Dist: parameterized>=0.9; extra == "dev-torch" +Requires-Dist: psutil; extra == "dev-torch" +Requires-Dist: datasets>=2.15.0; extra == "dev-torch" +Requires-Dist: dill<0.3.5; extra == "dev-torch" +Requires-Dist: evaluate>=0.2.0; extra == "dev-torch" +Requires-Dist: pytest-timeout; extra == "dev-torch" +Requires-Dist: ruff==0.11.2; extra == "dev-torch" +Requires-Dist: rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1; extra == "dev-torch" +Requires-Dist: nltk<=3.8.1; extra == "dev-torch" +Requires-Dist: GitPython<3.1.19; extra == "dev-torch" +Requires-Dist: sacremoses; extra == "dev-torch" +Requires-Dist: rjieba; extra == "dev-torch" +Requires-Dist: beautifulsoup4; extra == "dev-torch" +Requires-Dist: tensorboard; extra == "dev-torch" +Requires-Dist: pydantic>=2; extra == "dev-torch" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev-torch" +Requires-Dist: sacrebleu<2.0.0,>=1.4.12; extra == "dev-torch" +Requires-Dist: libcst; extra == "dev-torch" +Requires-Dist: faiss-cpu; extra == "dev-torch" +Requires-Dist: datasets>=2.15.0; extra == "dev-torch" +Requires-Dist: cookiecutter==1.7.3; extra == "dev-torch" +Requires-Dist: mistral-common[opencv]>=1.6.3; extra == "dev-torch" +Requires-Dist: torch>=2.1; extra == "dev-torch" +Requires-Dist: accelerate>=0.26.0; extra == "dev-torch" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev-torch" +Requires-Dist: protobuf; extra == "dev-torch" +Requires-Dist: tokenizers<0.22,>=0.21; extra == "dev-torch" +Requires-Dist: torchaudio; extra == "dev-torch" +Requires-Dist: 
librosa; extra == "dev-torch" +Requires-Dist: pyctcdecode>=0.4.0; extra == "dev-torch" +Requires-Dist: phonemizer; extra == "dev-torch" +Requires-Dist: kenlm; extra == "dev-torch" +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "dev-torch" +Requires-Dist: kernels<=0.9,>=0.6.1; extra == "dev-torch" +Requires-Dist: optuna; extra == "dev-torch" +Requires-Dist: ray[tune]>=2.7.0; extra == "dev-torch" +Requires-Dist: sigopt; extra == "dev-torch" +Requires-Dist: timm!=1.0.18,<=1.0.19; extra == "dev-torch" +Requires-Dist: torchvision; extra == "dev-torch" +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "dev-torch" +Requires-Dist: codecarbon>=2.8.1; extra == "dev-torch" +Requires-Dist: datasets>=2.15.0; extra == "dev-torch" +Requires-Dist: ruff==0.11.2; extra == "dev-torch" +Requires-Dist: GitPython<3.1.19; extra == "dev-torch" +Requires-Dist: urllib3<2.0.0; extra == "dev-torch" +Requires-Dist: libcst; extra == "dev-torch" +Requires-Dist: rich; extra == "dev-torch" +Requires-Dist: pandas<2.3.0; extra == "dev-torch" +Requires-Dist: fugashi>=1.0; extra == "dev-torch" +Requires-Dist: ipadic<2.0,>=1.0.0; extra == "dev-torch" +Requires-Dist: unidic_lite>=1.0.7; extra == "dev-torch" +Requires-Dist: unidic>=1.0.2; extra == "dev-torch" +Requires-Dist: sudachipy>=0.6.6; extra == "dev-torch" +Requires-Dist: sudachidict_core>=20220729; extra == "dev-torch" +Requires-Dist: rhoknp<1.3.1,>=1.1.0; extra == "dev-torch" +Requires-Dist: scikit-learn; extra == "dev-torch" +Requires-Dist: cookiecutter==1.7.3; extra == "dev-torch" +Requires-Dist: onnxruntime>=1.4.0; extra == "dev-torch" +Requires-Dist: onnxruntime-tools>=1.4.2; extra == "dev-torch" +Requires-Dist: num2words; extra == "dev-torch" +Provides-Extra: dev-tensorflow +Requires-Dist: pytest>=7.2.0; extra == "dev-tensorflow" +Requires-Dist: pytest-asyncio; extra == "dev-tensorflow" +Requires-Dist: pytest-rich; extra == "dev-tensorflow" +Requires-Dist: pytest-xdist; extra == "dev-tensorflow" +Requires-Dist: pytest-order; extra == 
"dev-tensorflow" +Requires-Dist: pytest-rerunfailures; extra == "dev-tensorflow" +Requires-Dist: timeout-decorator; extra == "dev-tensorflow" +Requires-Dist: parameterized>=0.9; extra == "dev-tensorflow" +Requires-Dist: psutil; extra == "dev-tensorflow" +Requires-Dist: datasets>=2.15.0; extra == "dev-tensorflow" +Requires-Dist: dill<0.3.5; extra == "dev-tensorflow" +Requires-Dist: evaluate>=0.2.0; extra == "dev-tensorflow" +Requires-Dist: pytest-timeout; extra == "dev-tensorflow" +Requires-Dist: ruff==0.11.2; extra == "dev-tensorflow" +Requires-Dist: rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1; extra == "dev-tensorflow" +Requires-Dist: nltk<=3.8.1; extra == "dev-tensorflow" +Requires-Dist: GitPython<3.1.19; extra == "dev-tensorflow" +Requires-Dist: sacremoses; extra == "dev-tensorflow" +Requires-Dist: rjieba; extra == "dev-tensorflow" +Requires-Dist: beautifulsoup4; extra == "dev-tensorflow" +Requires-Dist: tensorboard; extra == "dev-tensorflow" +Requires-Dist: pydantic>=2; extra == "dev-tensorflow" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev-tensorflow" +Requires-Dist: sacrebleu<2.0.0,>=1.4.12; extra == "dev-tensorflow" +Requires-Dist: libcst; extra == "dev-tensorflow" +Requires-Dist: faiss-cpu; extra == "dev-tensorflow" +Requires-Dist: datasets>=2.15.0; extra == "dev-tensorflow" +Requires-Dist: cookiecutter==1.7.3; extra == "dev-tensorflow" +Requires-Dist: mistral-common[opencv]>=1.6.3; extra == "dev-tensorflow" +Requires-Dist: tensorflow<2.16,>2.9; extra == "dev-tensorflow" +Requires-Dist: onnxconverter-common; extra == "dev-tensorflow" +Requires-Dist: tf2onnx; extra == "dev-tensorflow" +Requires-Dist: tensorflow-text<2.16; extra == "dev-tensorflow" +Requires-Dist: keras-nlp<0.14.0,>=0.3.1; extra == "dev-tensorflow" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev-tensorflow" +Requires-Dist: protobuf; extra == "dev-tensorflow" +Requires-Dist: tokenizers<0.22,>=0.21; extra == "dev-tensorflow" +Requires-Dist: Pillow<=15.0,>=10.0.1; 
extra == "dev-tensorflow" +Requires-Dist: datasets>=2.15.0; extra == "dev-tensorflow" +Requires-Dist: ruff==0.11.2; extra == "dev-tensorflow" +Requires-Dist: GitPython<3.1.19; extra == "dev-tensorflow" +Requires-Dist: urllib3<2.0.0; extra == "dev-tensorflow" +Requires-Dist: libcst; extra == "dev-tensorflow" +Requires-Dist: rich; extra == "dev-tensorflow" +Requires-Dist: pandas<2.3.0; extra == "dev-tensorflow" +Requires-Dist: scikit-learn; extra == "dev-tensorflow" +Requires-Dist: cookiecutter==1.7.3; extra == "dev-tensorflow" +Requires-Dist: onnxconverter-common; extra == "dev-tensorflow" +Requires-Dist: tf2onnx; extra == "dev-tensorflow" +Requires-Dist: onnxruntime>=1.4.0; extra == "dev-tensorflow" +Requires-Dist: onnxruntime-tools>=1.4.2; extra == "dev-tensorflow" +Requires-Dist: librosa; extra == "dev-tensorflow" +Requires-Dist: pyctcdecode>=0.4.0; extra == "dev-tensorflow" +Requires-Dist: phonemizer; extra == "dev-tensorflow" +Requires-Dist: kenlm; extra == "dev-tensorflow" +Provides-Extra: dev +Requires-Dist: tensorflow<2.16,>2.9; extra == "dev" +Requires-Dist: onnxconverter-common; extra == "dev" +Requires-Dist: tf2onnx; extra == "dev" +Requires-Dist: tensorflow-text<2.16; extra == "dev" +Requires-Dist: keras-nlp<0.14.0,>=0.3.1; extra == "dev" +Requires-Dist: torch>=2.1; extra == "dev" +Requires-Dist: accelerate>=0.26.0; extra == "dev" +Requires-Dist: jax<=0.4.13,>=0.4.1; extra == "dev" +Requires-Dist: jaxlib<=0.4.13,>=0.4.1; extra == "dev" +Requires-Dist: flax<=0.7.0,>=0.4.1; extra == "dev" +Requires-Dist: optax<=0.1.4,>=0.0.8; extra == "dev" +Requires-Dist: scipy<1.13.0; extra == "dev" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev" +Requires-Dist: protobuf; extra == "dev" +Requires-Dist: tokenizers<0.22,>=0.21; extra == "dev" +Requires-Dist: torchaudio; extra == "dev" +Requires-Dist: librosa; extra == "dev" +Requires-Dist: pyctcdecode>=0.4.0; extra == "dev" +Requires-Dist: phonemizer; extra == "dev" +Requires-Dist: kenlm; extra == "dev" 
+Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "dev" +Requires-Dist: kernels<=0.9,>=0.6.1; extra == "dev" +Requires-Dist: optuna; extra == "dev" +Requires-Dist: ray[tune]>=2.7.0; extra == "dev" +Requires-Dist: sigopt; extra == "dev" +Requires-Dist: timm!=1.0.18,<=1.0.19; extra == "dev" +Requires-Dist: torchvision; extra == "dev" +Requires-Dist: Pillow<=15.0,>=10.0.1; extra == "dev" +Requires-Dist: codecarbon>=2.8.1; extra == "dev" +Requires-Dist: accelerate>=0.26.0; extra == "dev" +Requires-Dist: av; extra == "dev" +Requires-Dist: num2words; extra == "dev" +Requires-Dist: mistral-common[opencv]>=1.6.3; extra == "dev" +Requires-Dist: pytest>=7.2.0; extra == "dev" +Requires-Dist: pytest-asyncio; extra == "dev" +Requires-Dist: pytest-rich; extra == "dev" +Requires-Dist: pytest-xdist; extra == "dev" +Requires-Dist: pytest-order; extra == "dev" +Requires-Dist: pytest-rerunfailures; extra == "dev" +Requires-Dist: timeout-decorator; extra == "dev" +Requires-Dist: parameterized>=0.9; extra == "dev" +Requires-Dist: psutil; extra == "dev" +Requires-Dist: datasets>=2.15.0; extra == "dev" +Requires-Dist: dill<0.3.5; extra == "dev" +Requires-Dist: evaluate>=0.2.0; extra == "dev" +Requires-Dist: pytest-timeout; extra == "dev" +Requires-Dist: ruff==0.11.2; extra == "dev" +Requires-Dist: rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1; extra == "dev" +Requires-Dist: nltk<=3.8.1; extra == "dev" +Requires-Dist: GitPython<3.1.19; extra == "dev" +Requires-Dist: sacremoses; extra == "dev" +Requires-Dist: rjieba; extra == "dev" +Requires-Dist: beautifulsoup4; extra == "dev" +Requires-Dist: tensorboard; extra == "dev" +Requires-Dist: pydantic>=2; extra == "dev" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev" +Requires-Dist: sacrebleu<2.0.0,>=1.4.12; extra == "dev" +Requires-Dist: libcst; extra == "dev" +Requires-Dist: faiss-cpu; extra == "dev" +Requires-Dist: datasets>=2.15.0; extra == "dev" +Requires-Dist: cookiecutter==1.7.3; extra == "dev" +Requires-Dist: 
mistral-common[opencv]>=1.6.3; extra == "dev" +Requires-Dist: datasets>=2.15.0; extra == "dev" +Requires-Dist: ruff==0.11.2; extra == "dev" +Requires-Dist: GitPython<3.1.19; extra == "dev" +Requires-Dist: urllib3<2.0.0; extra == "dev" +Requires-Dist: libcst; extra == "dev" +Requires-Dist: rich; extra == "dev" +Requires-Dist: pandas<2.3.0; extra == "dev" +Requires-Dist: fugashi>=1.0; extra == "dev" +Requires-Dist: ipadic<2.0,>=1.0.0; extra == "dev" +Requires-Dist: unidic_lite>=1.0.7; extra == "dev" +Requires-Dist: unidic>=1.0.2; extra == "dev" +Requires-Dist: sudachipy>=0.6.6; extra == "dev" +Requires-Dist: sudachidict_core>=20220729; extra == "dev" +Requires-Dist: rhoknp<1.3.1,>=1.1.0; extra == "dev" +Requires-Dist: scikit-learn; extra == "dev" +Requires-Dist: cookiecutter==1.7.3; extra == "dev" +Provides-Extra: torchhub +Requires-Dist: filelock; extra == "torchhub" +Requires-Dist: huggingface-hub<1.0,>=0.34.0; extra == "torchhub" +Requires-Dist: importlib_metadata; extra == "torchhub" +Requires-Dist: numpy>=1.17; extra == "torchhub" +Requires-Dist: packaging>=20.0; extra == "torchhub" +Requires-Dist: protobuf; extra == "torchhub" +Requires-Dist: regex!=2019.12.17; extra == "torchhub" +Requires-Dist: requests; extra == "torchhub" +Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "torchhub" +Requires-Dist: torch>=2.1; extra == "torchhub" +Requires-Dist: tokenizers<0.22,>=0.21; extra == "torchhub" +Requires-Dist: tqdm>=4.27; extra == "torchhub" +Provides-Extra: benchmark +Requires-Dist: optimum-benchmark>=0.3.0; extra == "benchmark" +Provides-Extra: open-telemetry +Requires-Dist: opentelemetry-api; extra == "open-telemetry" +Requires-Dist: opentelemetry-exporter-otlp; extra == "open-telemetry" +Requires-Dist: opentelemetry-sdk; extra == "open-telemetry" +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: keywords +Dynamic: license +Dynamic: license-file +Dynamic: 
provides-extra +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + + + +

    + + + + Hugging Face Transformers Library + +
    +
    +

    + +

    + Checkpoints on Hub + Build + GitHub + Documentation + GitHub release + Contributor Covenant + DOI +

    + +

    +

    + English | + 简体中文 | + 繁體中文 | + 한국어 | + Español | + 日本語 | + हिन्दी | + Русский | + Português | + తెలుగు | + Français | + Deutsch | + Tiếng Việt | + العربية | + اردو | +

    +

    + +

    +

    State-of-the-art pretrained models for inference and training

    +

    + +

    + +

    + + +Transformers acts as the model-definition framework for state-of-the-art machine learning models in text, computer +vision, audio, video, and multimodal model, for both inference and training. + +It centralizes the model definition so that this definition is agreed upon across the ecosystem. `transformers` is the +pivot across frameworks: if a model definition is supported, it will be compatible with the majority of training +frameworks (Axolotl, Unsloth, DeepSpeed, FSDP, PyTorch-Lightning, ...), inference engines (vLLM, SGLang, TGI, ...), +and adjacent modeling libraries (llama.cpp, mlx, ...) which leverage the model definition from `transformers`. + +We pledge to help support new state-of-the-art models and democratize their usage by having their model definition be +simple, customizable, and efficient. + +There are over 1M+ Transformers [model checkpoints](https://huggingface.co/models?library=transformers&sort=trending) on the [Hugging Face Hub](https://huggingface.com/models) you can use. + +Explore the [Hub](https://huggingface.com/) today to find a model and use Transformers to help you get started right away. + +## Installation + +Transformers works with Python 3.9+ [PyTorch](https://pytorch.org/get-started/locally/) 2.1+, [TensorFlow](https://www.tensorflow.org/install/pip) 2.6+, and [Flax](https://flax.readthedocs.io/en/latest/) 0.4.1+. + +Create and activate a virtual environment with [venv](https://docs.python.org/3/library/venv.html) or [uv](https://docs.astral.sh/uv/), a fast Rust-based Python package and project manager. + +```py +# venv +python -m venv .my-env +source .my-env/bin/activate +# uv +uv venv .my-env +source .my-env/bin/activate +``` + +Install Transformers in your virtual environment. + +```py +# pip +pip install "transformers[torch]" + +# uv +uv pip install "transformers[torch]" +``` + +Install Transformers from source if you want the latest changes in the library or are interested in contributing. 
However, the *latest* version may not be stable. Feel free to open an [issue](https://github.com/huggingface/transformers/issues) if you encounter an error. + +```shell +git clone https://github.com/huggingface/transformers.git +cd transformers + +# pip +pip install .[torch] + +# uv +uv pip install .[torch] +``` + +## Quickstart + +Get started with Transformers right away with the [Pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) API. The `Pipeline` is a high-level inference class that supports text, audio, vision, and multimodal tasks. It handles preprocessing the input and returns the appropriate output. + +Instantiate a pipeline and specify model to use for text generation. The model is downloaded and cached so you can easily reuse it again. Finally, pass some text to prompt the model. + +```py +from transformers import pipeline + +pipeline = pipeline(task="text-generation", model="Qwen/Qwen2.5-1.5B") +pipeline("the secret to baking a really good cake is ") +[{'generated_text': 'the secret to baking a really good cake is 1) to use the right ingredients and 2) to follow the recipe exactly. the recipe for the cake is as follows: 1 cup of sugar, 1 cup of flour, 1 cup of milk, 1 cup of butter, 1 cup of eggs, 1 cup of chocolate chips. if you want to make 2 cakes, how much sugar do you need? To make 2 cakes, you will need 2 cups of sugar.'}] +``` + +To chat with a model, the usage pattern is the same. The only difference is you need to construct a chat history (the input to `Pipeline`) between you and the system. + +> [!TIP] +> You can also chat with a model directly from the command line. 
+> ```shell +> transformers chat Qwen/Qwen2.5-0.5B-Instruct +> ``` + +```py +import torch +from transformers import pipeline + +chat = [ + {"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."}, + {"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"} +] + +pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto") +response = pipeline(chat, max_new_tokens=512) +print(response[0]["generated_text"][-1]["content"]) +``` + +Expand the examples below to see how `Pipeline` works for different modalities and tasks. + +
    +Automatic speech recognition + +```py +from transformers import pipeline + +pipeline = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3") +pipeline("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") +{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} +``` + +
    + +
    +Image classification + +

    + +

    + +```py +from transformers import pipeline + +pipeline = pipeline(task="image-classification", model="facebook/dinov2-small-imagenet1k-1-layer") +pipeline("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") +[{'label': 'macaw', 'score': 0.997848391532898}, + {'label': 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', + 'score': 0.0016551691805943847}, + {'label': 'lorikeet', 'score': 0.00018523589824326336}, + {'label': 'African grey, African gray, Psittacus erithacus', + 'score': 7.85409429227002e-05}, + {'label': 'quail', 'score': 5.502637941390276e-05}] +``` + +
    + +
    +Visual question answering + + +

    + +

    + +```py +from transformers import pipeline + +pipeline = pipeline(task="visual-question-answering", model="Salesforce/blip-vqa-base") +pipeline( + image="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/idefics-few-shot.jpg", + question="What is in the image?", +) +[{'answer': 'statue of liberty'}] +``` + +
    + +## Why should I use Transformers? + +1. Easy-to-use state-of-the-art models: + - High performance on natural language understanding & generation, computer vision, audio, video, and multimodal tasks. + - Low barrier to entry for researchers, engineers, and developers. + - Few user-facing abstractions with just three classes to learn. + - A unified API for using all our pretrained models. + +1. Lower compute costs, smaller carbon footprint: + - Share trained models instead of training from scratch. + - Reduce compute time and production costs. + - Dozens of model architectures with 1M+ pretrained checkpoints across all modalities. + +1. Choose the right framework for every part of a models lifetime: + - Train state-of-the-art models in 3 lines of code. + - Move a single model between PyTorch/JAX/TF2.0 frameworks at will. + - Pick the right framework for training, evaluation, and production. + +1. Easily customize a model or an example to your needs: + - We provide examples for each architecture to reproduce the results published by its original authors. + - Model internals are exposed as consistently as possible. + - Model files can be used independently of the library for quick experiments. + + + Hugging Face Enterprise Hub +
    + +## Why shouldn't I use Transformers? + +- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files. +- The training API is optimized to work with PyTorch models provided by Transformers. For generic machine learning loops, you should use another library like [Accelerate](https://huggingface.co/docs/accelerate). +- The [example scripts](https://github.com/huggingface/transformers/tree/main/examples) are only *examples*. They may not necessarily work out-of-the-box on your specific use case and you'll need to adapt the code for it to work. + +## 100 projects using Transformers + +Transformers is more than a toolkit to use pretrained models, it's a community of projects built around it and the +Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone +else to build their dream projects. + +In order to celebrate Transformers 100,000 stars, we wanted to put the spotlight on the +community with the [awesome-transformers](./awesome-transformers.md) page which lists 100 +incredible projects built with Transformers. + +If you own or use a project that you believe should be part of the list, please open a PR to add it! + +## Example models + +You can test most of our models directly on their [Hub model pages](https://huggingface.co/models). + +Expand each modality below to see a few example models for various use cases. + +
    +Audio + +- Audio classification with [Whisper](https://huggingface.co/openai/whisper-large-v3-turbo) +- Automatic speech recognition with [Moonshine](https://huggingface.co/UsefulSensors/moonshine) +- Keyword spotting with [Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks) +- Speech to speech generation with [Moshi](https://huggingface.co/kyutai/moshiko-pytorch-bf16) +- Text to audio with [MusicGen](https://huggingface.co/facebook/musicgen-large) +- Text to speech with [Bark](https://huggingface.co/suno/bark) + +
    + +
    +Computer vision + +- Automatic mask generation with [SAM](https://huggingface.co/facebook/sam-vit-base) +- Depth estimation with [DepthPro](https://huggingface.co/apple/DepthPro-hf) +- Image classification with [DINO v2](https://huggingface.co/facebook/dinov2-base) +- Keypoint detection with [SuperPoint](https://huggingface.co/magic-leap-community/superpoint) +- Keypoint matching with [SuperGlue](https://huggingface.co/magic-leap-community/superglue_outdoor) +- Object detection with [RT-DETRv2](https://huggingface.co/PekingU/rtdetr_v2_r50vd) +- Pose Estimation with [VitPose](https://huggingface.co/usyd-community/vitpose-base-simple) +- Universal segmentation with [OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_swin_large) +- Video classification with [VideoMAE](https://huggingface.co/MCG-NJU/videomae-large) + +
    + +
    +Multimodal + +- Audio or text to text with [Qwen2-Audio](https://huggingface.co/Qwen/Qwen2-Audio-7B) +- Document question answering with [LayoutLMv3](https://huggingface.co/microsoft/layoutlmv3-base) +- Image or text to text with [Qwen-VL](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct) +- Image captioning [BLIP-2](https://huggingface.co/Salesforce/blip2-opt-2.7b) +- OCR-based document understanding with [GOT-OCR2](https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf) +- Table question answering with [TAPAS](https://huggingface.co/google/tapas-base) +- Unified multimodal understanding and generation with [Emu3](https://huggingface.co/BAAI/Emu3-Gen) +- Vision to text with [Llava-OneVision](https://huggingface.co/llava-hf/llava-onevision-qwen2-0.5b-ov-hf) +- Visual question answering with [Llava](https://huggingface.co/llava-hf/llava-1.5-7b-hf) +- Visual referring expression segmentation with [Kosmos-2](https://huggingface.co/microsoft/kosmos-2-patch14-224) + +
    + +
    +NLP + +- Masked word completion with [ModernBERT](https://huggingface.co/answerdotai/ModernBERT-base) +- Named entity recognition with [Gemma](https://huggingface.co/google/gemma-2-2b) +- Question answering with [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) +- Summarization with [BART](https://huggingface.co/facebook/bart-large-cnn) +- Translation with [T5](https://huggingface.co/google-t5/t5-base) +- Text generation with [Llama](https://huggingface.co/meta-llama/Llama-3.2-1B) +- Text classification with [Qwen](https://huggingface.co/Qwen/Qwen2.5-0.5B) + +
    + +## Citation + +We now have a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) you can cite for the 🤗 Transformers library: +```bibtex +@inproceedings{wolf-etal-2020-transformers, + title = "Transformers: State-of-the-Art Natural Language Processing", + author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush", + booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", + month = oct, + year = "2020", + address = "Online", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6", + pages = "38--45" +} +``` diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a1316aec2784ba954b1179ec44ebf2237d3d428a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/RECORD @@ -0,0 +1,4185 @@ +../../../bin/transformers,sha256=xHVu8foVffcLZ0S5jkbiBmmm8UokEozZPPKxmhqYaMk,307 +../../../bin/transformers-cli,sha256=WtCmPE7_kRBOqedfwn6ecF7LARvP2Jt1lGeJaH5q_L4,315 +transformers-4.55.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +transformers-4.55.1.dist-info/METADATA,sha256=UHb42GZgvR2YczdoumWAp85g96qU_Mxq180tMxjCuhc,41969 +transformers-4.55.1.dist-info/RECORD,, +transformers-4.55.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +transformers-4.55.1.dist-info/entry_points.txt,sha256=Zra3dVQyt6Q3fU_suoD3gF81JV3WeV8gH66vzoev408,144 
+transformers-4.55.1.dist-info/licenses/LICENSE,sha256=d_1HEN757DwPYiWADgI18VpCWr1KiwNVkSf814JhIEk,11418 +transformers-4.55.1.dist-info/top_level.txt,sha256=GLBaeTo_CSdhnHvbxQ0kzpEHdlLuA_33foIogaWxntI,13 +transformers/__init__.py,sha256=N3xPuTTMEQfU0l42uT3sacWgH_yOZyH6oEDG6nJYS-M,46485 +transformers/__pycache__/__init__.cpython-310.pyc,, +transformers/__pycache__/activations.cpython-310.pyc,, +transformers/__pycache__/activations_tf.cpython-310.pyc,, +transformers/__pycache__/audio_utils.cpython-310.pyc,, +transformers/__pycache__/cache_utils.cpython-310.pyc,, +transformers/__pycache__/configuration_utils.cpython-310.pyc,, +transformers/__pycache__/convert_graph_to_onnx.cpython-310.pyc,, +transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc,, +transformers/__pycache__/convert_slow_tokenizer.cpython-310.pyc,, +transformers/__pycache__/convert_slow_tokenizers_checkpoints_to_fast.cpython-310.pyc,, +transformers/__pycache__/convert_tf_hub_seq_to_seq_bert_to_pytorch.cpython-310.pyc,, +transformers/__pycache__/debug_utils.cpython-310.pyc,, +transformers/__pycache__/dependency_versions_check.cpython-310.pyc,, +transformers/__pycache__/dependency_versions_table.cpython-310.pyc,, +transformers/__pycache__/dynamic_module_utils.cpython-310.pyc,, +transformers/__pycache__/feature_extraction_sequence_utils.cpython-310.pyc,, +transformers/__pycache__/feature_extraction_utils.cpython-310.pyc,, +transformers/__pycache__/file_utils.cpython-310.pyc,, +transformers/__pycache__/hf_argparser.cpython-310.pyc,, +transformers/__pycache__/hyperparameter_search.cpython-310.pyc,, +transformers/__pycache__/image_processing_base.cpython-310.pyc,, +transformers/__pycache__/image_processing_utils.cpython-310.pyc,, +transformers/__pycache__/image_processing_utils_fast.cpython-310.pyc,, +transformers/__pycache__/image_transforms.cpython-310.pyc,, +transformers/__pycache__/image_utils.cpython-310.pyc,, +transformers/__pycache__/keras_callbacks.cpython-310.pyc,, 
+transformers/__pycache__/masking_utils.cpython-310.pyc,, +transformers/__pycache__/model_debugging_utils.cpython-310.pyc,, +transformers/__pycache__/modelcard.cpython-310.pyc,, +transformers/__pycache__/modeling_attn_mask_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_flash_attention_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_flax_outputs.cpython-310.pyc,, +transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_flax_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_gguf_pytorch_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_layers.cpython-310.pyc,, +transformers/__pycache__/modeling_outputs.cpython-310.pyc,, +transformers/__pycache__/modeling_rope_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_tf_outputs.cpython-310.pyc,, +transformers/__pycache__/modeling_tf_pytorch_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_tf_utils.cpython-310.pyc,, +transformers/__pycache__/modeling_utils.cpython-310.pyc,, +transformers/__pycache__/optimization.cpython-310.pyc,, +transformers/__pycache__/optimization_tf.cpython-310.pyc,, +transformers/__pycache__/processing_utils.cpython-310.pyc,, +transformers/__pycache__/pytorch_utils.cpython-310.pyc,, +transformers/__pycache__/safetensors_conversion.cpython-310.pyc,, +transformers/__pycache__/testing_utils.cpython-310.pyc,, +transformers/__pycache__/tf_utils.cpython-310.pyc,, +transformers/__pycache__/time_series_utils.cpython-310.pyc,, +transformers/__pycache__/tokenization_mistral_common.cpython-310.pyc,, +transformers/__pycache__/tokenization_utils.cpython-310.pyc,, +transformers/__pycache__/tokenization_utils_base.cpython-310.pyc,, +transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc,, +transformers/__pycache__/trainer.cpython-310.pyc,, +transformers/__pycache__/trainer_callback.cpython-310.pyc,, +transformers/__pycache__/trainer_pt_utils.cpython-310.pyc,, 
+transformers/__pycache__/trainer_seq2seq.cpython-310.pyc,, +transformers/__pycache__/trainer_utils.cpython-310.pyc,, +transformers/__pycache__/training_args.cpython-310.pyc,, +transformers/__pycache__/training_args_seq2seq.cpython-310.pyc,, +transformers/__pycache__/training_args_tf.cpython-310.pyc,, +transformers/__pycache__/video_processing_utils.cpython-310.pyc,, +transformers/__pycache__/video_utils.cpython-310.pyc,, +transformers/activations.py,sha256=BE9RCFCqghTIIS5rVyaRITo2_1bZd_xyPagb4eoQuLs,7781 +transformers/activations_tf.py,sha256=TGmah3loMs_pERwxpjWb5-AUeHLoBAyDxFYWVuLC7FU,4729 +transformers/audio_utils.py,sha256=tQRkKv6id6HduKtypO65uHv9Ia5RQAIT7gmcWOMFSeU,51807 +transformers/cache_utils.py,sha256=m-MDT_1ZsEk7rL7-kh3sM3_sfJ7V73jPy1nBzgj-1Eo,109999 +transformers/commands/__init__.py,sha256=aFO3I7C6G9OLA9JZSc_yMaZl0glOQtjNPjqMFfu9wfQ,923 +transformers/commands/__pycache__/__init__.cpython-310.pyc,, +transformers/commands/__pycache__/add_fast_image_processor.cpython-310.pyc,, +transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc,, +transformers/commands/__pycache__/chat.cpython-310.pyc,, +transformers/commands/__pycache__/convert.cpython-310.pyc,, +transformers/commands/__pycache__/download.cpython-310.pyc,, +transformers/commands/__pycache__/env.cpython-310.pyc,, +transformers/commands/__pycache__/run.cpython-310.pyc,, +transformers/commands/__pycache__/serving.cpython-310.pyc,, +transformers/commands/__pycache__/train.cpython-310.pyc,, +transformers/commands/__pycache__/transformers_cli.cpython-310.pyc,, +transformers/commands/add_fast_image_processor.py,sha256=HIVXaU8NERWdsSJuyjnSp8bAnXxHojrwPCegX9IcfYU,24141 +transformers/commands/add_new_model_like.py,sha256=B-1WnK2UiZ8tlnFluwDgN4Pa6dJHflm4zIvIZpvr1E4,33377 +transformers/commands/chat.py,sha256=36FfWWZn5VSI5MRGCkMcZXK1fmpP6xaNvyKhbVIUMkM,31449 +transformers/commands/convert.py,sha256=IhyqKqO33anJiIwneOBCogxREJkfH7qIP_3At2xnoVE,7064 
+transformers/commands/download.py,sha256=GKPadx-YGBL7dHJSEcUp-QNOP3R2L71-gPGP0z6NNQI,2395 +transformers/commands/env.py,sha256=lC6D4ssqBQc_CEyFsJ_sHKj7lsvs7g1EfQG73IVvnFs,7024 +transformers/commands/run.py,sha256=nyEe2lOoj6e0EOxjKeF08hdW9WVWa101r9hWXl9v3Jo,4249 +transformers/commands/serving.py,sha256=5LbOGVxZctEDy1eF8pou16dALzOMJSc2AbeSP1QirnY,65883 +transformers/commands/train.py,sha256=SDGD_DF2-y9n2sqW2c77j5a4B9Lj8sRWHZ-VU4bnx_U,6337 +transformers/commands/transformers_cli.py,sha256=cFlXM_DHUCFgf6KnjpAcvebihZL5UKKIOlZtixopBVw,2281 +transformers/configuration_utils.py,sha256=koT83dqTtWKp0_7DbLaojfDripI3Ln6tLboVG3LPz_8,61497 +transformers/convert_graph_to_onnx.py,sha256=g-BvJuYIq2wDmHxQ0Ng2DrpwqNshxAbQNk4zjegX4nw,20130 +transformers/convert_pytorch_checkpoint_to_tf2.py,sha256=xNNks70V1s5Hesk4_xr65VgurQGd3Jv-pRw70isWjQc,14415 +transformers/convert_slow_tokenizer.py,sha256=jle2J9QvNOARfWMJC8etj1xm6AKMF4h9Pkx-rpkL2Ss,63658 +transformers/convert_slow_tokenizers_checkpoints_to_fast.py,sha256=Sa8NS-oVEYDgqYEhUfg-WuB4a8RsLReIu067twp8uCA,5061 +transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py,sha256=02fwRNsiK3zmmL9O_hgsduomBuTDHWh8vcTyk2GOlz8,2895 +transformers/data/__init__.py,sha256=MuXSchTzRSaUtUDC1uSeDkHiSbjtrQZg4IoKeKHoH6A,1490 +transformers/data/__pycache__/__init__.cpython-310.pyc,, +transformers/data/__pycache__/data_collator.cpython-310.pyc,, +transformers/data/data_collator.py,sha256=guOI80GqNReZlVnEYZIumud7IjXXSxrzY-OKanRXfqE,101123 +transformers/data/datasets/__init__.py,sha256=PGzUJjdmTPOPMyjV4-Tj3sNrmmh-lspjyxrVbrfJoX8,909 +transformers/data/datasets/__pycache__/__init__.cpython-310.pyc,, +transformers/data/datasets/__pycache__/glue.cpython-310.pyc,, +transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc,, +transformers/data/datasets/__pycache__/squad.cpython-310.pyc,, +transformers/data/datasets/glue.py,sha256=d2ys4oU49fQJ3ZXLpGGyou54lWOY2UJMUbZcdqaBNxg,6245 
+transformers/data/datasets/language_modeling.py,sha256=tNZvgig_gzJzuEjc0wGVQa86Jx2wUaQMATDCBrqN-z8,23709 +transformers/data/datasets/squad.py,sha256=uEA-pVJFVKtZlliRBE_TU5MtWenJfJtacKtZLtOG_3w,9295 +transformers/data/metrics/__init__.py,sha256=o9t_VTQtqU3lEhqvocDzFMm7OvAKD-uxrjPWy0r74BI,3632 +transformers/data/metrics/__pycache__/__init__.cpython-310.pyc,, +transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc,, +transformers/data/metrics/squad_metrics.py,sha256=fKA4MXBLgyB4p7EaPCiaLMR-CW5NDweXSDZh1qO02NM,29699 +transformers/data/processors/__init__.py,sha256=lvN5mp9mdrr5v6QvZT6VcoZ78zZUvXiumTm6Gdvlgvo,1014 +transformers/data/processors/__pycache__/__init__.cpython-310.pyc,, +transformers/data/processors/__pycache__/glue.cpython-310.pyc,, +transformers/data/processors/__pycache__/squad.cpython-310.pyc,, +transformers/data/processors/__pycache__/utils.cpython-310.pyc,, +transformers/data/processors/__pycache__/xnli.cpython-310.pyc,, +transformers/data/processors/glue.py,sha256=IGwrYOn1sg6mztFzwfA_Eb9KyuvIYL4iYBDe5b-m83Y,23214 +transformers/data/processors/squad.py,sha256=aKeAhkB_zAZliI0n8V4rYHFPGJChND3OZ0AN9wHs2c8,33303 +transformers/data/processors/utils.py,sha256=tljqv-RDmkbfutIo2cUYbJuL75PfXMB3IP2mOM4gQJA,13823 +transformers/data/processors/xnli.py,sha256=sgcYz9YSfHY9NS0LO_YeFRRjq-nJFsDhFUP4NJeu-Q4,3481 +transformers/debug_utils.py,sha256=6m6Ks51IXFlIhEPdbXbsFi_3ZWjM34kpxV5l7-LR4bU,12891 +transformers/dependency_versions_check.py,sha256=6HbgtT2Wp-QZGOAdyUOklHvNA4rOVITGHrX34dtMOqg,2115 +transformers/dependency_versions_table.py,sha256=Mag9fB6cnH16MXchuz-sfoGj_jeI0_MTI0H9EJtPcc0,3979 +transformers/distributed/__init__.py,sha256=ds-xiU6Hko8BN-XiIF2cJZPCjrQ-JFlodRARkPK8g-0,978 +transformers/distributed/__pycache__/__init__.cpython-310.pyc,, +transformers/distributed/__pycache__/configuration_utils.cpython-310.pyc,, +transformers/distributed/configuration_utils.py,sha256=rBPisXQ4szdnjxqxtFWOJYjOtnQ7JSCdbWs4_3xA1fU,4438 
+transformers/dynamic_module_utils.py,sha256=V5jWxqMGqLqyDDT6cvQPeouU_QCEPOfH41BMR3w0gK8,34776 +transformers/feature_extraction_sequence_utils.py,sha256=U60TIDSbdFI_MD0Jxoe_aEGrwjoqEafP6eGMYKpF9jE,18273 +transformers/feature_extraction_utils.py,sha256=RzUyKCPU7M5P2R__IPy5QBWLkTO0WXeGD5F6nnhbPZY,30051 +transformers/file_utils.py,sha256=qGXLORUv3xflV0GcJdJryr_aWc6w8PJ4S-eQGTaYxpQ,3698 +transformers/generation/__init__.py,sha256=DX3zYcNeKVjWjzeax92L6e3xM8zNQC-WhqZrSS1Jo5s,12472 +transformers/generation/__pycache__/__init__.cpython-310.pyc,, +transformers/generation/__pycache__/beam_constraints.cpython-310.pyc,, +transformers/generation/__pycache__/beam_search.cpython-310.pyc,, +transformers/generation/__pycache__/candidate_generator.cpython-310.pyc,, +transformers/generation/__pycache__/configuration_utils.cpython-310.pyc,, +transformers/generation/__pycache__/continuous_batching.cpython-310.pyc,, +transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc,, +transformers/generation/__pycache__/flax_utils.cpython-310.pyc,, +transformers/generation/__pycache__/logits_process.cpython-310.pyc,, +transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc,, +transformers/generation/__pycache__/streamers.cpython-310.pyc,, +transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc,, +transformers/generation/__pycache__/tf_utils.cpython-310.pyc,, +transformers/generation/__pycache__/utils.cpython-310.pyc,, +transformers/generation/__pycache__/watermarking.cpython-310.pyc,, +transformers/generation/beam_constraints.py,sha256=ctpz6PYBFnyJWXS7tgZPG-fRTzMkf2qOoKzGwWK2JwQ,19268 +transformers/generation/beam_search.py,sha256=aSRPHw1Vp8oiCR_4HScMMc66ofty8adVAmj80_1ObAA,49497 +transformers/generation/candidate_generator.py,sha256=aItvWhLA3S4HE7mro-wSsngsZBcQM9rMOCyALOkQJOA,60742 +transformers/generation/configuration_utils.py,sha256=3t9m65HNHKMvu-dIgomt7ZVEUt-Pk8cEJ7p-dRlKcoA,84064 
+transformers/generation/continuous_batching.py,sha256=HddYvCu4MTEB8z2KdG2jzWFiOqaYjzLwo4Rj8HZpCkg,60401 +transformers/generation/flax_logits_process.py,sha256=d9K9Np1169WLpcXRnS_MwtWVKNAgDzKSA2fq5rom9FI,23008 +transformers/generation/flax_utils.py,sha256=M04Chm6S0EiGgt9XSBE70iSL8YYHGhu4QeAsFX1dBCI,50641 +transformers/generation/logits_process.py,sha256=7OVdRCq0XmxT1iRQBc0WPHIhrR-ckPw17Ps9XkxZYz8,152235 +transformers/generation/stopping_criteria.py,sha256=FqN7zP4qlPt9GSRtibBiHapcjcy3A1QZIIVRAk6XMWQ,28932 +transformers/generation/streamers.py,sha256=Mj_bPFPCh4225Z_oFLc5wLegJPCUzF83ppojDdh19mA,12985 +transformers/generation/tf_logits_process.py,sha256=q9KY6Fx6pfQ15_7Lm_oAD4_Eyr3ApM4pmhjeNcNeF4M,28639 +transformers/generation/tf_utils.py,sha256=6B48u3CpCO7AEtSdhSxOjNVfqDpzcIIQqNR9NmTUCbo,175687 +transformers/generation/utils.py,sha256=qPHChybogAbs7qfSJz86vuj6ubXJizpDflphXT-Op1s,286684 +transformers/generation/watermarking.py,sha256=IbT_aNam8yTFCZl0oXpyLDi4ilZnyIqySHKOq6Py--E,24526 +transformers/hf_argparser.py,sha256=qIhpTm1qGgJAOi9m-qGyXbqnYD7oFfm8LbfU7HayXoI,20672 +transformers/hyperparameter_search.py,sha256=1PGHNbFHqQD8Y0FSWgDec6OxbzJWJCJe2uWDX5r4vwE,4194 +transformers/image_processing_base.py,sha256=1WqpokgvNLkQL7IbRSTov5YDpLzCh2Ikuql-D8TufHI,24634 +transformers/image_processing_utils.py,sha256=xXfKFX_pImg_zttx-l6S2gF-Vki1mSPRG2vcm6csR5U,13587 +transformers/image_processing_utils_fast.py,sha256=SNmIKxBMZX5wzZiiSuxk92rjHURnU1ILkP21KaId9ZI,27539 +transformers/image_transforms.py,sha256=U-1qz7p90kpCF1hLuGk8H3jceCKqYwCYdz3329nv1Dg,41511 +transformers/image_utils.py,sha256=2ewxK96J0UOSQifZKfJ6pazgvOC7F_XpW2vB2guCDVs,37376 +transformers/integrations/__init__.py,sha256=W2Z-UEW2PCX17CtT2Y8-PBAUW4oCtye-IQV6dzjWXTQ,9481 +transformers/integrations/__pycache__/__init__.cpython-310.pyc,, +transformers/integrations/__pycache__/accelerate.cpython-310.pyc,, +transformers/integrations/__pycache__/aqlm.cpython-310.pyc,, +transformers/integrations/__pycache__/awq.cpython-310.pyc,, 
+transformers/integrations/__pycache__/bitnet.cpython-310.pyc,, +transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc,, +transformers/integrations/__pycache__/deepspeed.cpython-310.pyc,, +transformers/integrations/__pycache__/eager_paged.cpython-310.pyc,, +transformers/integrations/__pycache__/eetq.cpython-310.pyc,, +transformers/integrations/__pycache__/executorch.cpython-310.pyc,, +transformers/integrations/__pycache__/fbgemm_fp8.cpython-310.pyc,, +transformers/integrations/__pycache__/finegrained_fp8.cpython-310.pyc,, +transformers/integrations/__pycache__/flash_attention.cpython-310.pyc,, +transformers/integrations/__pycache__/flash_paged.cpython-310.pyc,, +transformers/integrations/__pycache__/flex_attention.cpython-310.pyc,, +transformers/integrations/__pycache__/fp_quant.cpython-310.pyc,, +transformers/integrations/__pycache__/fsdp.cpython-310.pyc,, +transformers/integrations/__pycache__/ggml.cpython-310.pyc,, +transformers/integrations/__pycache__/higgs.cpython-310.pyc,, +transformers/integrations/__pycache__/hqq.cpython-310.pyc,, +transformers/integrations/__pycache__/hub_kernels.cpython-310.pyc,, +transformers/integrations/__pycache__/integration_utils.cpython-310.pyc,, +transformers/integrations/__pycache__/mistral.cpython-310.pyc,, +transformers/integrations/__pycache__/mxfp4.cpython-310.pyc,, +transformers/integrations/__pycache__/npu_flash_attention.cpython-310.pyc,, +transformers/integrations/__pycache__/peft.cpython-310.pyc,, +transformers/integrations/__pycache__/quanto.cpython-310.pyc,, +transformers/integrations/__pycache__/sdpa_attention.cpython-310.pyc,, +transformers/integrations/__pycache__/sdpa_paged.cpython-310.pyc,, +transformers/integrations/__pycache__/spqr.cpython-310.pyc,, +transformers/integrations/__pycache__/tensor_parallel.cpython-310.pyc,, +transformers/integrations/__pycache__/tiktoken.cpython-310.pyc,, +transformers/integrations/__pycache__/tpu.cpython-310.pyc,, 
+transformers/integrations/__pycache__/vptq.cpython-310.pyc,, +transformers/integrations/accelerate.py,sha256=nEQ-TMOFaXomnuKMogrzU_J4PNCZM-Bs6GPddPGbuW0,7351 +transformers/integrations/aqlm.py,sha256=T2gpCoj62L5hkyJzm6tJlP_emhJlepezKN4y1HWueVI,4535 +transformers/integrations/awq.py,sha256=gIAEOj3Tepd_eQadBbPkeMpRlHc3it4tDFzQf8XOKF4,20579 +transformers/integrations/bitnet.py,sha256=-AQ7JCa7cOcuq4tGreVgyME_k7i3D5BVUT9OYM-tg-w,15718 +transformers/integrations/bitsandbytes.py,sha256=UR7BOYCcnYDgiyrmzXzLQ5EIdTK8HxtNlDtZMNqeVHo,23901 +transformers/integrations/deepspeed.py,sha256=ioCDBbFddzR_TcdtaI_x4r7DZw-rOcSSfx95qi1xlMQ,21805 +transformers/integrations/eager_paged.py,sha256=zX9wO8snknsWMaXephsWHM4S1yQpj24XRAm_YNmFSsI,1667 +transformers/integrations/eetq.py,sha256=wpofdy55HcvaTaOXrO_VMbmG1Rfly-kN1JfzOxw5X0U,5364 +transformers/integrations/executorch.py,sha256=ZfOXWSIKX9MhdYu5IXzWlHQm506L14lltN1P4Ytq0H0,43136 +transformers/integrations/fbgemm_fp8.py,sha256=jwTi8hC_Y12YSKktXqUktCCtl_qO-Ncx3uvIyFcni1o,12441 +transformers/integrations/finegrained_fp8.py,sha256=AlhRxjh7bjn-0DwonNrWhiXtBQiClrrF3hHJf3dlYLw,15120 +transformers/integrations/flash_attention.py,sha256=qlQMURuKfQrHTMZ7br_my73VpQKAv4iwMM9dSvUZB50,3050 +transformers/integrations/flash_paged.py,sha256=DMt5a9VwlQJNU2GIeFgdst30eRy3eauCgcS8NaMqDKY,3280 +transformers/integrations/flex_attention.py,sha256=uMt9LK-hIcvEpBSsfgOzuc5q1Pugyi2w7pUxez-yg9A,12334 +transformers/integrations/fp_quant.py,sha256=MLC4bZucw-tTDoWjQIHUbj0nEYDM702t03KPjUZxExc,1672 +transformers/integrations/fsdp.py,sha256=fIrl8PQhkQiCQ5EqJjkwCdbTRLXMwmb9qn-Klp9Gew0,1138 +transformers/integrations/ggml.py,sha256=23nJmP-TS9mNg-NkWr84uBapzvsd9w75sxELxHRqFdw,28597 +transformers/integrations/higgs.py,sha256=4s7Ym9cfiSg0PAAZOeIPt5iQkkml7s_ZZHwnKvlTe4A,31400 +transformers/integrations/hqq.py,sha256=GeTogGSqPyrgTvTHzxwt5TZhpc1vRj_lb2DdWy5BKkI,5075 +transformers/integrations/hub_kernels.py,sha256=ZjPaohcr8k8VpSL5QcfekifQm_vuR5ypOPk87ClFUk4,3872 
+transformers/integrations/integration_utils.py,sha256=8HO6VkdTj4n2kAfkL8J2hrTPwWaDaFcMsv7ISbEYqIU,112904 +transformers/integrations/mistral.py,sha256=xMEkFF5aKOLcXWS_cnRXma4RMOSXn94uacKy1OVIRJU,4017 +transformers/integrations/mxfp4.py,sha256=KfbXVYmhAiI7GuyiTklV7GP08Qj3cyx7n4v1eSlnPww,18967 +transformers/integrations/npu_flash_attention.py,sha256=luMbhT_sk2yg1xbZSEjbVlwXWjJ9HCJCXypxU6kuuWc,4979 +transformers/integrations/peft.py,sha256=VevDC04v3PW738laxNr6NOhBYcAcuzKbB96eU9eACgg,28753 +transformers/integrations/quanto.py,sha256=m3tz7fCciceEe3mJc1i8GNVWcKTQ--GopPGwU4ctZ4I,4377 +transformers/integrations/sdpa_attention.py,sha256=0Wu_C8n90ZfPArKReVnS0sF7jJVVVDKqJ7TN6DoUADM,4541 +transformers/integrations/sdpa_paged.py,sha256=uv9gOkGrJk7t_YX1JyT43SFcZzpVvcHjqaSR0dSvVM8,1721 +transformers/integrations/spqr.py,sha256=nHTdlyfkCc5vJO60TMZuE9pUiTTPfaqYV7kVLF6PMd0,5525 +transformers/integrations/tensor_parallel.py,sha256=QYUCz80RfSeH4vzGWSV0PbnJaQreBmXhETNAly_MjTE,49701 +transformers/integrations/tiktoken.py,sha256=2s3O3_3dsA7pbsz1Lu_eLA2SrloMZWVpg0NklRxPMlY,1627 +transformers/integrations/tpu.py,sha256=JtzQLGX0mnci_xKVxoXPDqrAT_YLSCaw2WK-4IssCu4,1394 +transformers/integrations/vptq.py,sha256=15NwmsI95i7qcNyC-g52IfuPB2jFHBIzqt3KbUIhyEc,4544 +transformers/keras_callbacks.py,sha256=OoYb2VkmFFEqJnwEu-_Wt1VK-33oBTIpLnPrYX14abg,20634 +transformers/kernels/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +transformers/kernels/__pycache__/__init__.cpython-310.pyc,, +transformers/kernels/deta/cpu/ms_deform_attn_cpu.cpp,sha256=VcCGm9IrvgVvmyZt0KyP16Q-ONmbeg6bKwccP6KadL0,1255 +transformers/kernels/deta/cpu/ms_deform_attn_cpu.h,sha256=nvVsKj9nabQ7IaNY4di5xVx6u-0lIifQvLg2JCoxiik,1138 +transformers/kernels/deta/cuda/ms_deform_attn_cuda.cu,sha256=M5-bW9g5z-upTFMNPIfnyLAqKTxGMCjAPqBr0GmWHX8,7360 +transformers/kernels/deta/cuda/ms_deform_attn_cuda.cuh,sha256=hygB20Vh3RttOSdCuTFz8V0d3CXNp-Q89x22rYmD258,61433 
+transformers/kernels/deta/cuda/ms_deform_attn_cuda.h,sha256=rPWOOMo3QyFdB5kMiexpApLFZ4dnRtx4CluEAGwsfO8,1139 +transformers/kernels/deta/cuda/ms_deform_im2col_cuda.cuh,sha256=BRN8-yfSHY8ChLij8jFl2_z2LL0LEFKuVF6Byi-YLAY,54695 +transformers/kernels/deta/ms_deform_attn.h,sha256=H2bBXGyl0R-v2DqGVz11asoRvxbjZ9iWB9djomZTpgY,1837 +transformers/kernels/deta/vision.cpp,sha256=8RvZy7P_MMx5QEszo_MwNODddJLQ8mKcmmMfgLYC_HA,798 +transformers/kernels/falcon_mamba/__init__.py,sha256=bt0j851F1uuH7flSsTvIqdh9zdKVTOVKWt3datb15SI,721 +transformers/kernels/falcon_mamba/__pycache__/__init__.cpython-310.pyc,, +transformers/kernels/falcon_mamba/__pycache__/selective_scan_with_ln_interface.cpython-310.pyc,, +transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py,sha256=649oJD0sox1I-TCkZuRMjYm3tWQkQ3VoPXLNeOcN_ss,19731 +transformers/kernels/mra/cuda_kernel.cu,sha256=LxxRYTymSoBEQpWXHA0PMzwZwpolcwX7mFAjwU8-ZMc,11678 +transformers/kernels/mra/cuda_kernel.h,sha256=UJvYq_MDzhcp07bZpYcOBn8ZGFcf_Ax1dynuiVTBvmA,1682 +transformers/kernels/mra/cuda_launch.cu,sha256=Ox5MTACriC30CGyn-g1Kb5EgQSMAZSaN6fpit3xLFWc,4072 +transformers/kernels/mra/cuda_launch.h,sha256=RVCkN_euasvgPK0zADNRvRYGWd4ah5l9X-7UG_AcdH8,707 +transformers/kernels/mra/torch_extension.cpp,sha256=N0YdBLVX0lZabckJzV_RYTHS2atCNvn13E4Ivobt25g,1405 +transformers/kernels/rwkv/wkv_cuda.cu,sha256=EvaUrEnw_qr2EjMKP-Pq7VPzFfGlMJnFhdHNLtn1fPU,6219 +transformers/kernels/rwkv/wkv_cuda_bf16.cu,sha256=DG9hTtOAlrnpDFahjt-MmnOxjMuhGU55GPsmV21HtrQ,6633 +transformers/kernels/rwkv/wkv_op.cpp,sha256=qSExhKdT6p3hyaTv5SypCnH_c7EmaX6HbhTcCntvZWg,4022 +transformers/kernels/yoso/common.h,sha256=Tq2rOUtE8Y4DRAUrRISvwIwVI3u8JBf21WgWSAYiDlQ,273 +transformers/kernels/yoso/common_cuda.h,sha256=Sji70AuVcuZSotLF7Gotmun9MJuOHo8wEkxizKXLRtc,258 +transformers/kernels/yoso/common_cuda_device.h,sha256=y6WUgAiapnMKqthRMS5s-DMSWNVkar_i8g4KPFvqiuk,2063 +transformers/kernels/yoso/fast_lsh_cumulation.cu,sha256=LA4LGNgyXT3osIyQtFBcRanSyNQWm8yqmpz7AeLP7cw,19061 
+transformers/kernels/yoso/fast_lsh_cumulation.h,sha256=1cTWZjOm751HGiEB5P-UPJ8SE1VO7XRyXmBgyxYDyjI,1575 +transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu,sha256=HKGLWl-WFz5BXjaAPHTNTbG6IUkJjhBdvFf2K7hrDVQ,32870 +transformers/kernels/yoso/fast_lsh_cumulation_cuda.h,sha256=_KGI8HQbVFtCN5KAcSGpyiJ2foGi26RKen138CUc2fY,5490 +transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp,sha256=-Rh7o39Z3rtOPwNnEM-c51TCqywpVdK0WVaA7VRrXbQ,3154 +transformers/loss/__init__.py,sha256=qETsqCwayu6Ymj_J4_A_eiwiaMRHQ0noWKM35naanzc,606 +transformers/loss/__pycache__/__init__.cpython-310.pyc,, +transformers/loss/__pycache__/loss_d_fine.cpython-310.pyc,, +transformers/loss/__pycache__/loss_deformable_detr.cpython-310.pyc,, +transformers/loss/__pycache__/loss_for_object_detection.cpython-310.pyc,, +transformers/loss/__pycache__/loss_grounding_dino.cpython-310.pyc,, +transformers/loss/__pycache__/loss_rt_detr.cpython-310.pyc,, +transformers/loss/__pycache__/loss_utils.cpython-310.pyc,, +transformers/loss/loss_d_fine.py,sha256=pyVihlU1CQraOzUjFLrPXIsVSHxHhCun2SIzvOZFEDs,15881 +transformers/loss/loss_deformable_detr.py,sha256=pUwwrAVxEwa2qamyoTIqlxpll_rBTXCOn67bW73ZKuc,7321 +transformers/loss/loss_for_object_detection.py,sha256=fZuLWKzaCtGvCmlpevpHnIGp4BFIPJIIU4GcFqDO7r0,24581 +transformers/loss/loss_grounding_dino.py,sha256=Efh5GmRzZHjK3ZoNCNCRhU1GV9pcBtHDKxFbrJwr3K0,11190 +transformers/loss/loss_rt_detr.py,sha256=rGk8fFh1qoPgsRL0-vHw3FrjL3wRNV81-XQTFrElTeM,22130 +transformers/loss/loss_utils.py,sha256=Mq7QqPrxsbYYMApR0eT8NWSgap9PTsETCyfx-g5PVls,6936 +transformers/masking_utils.py,sha256=4pr-XHBYUGsiwONeXv2Lt0nc_C558yZD1EKz5ZCI63A,58122 +transformers/model_debugging_utils.py,sha256=l-BY-PJEoamDQyxSuzK5rTMG3LzhdzaZP280yv9Rt2Y,17052 +transformers/modelcard.py,sha256=LGTf5iHPFz6bC1celbGk21gabAr9baqC2Jph_bBMYjs,35854 +transformers/modeling_attn_mask_utils.py,sha256=oIEM72sNYJO_2qNJs63eLmM_06a-UHPYC14IsvEsNUs,21517 
+transformers/modeling_flash_attention_utils.py,sha256=U8_sNNv-RTf2R__n-xm3ma2tncqcr72zJTd_c10LzPA,31551 +transformers/modeling_flax_outputs.py,sha256=1RQh6VTIIVgh2OME-EkUdJc2NdBi5TEXBHCFCupFASs,42188 +transformers/modeling_flax_pytorch_utils.py,sha256=rhewsql7OtuXPqpORVigrGzw8sAPcTK7D0z4lWwAYz0,21550 +transformers/modeling_flax_utils.py,sha256=TbaDBlsD6A4nh9xkeBW1H7bxYcxsQXA4ZesXY2ReJSc,61240 +transformers/modeling_gguf_pytorch_utils.py,sha256=uRgngKGMbKEl6E-cUVw8acjHtv4wCkD-kyMA8fFtWOU,20359 +transformers/modeling_layers.py,sha256=YCqIvJ_FCnuC0Zds_Tf4WgNAO9tszajt6cj1wNo-cwo,11483 +transformers/modeling_outputs.py,sha256=sdgyoU6iT2PxgcutfkEL0VpDCCgzIh_2mAhZpsf9NX4,109642 +transformers/modeling_rope_utils.py,sha256=ORGA2w1ed5aS21nB2SDRvYZFUTMrem-23VrYmEo6mjY,30455 +transformers/modeling_tf_outputs.py,sha256=6THINWzeA-DZyxAdvE3lu3p3h4ZpeBl8sjrOhj9wb9Y,56248 +transformers/modeling_tf_pytorch_utils.py,sha256=Q7-5aDeI2ec3-NYgH6Fp9OpAMmymqyp1mDZGWYve_Io,27982 +transformers/modeling_tf_utils.py,sha256=Rqc7BVljDxocTRTXu2IuiUtpMAFijjtxXx46dZIDsbE,166141 +transformers/modeling_utils.py,sha256=h7kzn6tNz36N79hBpU8oV2Y6Cnx1fZkDMmhpYoo7VRU,308695 +transformers/models/__init__.py,sha256=mMUzXybMj3mjU-QMrzFzAdjn73-Wd4oXKYpWlBqxy8w,10680 +transformers/models/__pycache__/__init__.cpython-310.pyc,, +transformers/models/aimv2/__init__.py,sha256=cDli19QT_YABtn4DPLYfoWHtkmOQYGipAgPKGuRje4c,991 +transformers/models/aimv2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/aimv2/__pycache__/configuration_aimv2.cpython-310.pyc,, +transformers/models/aimv2/__pycache__/modeling_aimv2.cpython-310.pyc,, +transformers/models/aimv2/__pycache__/modular_aimv2.cpython-310.pyc,, +transformers/models/aimv2/configuration_aimv2.py,sha256=K0yaVDpIlBtlYi8xDfhpU7ndTASqTu8nV_rCoWAAAyM,13740 +transformers/models/aimv2/modeling_aimv2.py,sha256=02H3E4lpv9cKDNs0d1_lt6MHmyrkY_RxwTlkXSHY5b8,33340 +transformers/models/aimv2/modular_aimv2.py,sha256=rjPSN1r4WY4gdFnIby-pCNBbRIhYDo3iyX5saLmIsSU,29181 
+transformers/models/albert/__init__.py,sha256=WjQ4NtNxKNj7Hvk9lA2OXmdgD_SNFp1wLS2eeL3-WoE,1154 +transformers/models/albert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/albert/__pycache__/configuration_albert.cpython-310.pyc,, +transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc,, +transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc,, +transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc,, +transformers/models/albert/__pycache__/tokenization_albert.cpython-310.pyc,, +transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc,, +transformers/models/albert/configuration_albert.py,sha256=1jz6sQm_Ki_o_EHJj7mzULanRt3xFoPv3tt_rQg6Ct4,8162 +transformers/models/albert/modeling_albert.py,sha256=UeUK-zmo7Gz66IJaJJSsSMyVg4dIWDoN8XGENjX4v0k,58338 +transformers/models/albert/modeling_flax_albert.py,sha256=-QrqU89tM44jANMS-haQlSARp2uxVuUXCpUnlE8lT58,41035 +transformers/models/albert/modeling_tf_albert.py,sha256=nfIkDUf5d0ORzk1vG-3F-PWTyLSt-7uIUsBpdR9r_wg,68993 +transformers/models/albert/tokenization_albert.py,sha256=kV5S_i-EPu2mZ8f1tr7T-IRsd_W_eshGVGO_veSgQi8,13391 +transformers/models/albert/tokenization_albert_fast.py,sha256=m4Xl3Pb038gBQzlGPAvWFf2G3LrEDdPWhASLBrLucz8,7609 +transformers/models/align/__init__.py,sha256=QqTKk-Z4BylY6EkBSlYvKXVhT2te-m2Al626OUAz-r4,1027 +transformers/models/align/__pycache__/__init__.cpython-310.pyc,, +transformers/models/align/__pycache__/configuration_align.cpython-310.pyc,, +transformers/models/align/__pycache__/modeling_align.cpython-310.pyc,, +transformers/models/align/__pycache__/processing_align.cpython-310.pyc,, +transformers/models/align/configuration_align.py,sha256=T3yiaW_5C9VNHouwd-FGLgwFJd8M8h-a9gBIod0Ys2c,15999 +transformers/models/align/modeling_align.py,sha256=CJaKVRKPFyYRu2HS8fhMWnJc51B2ZVv1nc67FBjeHdI,52106 +transformers/models/align/processing_align.py,sha256=40kW253u0x1k1ifur0qjzTYzeltzrdiq6nzpp4Fkci8,7137 
+transformers/models/altclip/__init__.py,sha256=405IijUCYr1EGvOqg1xzds_GHOlxCl0HCsf1rI0wtPY,1033 +transformers/models/altclip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/altclip/__pycache__/configuration_altclip.cpython-310.pyc,, +transformers/models/altclip/__pycache__/modeling_altclip.cpython-310.pyc,, +transformers/models/altclip/__pycache__/processing_altclip.cpython-310.pyc,, +transformers/models/altclip/configuration_altclip.py,sha256=Usb29EWjpdtZATW-jxZ8VBmms57H2v4UtoGtQ_mvYB4,18498 +transformers/models/altclip/modeling_altclip.py,sha256=KvagnDiw4P0vSsA_mAuGnyZLxe2QAoYjvFrEGJ7wVoI,61585 +transformers/models/altclip/processing_altclip.py,sha256=UGgcbBwUR-ORmf0N3a0FaQtM3FS4NNdUuBguQbB9_lU,6926 +transformers/models/arcee/__init__.py,sha256=bysIumYEa1Z1bCLBaaP_SCT_6poh8zFLgxt_4Ib-Diw,1009 +transformers/models/arcee/__pycache__/__init__.cpython-310.pyc,, +transformers/models/arcee/__pycache__/configuration_arcee.cpython-310.pyc,, +transformers/models/arcee/__pycache__/modeling_arcee.cpython-310.pyc,, +transformers/models/arcee/__pycache__/modular_arcee.cpython-310.pyc,, +transformers/models/arcee/configuration_arcee.py,sha256=HxmzfCnqMmC-NNv8z6K-_v8CbuvsRazQmjz3Ij7d_Y4,10760 +transformers/models/arcee/modeling_arcee.py,sha256=LmmxiM4bUiRfH0QoJ79LkeyWIhuduVrNiGNcwPzmG5g,21065 +transformers/models/arcee/modular_arcee.py,sha256=LuFdyhIVyweFhWfdHk1t1R0OCi5qvwc624WQhFMu4EI,10129 +transformers/models/aria/__init__.py,sha256=I3vYPjV-sDl0OAILLADGZ7hUkk9ZsmyZ8CEf9tie_dY,1066 +transformers/models/aria/__pycache__/__init__.cpython-310.pyc,, +transformers/models/aria/__pycache__/configuration_aria.cpython-310.pyc,, +transformers/models/aria/__pycache__/image_processing_aria.cpython-310.pyc,, +transformers/models/aria/__pycache__/modeling_aria.cpython-310.pyc,, +transformers/models/aria/__pycache__/modular_aria.cpython-310.pyc,, +transformers/models/aria/__pycache__/processing_aria.cpython-310.pyc,, 
+transformers/models/aria/configuration_aria.py,sha256=OfA722tTQm4EEsYxmZM-Wm12cnoH8sQ702bHQSBSBEw,16425 +transformers/models/aria/image_processing_aria.py,sha256=pX14AS5XemXSUgAvcDqSh5VdTun7WW80bTtcSIMSZQ8,24752 +transformers/models/aria/modeling_aria.py,sha256=bfnUnQUstea1V7Dh_VTOMLnjUKgnPeeot4sKd_oq_vM,52199 +transformers/models/aria/modular_aria.py,sha256=VEbfNvhLFTqEbkaiIsArQ-8OsPjL_yJcc5wyCiwPkjA,71497 +transformers/models/aria/processing_aria.py,sha256=GZOl7CP6vdL88ZTJgYGI5ydj1wnc4kqRV45v5BcEYfA,9903 +transformers/models/audio_spectrogram_transformer/__init__.py,sha256=a_YVwB1p4_PPeqPFWqFsGSGSQVTaSUXY0xsOd_Gflqs,1107 +transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc,, +transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc,, +transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc,, +transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py,sha256=HAhLugn_E6Ajr3-3n3qohG5ifAPqNfSuucQ0B2S7tCM,5901 +transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py,sha256=vPBynKfjgNCc2c6T3kd5AKxsstjKBI5Z-oNwtRAH7VY,9929 +transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py,sha256=dn5ZB_yRbp3w69HGfRfRxHK4RWDQCrDkDgr3aNJbvPM,25194 +transformers/models/auto/__init__.py,sha256=wX3m7QJXMmkNMTL6ef7HH18vXdZ0cgUIkHgpVLpGZ_4,1292 +transformers/models/auto/__pycache__/__init__.cpython-310.pyc,, +transformers/models/auto/__pycache__/auto_factory.cpython-310.pyc,, +transformers/models/auto/__pycache__/configuration_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/feature_extraction_auto.cpython-310.pyc,, 
+transformers/models/auto/__pycache__/image_processing_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/modeling_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/modeling_flax_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/modeling_tf_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/processing_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/tokenization_auto.cpython-310.pyc,, +transformers/models/auto/__pycache__/video_processing_auto.cpython-310.pyc,, +transformers/models/auto/auto_factory.py,sha256=JciN8MikivUw9VpYE-DkwNvowMR2Ofw0ig-isUe_lBM,47034 +transformers/models/auto/configuration_auto.py,sha256=Xqukrirr3I_y1NQo6BLgVgsbb1CRarVPU2wOLKMlOAo,51774 +transformers/models/auto/feature_extraction_auto.py,sha256=7iH2sHF7VNfo_Gq03jbuZU4w1UJ6umOdbqyPfJ2KLhY,20347 +transformers/models/auto/image_processing_auto.py,sha256=UEoJgG2cRiUQkn3Y5ty0sAT1YGCgl4kui4zLXuU1oh8,38395 +transformers/models/auto/modeling_auto.py,sha256=4TZLBzSIhcClwv_xyZSyfFHNEddY8leehqsIASg95vw,94334 +transformers/models/auto/modeling_flax_auto.py,sha256=jljyZ4H_wWjcxuVbLUDtO0acB104wm78aXyVNeGu_Zk,15709 +transformers/models/auto/modeling_tf_auto.py,sha256=YWaGWUmrGNg5eieun1OTG_EmtzWy8CU_Ebt9gw6mxyw,30313 +transformers/models/auto/processing_auto.py,sha256=aZvk-OCJe4ZVXysIQeUg7wnPR0x8yHc9RiItvna_At4,20389 +transformers/models/auto/tokenization_auto.py,sha256=KaAb988Slhp4ASAwWk-wgvNQNeDa1tpdCmcqVCAipgQ,56552 +transformers/models/auto/video_processing_auto.py,sha256=eCs5Rk9KH9ugBHlLSt7vGQz741vK622K98jvz0HaYWQ,19067 +transformers/models/autoformer/__init__.py,sha256=EzGIA8hECx9XytdzTifaGyGp7hrXqlyP0slqAq8xBNY,1001 +transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc,, +transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc,, 
+transformers/models/autoformer/configuration_autoformer.py,sha256=hSn6Waq6CuyDFOxAecr9IhFaq4fEAEHn0uiaC_tsa3s,12192 +transformers/models/autoformer/modeling_autoformer.py,sha256=r5Nsp3H5qRMEY-TgKkzP1Q4mEh8iuyGiliLEXZZKMBk,106649 +transformers/models/aya_vision/__init__.py,sha256=-DIHmMjkXOyNGbMtZJkHtLiOzdxOYSrKq4_mmR09cfk,1042 +transformers/models/aya_vision/__pycache__/__init__.cpython-310.pyc,, +transformers/models/aya_vision/__pycache__/configuration_aya_vision.cpython-310.pyc,, +transformers/models/aya_vision/__pycache__/modeling_aya_vision.cpython-310.pyc,, +transformers/models/aya_vision/__pycache__/modular_aya_vision.cpython-310.pyc,, +transformers/models/aya_vision/__pycache__/processing_aya_vision.cpython-310.pyc,, +transformers/models/aya_vision/configuration_aya_vision.py,sha256=tkEfVo-n7XC4cU2UeGZGLLSoHqpNN-3pxTuRoN_Ky_Q,4886 +transformers/models/aya_vision/modeling_aya_vision.py,sha256=iUJB1_YOSdJlV_ttmyN3R63_hhMYPbyFj_VT5OufREQ,23503 +transformers/models/aya_vision/modular_aya_vision.py,sha256=qsWjbo5m7xb_sxZ1MvnBXWTg47zVDVzcnxNauLwcrmY,13277 +transformers/models/aya_vision/processing_aya_vision.py,sha256=kR5SLJaXY6IoJS9moJhMGWTlQHoivw1WetD8EwP6lvU,12789 +transformers/models/bamba/__init__.py,sha256=gtebRUrAdiwq-rJmlM5qpbtbGEg-xxA3pjivOHJvaRs,1040 +transformers/models/bamba/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bamba/__pycache__/configuration_bamba.cpython-310.pyc,, +transformers/models/bamba/__pycache__/modeling_bamba.cpython-310.pyc,, +transformers/models/bamba/__pycache__/modular_bamba.cpython-310.pyc,, +transformers/models/bamba/configuration_bamba.py,sha256=zo-wvX5wz8wWepdJLwQnm2yyZdpHx0lMsE85Quf8RYE,10134 +transformers/models/bamba/modeling_bamba.py,sha256=oo9zrnSO-aXWX53uxSsT5Y6sxo5u6dR5iFAQp0VyW9s,69815 +transformers/models/bamba/modular_bamba.py,sha256=6_lbGBsOQV0ejY22KD3-R3dfMzMlTcdfieg2grgBLFw,55273 +transformers/models/bark/__init__.py,sha256=fIlOQ6RPBARVhUKdjNx2Nvf09azEI6AiPv3lyWjk0Gc,1024 
+transformers/models/bark/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bark/__pycache__/configuration_bark.cpython-310.pyc,, +transformers/models/bark/__pycache__/generation_configuration_bark.cpython-310.pyc,, +transformers/models/bark/__pycache__/modeling_bark.cpython-310.pyc,, +transformers/models/bark/__pycache__/processing_bark.cpython-310.pyc,, +transformers/models/bark/configuration_bark.py,sha256=p9Upfi8NXf4bHj9d2C75qseupR7X7F3JYWFATkVFh7c,11907 +transformers/models/bark/generation_configuration_bark.py,sha256=cI5vwf3ll9YIBKiXpb7HKZwu1-wDrhnlktpYy8i9X94,14955 +transformers/models/bark/modeling_bark.py,sha256=nMQXsLAGfJNlNfMc78DNuaJzM1NUwBKXl4-szUru1P0,72422 +transformers/models/bark/processing_bark.py,sha256=2Vu4KGa57Ruqwxc60fYYW_uKsYQ3FffZaUwIn3YEVX4,13834 +transformers/models/bart/__init__.py,sha256=1_kCOlvj4hcCbNiAsAhH0PYAK4zopuVKAYKZ_64O3_c,1142 +transformers/models/bart/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bart/__pycache__/configuration_bart.cpython-310.pyc,, +transformers/models/bart/__pycache__/modeling_bart.cpython-310.pyc,, +transformers/models/bart/__pycache__/modeling_flax_bart.cpython-310.pyc,, +transformers/models/bart/__pycache__/modeling_tf_bart.cpython-310.pyc,, +transformers/models/bart/__pycache__/tokenization_bart.cpython-310.pyc,, +transformers/models/bart/__pycache__/tokenization_bart_fast.cpython-310.pyc,, +transformers/models/bart/configuration_bart.py,sha256=0BemB9DKkzjpDV-39iIC96OkQHY9sevzmYUmWdG5fHg,18871 +transformers/models/bart/modeling_bart.py,sha256=UJ7l2R1eXl3dIR36KlHufHG6TmuzWeyOTjSQvbZM2oE,89700 +transformers/models/bart/modeling_flax_bart.py,sha256=T8dSDHTYEZBOhc5MAgmtCal_ns_hFCOn98b9dPS3Tho,83070 +transformers/models/bart/modeling_tf_bart.py,sha256=Kb_NgfqI4LdvaLE4Ji6vzTqc9P7zdY7ijjr0osDd0lQ,80645 +transformers/models/bart/tokenization_bart.py,sha256=kSDfbiku7CuiLkGRu2WN4rvk4Ub-fIyM7h1tw8W4Ids,16265 
+transformers/models/bart/tokenization_bart_fast.py,sha256=KT0ISbLlAUn8i77zti_Oe3yebxVSM65gXGMFS3PaeU8,11275 +transformers/models/barthez/__init__.py,sha256=21WBGVafx-0kV-K_2jBdpBg0NBWsRKJqJowo03g2S9A,1003 +transformers/models/barthez/__pycache__/__init__.cpython-310.pyc,, +transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc,, +transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc,, +transformers/models/barthez/tokenization_barthez.py,sha256=_uwi3euB_QpCr9lakhVXRWMK0G-RYTD5WyLhbI4qF6Y,12160 +transformers/models/barthez/tokenization_barthez_fast.py,sha256=7gExI5ls2M0YNtk7Dp5AHW1PAggk5zMzz2tKtQ3x54s,7721 +transformers/models/bartpho/__init__.py,sha256=DN0zgU4dM841Kqqo6wN8FpWFeWYHCBxIq3lxrg5vUoU,958 +transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc,, +transformers/models/bartpho/tokenization_bartpho.py,sha256=fpW_x46y9RWaXd3i1aWRWZN-hAeXnph8DzzLwPWwf10,13619 +transformers/models/beit/__init__.py,sha256=t99cV1TicuPrQlZaHjwkrEi5d7tMQeK7TTooGJIn6-Q,1157 +transformers/models/beit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc,, +transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc,, +transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc,, +transformers/models/beit/__pycache__/image_processing_beit_fast.cpython-310.pyc,, +transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc,, +transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc,, +transformers/models/beit/configuration_beit.py,sha256=zT9actpT-E-p_5LLb6aDvYM8xClvu2pddJWK6wlIfgU,11602 +transformers/models/beit/feature_extraction_beit.py,sha256=I3Hxy2MRCaAr0m4taNn5y8_9_fAXCNpcYZi6gQa5tXY,1284 +transformers/models/beit/image_processing_beit.py,sha256=tnbamvPrXoZIeiIqJb3OJjqPcn2tjCUJ5R9Ctxgho4s,24074 
+transformers/models/beit/image_processing_beit_fast.py,sha256=-SKWE4nhnoEmNCLIgdRNgUi7UIpwy99a-b_hE4RMRqQ,9211 +transformers/models/beit/modeling_beit.py,sha256=U4H9ojR_s0c7wVSkvq-H48IRfK8mQ9duDbUYdrhmybM,65827 +transformers/models/beit/modeling_flax_beit.py,sha256=g6QwQOBdYd5kheWIzaO7Xpok4MFPJWwtopojVj5jLfU,37136 +transformers/models/bert/__init__.py,sha256=8IqoRT5cO4DU3GmQHsJgW-n6MclOZTmho5VYkKDMbnU,1182 +transformers/models/bert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc,, +transformers/models/bert/__pycache__/modeling_bert.cpython-310.pyc,, +transformers/models/bert/__pycache__/modeling_flax_bert.cpython-310.pyc,, +transformers/models/bert/__pycache__/modeling_tf_bert.cpython-310.pyc,, +transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc,, +transformers/models/bert/__pycache__/tokenization_bert_fast.cpython-310.pyc,, +transformers/models/bert/__pycache__/tokenization_bert_tf.cpython-310.pyc,, +transformers/models/bert/configuration_bert.py,sha256=dv6OswIVpNUrWtI7WmM3XaAA8C8ZB-S3Lzs5Jl9LkVk,7314 +transformers/models/bert/modeling_bert.py,sha256=ELW-2aZOJO3KQY8VBQVlQgp1t0ZupRwyqrEM4xdDJao,78498 +transformers/models/bert/modeling_flax_bert.py,sha256=xhjDVfsHDHsdFNHwjNRwjHq9wQW5usH-iKpINjBQ7SQ,64027 +transformers/models/bert/modeling_tf_bert.py,sha256=e7HT05UokKQ8fhpOwvksEcSGY0HDsoBAc7Nzb64xOik,94415 +transformers/models/bert/tokenization_bert.py,sha256=Ffkso5F6UuKyFQ_4Ao4op0k9o1Df90qeA9IJpYj2t98,19766 +transformers/models/bert/tokenization_bert_fast.py,sha256=QE60mWbUbQf8D96L5evCqqrN0hRbKz6LKXhg2Hf8T_A,6557 +transformers/models/bert/tokenization_bert_tf.py,sha256=jmvu68QDk-uMMGM3cHF_1n4PtAMf-PLmgk3xtMBzC90,12060 +transformers/models/bert_generation/__init__.py,sha256=sLEyyFf2yI6QflP1lTI9LXUF5PvWBvu-fsaFbjund5I,1059 +transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc,, +transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc,, +transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc,, +transformers/models/bert_generation/configuration_bert_generation.py,sha256=KHse7kMgoXgcldz0LMonkb6mmNoVRbQ2U07Q3_p6_fI,6393 +transformers/models/bert_generation/modeling_bert_generation.py,sha256=JudC35HheTinO2BgeGBLcpoBx75YdEheaAOzSK37Cqo,39581 +transformers/models/bert_generation/tokenization_bert_generation.py,sha256=yu2PgCmCenfHfutzdLJioGoI2_8r8dbqBL7WmJ6JTZs,7179 +transformers/models/bert_japanese/__init__.py,sha256=94xfgVPnIQuHQxvmc55_EedJlJQTnHiL4va6Ry6x3LE,964 +transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc,, +transformers/models/bert_japanese/tokenization_bert_japanese.py,sha256=-ehwXShgMWynj6owRC5JCJJOusGVpUFCejcuvbVxPrU,37815 +transformers/models/bertweet/__init__.py,sha256=EZegs0rWTTCiOC_eY-M8eV7bCcwU60dB0HsM1S1VDzQ,959 +transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc,, +transformers/models/bertweet/tokenization_bertweet.py,sha256=9WQZwFonhHf2CvY-r7Xa4TG3x_eLY4BZrNHvmA1hdKg,27007 +transformers/models/big_bird/__init__.py,sha256=3rloOuQNKURURWgk5Td4OBQBAzBdTJ2_fM_CI6yPrV0,1126 +transformers/models/big_bird/__pycache__/__init__.cpython-310.pyc,, +transformers/models/big_bird/__pycache__/configuration_big_bird.cpython-310.pyc,, +transformers/models/big_bird/__pycache__/modeling_big_bird.cpython-310.pyc,, +transformers/models/big_bird/__pycache__/modeling_flax_big_bird.cpython-310.pyc,, +transformers/models/big_bird/__pycache__/tokenization_big_bird.cpython-310.pyc,, +transformers/models/big_bird/__pycache__/tokenization_big_bird_fast.cpython-310.pyc,, 
+transformers/models/big_bird/configuration_big_bird.py,sha256=qb_lyze6oqg-PSGaAOSUQkHyeg8ApLGP0c76lZ-aFMI,7892 +transformers/models/big_bird/modeling_big_bird.py,sha256=tQk4N9CeAohDpRmHr7kFM2g8OK6n7FcczI86zOIoUJU,129991 +transformers/models/big_bird/modeling_flax_big_bird.py,sha256=ZNo_0dB2U0PWW2sX_sbg3PBC7DpQv5RMfdpES-ZzTUM,109894 +transformers/models/big_bird/tokenization_big_bird.py,sha256=h0RLGmqUBycIt1lo1gMDd3DNVP4dEz__sF5E4XY23yw,13249 +transformers/models/big_bird/tokenization_big_bird_fast.py,sha256=EmJBuqogH76ZeE9ZjX3DIAloL-MyT-Bl7PpmozcntfA,8946 +transformers/models/bigbird_pegasus/__init__.py,sha256=7zOl1EhO8W2S9jE0FsyEoW8kV6yn5bLA0dspGFM1mLQ,1011 +transformers/models/bigbird_pegasus/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bigbird_pegasus/__pycache__/configuration_bigbird_pegasus.cpython-310.pyc,, +transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc,, +transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py,sha256=KCIddfmLPMNb3LIrJY9xSCMasUjRHP6WB-jnedBOeNI,19323 +transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py,sha256=JtYnYhbmgCmz5CvrpqSpEmcQy_f5BaEZVmL34iiJcAE,141882 +transformers/models/biogpt/__init__.py,sha256=pZxVjmVzt7FXlkMO_5fMg01eyPvvHYXmDA33MKhp6Yk,1032 +transformers/models/biogpt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/biogpt/__pycache__/configuration_biogpt.cpython-310.pyc,, +transformers/models/biogpt/__pycache__/modeling_biogpt.cpython-310.pyc,, +transformers/models/biogpt/__pycache__/modular_biogpt.cpython-310.pyc,, +transformers/models/biogpt/__pycache__/tokenization_biogpt.cpython-310.pyc,, +transformers/models/biogpt/configuration_biogpt.py,sha256=t544SePN3AO-BjY3nzMW4CH-lMAdxs1TombK682z4fI,6215 +transformers/models/biogpt/modeling_biogpt.py,sha256=uebmwipr3wgWB8xLcBeH-CdJCfMMlZJnETrsDR-hS4k,42209 +transformers/models/biogpt/modular_biogpt.py,sha256=E_hd-bfD9sjKf47ZZGY7ChcdlWfl2pN6UiS-OWzfE94,33739 
+transformers/models/biogpt/tokenization_biogpt.py,sha256=8Lh_IRa00R-tyz6kAF4zTr48Yl6AcCfvG54ysueGsVU,12157 +transformers/models/bit/__init__.py,sha256=I9z2RYsPRokD1ycMBRLaesbyMKK4MwLPM5oTles2KmQ,1072 +transformers/models/bit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc,, +transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc,, +transformers/models/bit/__pycache__/image_processing_bit_fast.cpython-310.pyc,, +transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc,, +transformers/models/bit/configuration_bit.py,sha256=wZFP76CJYV7Hn-M4aSRmXn-SxXAsmUUHOLNHP6By6lI,6295 +transformers/models/bit/image_processing_bit.py,sha256=YW-8pzn8uS2Mr0Nof3QBZxuAhXmDCpaNxaWXFJdl_VU,15912 +transformers/models/bit/image_processing_bit_fast.py,sha256=JY4UL4OH2nQ8S66PyIYQaLFFjfhc7rIPaA-hCgmAo6Y,1327 +transformers/models/bit/modeling_bit.py,sha256=-v5t7o2LoH6j9OpERK3njL4FoBvEqex8cl0NcX_akBU,29615 +transformers/models/bitnet/__init__.py,sha256=0u3B40Xd6dJ7J7TBxJzSQWcyUe2ZWJTbT6iaWVod_-A,1018 +transformers/models/bitnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bitnet/__pycache__/configuration_bitnet.cpython-310.pyc,, +transformers/models/bitnet/__pycache__/modeling_bitnet.cpython-310.pyc,, +transformers/models/bitnet/__pycache__/modular_bitnet.cpython-310.pyc,, +transformers/models/bitnet/configuration_bitnet.py,sha256=H5hN7Qc_TWBcMr3xmF04oRrg36gd_IIs_X-wUMTs47k,6652 +transformers/models/bitnet/modeling_bitnet.py,sha256=DSCNzTt3I_UXgrN9Dv3--wyyKm56EW4PkXJSgA2O0AU,21296 +transformers/models/bitnet/modular_bitnet.py,sha256=0BNK__Ebgjm-FJ2RCU_rYULfDqEJ9zIDRKvuHMvQz2g,5753 +transformers/models/blenderbot/__init__.py,sha256=kdNRND4x54J18VhDVLH6usun5IblSN_9NYaLZfvaysc,1178 +transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc,, +transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc,, 
+transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc,, +transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc,, +transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc,, +transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc,, +transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc,, +transformers/models/blenderbot/configuration_blenderbot.py,sha256=NRqxofcj8VdtIozBg7xniPOiyFy1fb3MX4STphPVB8A,18881 +transformers/models/blenderbot/modeling_blenderbot.py,sha256=BlxfhFWYz-G1ZZ4GcGIkAuZ9nqFIGZzTJsqh1jRZP5k,73415 +transformers/models/blenderbot/modeling_flax_blenderbot.py,sha256=ETs29jtKfDjSBL-y7VJcdnHPHu_OGsvr4U5vmeVtRo8,65181 +transformers/models/blenderbot/modeling_tf_blenderbot.py,sha256=zzZt4rlKifDEhE7H-EsSDbavzBRCZl2XJE6mQgOe7-4,72662 +transformers/models/blenderbot/tokenization_blenderbot.py,sha256=sfqSGJEl-owb9-MHxG97G4BQdB6rSv9d17y7xlbLJCw,18223 +transformers/models/blenderbot/tokenization_blenderbot_fast.py,sha256=Bn6KZaUr2-LwmvHQmOMo6TfqwLxbm_-AXarQw4iP9bQ,12448 +transformers/models/blenderbot_small/__init__.py,sha256=QsmmBSPdTC43EIyYBwo-xTyJjLLVqm4Cx-KFJ9O2mfE,1214 +transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc,, +transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc,, +transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc,, +transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc,, +transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc,, +transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc,, +transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc,, 
+transformers/models/blenderbot_small/configuration_blenderbot_small.py,sha256=mok0lacLLkSC3LlQ6C6UxDBHZRBWW1pEUGyjAbamVco,18323 +transformers/models/blenderbot_small/modeling_blenderbot_small.py,sha256=7BafIrBo_I6R5-XFM8kNP2u8kWr1dOB4_cZvxmHDWGk,71661 +transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py,sha256=4gn7JguFgvff3qE1Yspggo998Ai1ycs6CwJFXJv9zok,66171 +transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py,sha256=9yF-h3X3_ZH-YC4R_WTi4VuQRV8vupHwP6AaNrrs3B4,71604 +transformers/models/blenderbot_small/tokenization_blenderbot_small.py,sha256=ygaUGnIJLrPKz1jOJY9tgL97t9-OUMFegcNJXm9wnRU,7945 +transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py,sha256=v28mfjk2dPGKH3YLRhol45ZsYFqXTfTCXlPtk4MK9-A,3361 +transformers/models/blip/__init__.py,sha256=aWgKd8B53KWjNBpR7xREMajO43tIo4sRcEjZUGMt8TI,1226 +transformers/models/blip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/blip/__pycache__/configuration_blip.cpython-310.pyc,, +transformers/models/blip/__pycache__/image_processing_blip.cpython-310.pyc,, +transformers/models/blip/__pycache__/image_processing_blip_fast.cpython-310.pyc,, +transformers/models/blip/__pycache__/modeling_blip.cpython-310.pyc,, +transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc,, +transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc,, +transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc,, +transformers/models/blip/__pycache__/processing_blip.cpython-310.pyc,, +transformers/models/blip/configuration_blip.py,sha256=_i_EKea4_h_16az8_o0jL9-Os0nXbP6iNwQaqrlnz44,14430 +transformers/models/blip/image_processing_blip.py,sha256=Bv4cRkjlhZXpYBQwABpD_LD9wBNXjD9M2wBrayil3Kw,15277 +transformers/models/blip/image_processing_blip_fast.py,sha256=0gEkLRg06PJYQk6gpm15N3nQsPZtstosy_kxMkY8CS8,1312 +transformers/models/blip/modeling_blip.py,sha256=Tvh3B4Ox2BQbShLSTDWpzEJLeYAsbn8sTrGyYfRZpIM,59264 
+transformers/models/blip/modeling_blip_text.py,sha256=vJBBIV9MtnAi0kX1-D4f7ukK-dUKncTrYWFWAQq6fY8,45055 +transformers/models/blip/modeling_tf_blip.py,sha256=M1B1RZxL9JDZIvhM05QMI0VL6Y3tKqwZdvT9FJM58xE,71427 +transformers/models/blip/modeling_tf_blip_text.py,sha256=asEmINloo3IGhWYpwzPsj7IxjMlOlxe5cDGO3yZpHXU,49960 +transformers/models/blip/processing_blip.py,sha256=MQCbaYChjgNax2cUvYcx4MsXGIfcLzo6kI0KAB0kQ7M,5897 +transformers/models/blip_2/__init__.py,sha256=kj_6H0rQ7dLoQk-COIb06LlDRnbORu3GLU3m4EdMkAM,1030 +transformers/models/blip_2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/blip_2/__pycache__/configuration_blip_2.cpython-310.pyc,, +transformers/models/blip_2/__pycache__/modeling_blip_2.cpython-310.pyc,, +transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc,, +transformers/models/blip_2/configuration_blip_2.py,sha256=Ysb_dx8tf5TRrNw3vIjwp1xuKQF4-L_U2TyUOZdepR8,16177 +transformers/models/blip_2/modeling_blip_2.py,sha256=xpQSG89xK7zij-o11od1pCErugjpy_I_PHgwSjRvO5U,104943 +transformers/models/blip_2/processing_blip_2.py,sha256=N-UuctuRoN0_4DOzloGcLpIATeLngi0iv3kgGbwGK1Y,8324 +transformers/models/bloom/__init__.py,sha256=lcq09Py2vSezUf26aaBG4yp2DpLZ-mAPt-fybvY_C-Q,1073 +transformers/models/bloom/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc,, +transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc,, +transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc,, +transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc,, +transformers/models/bloom/configuration_bloom.py,sha256=O9X_juvNPqEuxlwJQN-jPJqj8d0SROTHERYlSCEf_C4,10216 +transformers/models/bloom/modeling_bloom.py,sha256=EFFQx3fGICh9OQMKfcykYh6akCvU0_Xt2yzQIdRvEt4,56161 +transformers/models/bloom/modeling_flax_bloom.py,sha256=JRj-42JcNa40WjWRnQ6Q-aAsZVxd1zuNN59lf1MGinM,30197 
+transformers/models/bloom/tokenization_bloom_fast.py,sha256=ViQsNhssK_0gFtEHraJXLRJyHVgjAbBp0tmRbIsviVg,6277 +transformers/models/bridgetower/__init__.py,sha256=S9u22GAHi1LVcS3OYGBzfBVTjDvk_WU9JZGPTEo6zxw,1146 +transformers/models/bridgetower/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bridgetower/__pycache__/configuration_bridgetower.cpython-310.pyc,, +transformers/models/bridgetower/__pycache__/image_processing_bridgetower.cpython-310.pyc,, +transformers/models/bridgetower/__pycache__/image_processing_bridgetower_fast.cpython-310.pyc,, +transformers/models/bridgetower/__pycache__/modeling_bridgetower.cpython-310.pyc,, +transformers/models/bridgetower/__pycache__/processing_bridgetower.cpython-310.pyc,, +transformers/models/bridgetower/configuration_bridgetower.py,sha256=EtrVtgjiemvmV-CjJnnl98BQGepG77YRYUrK1lMsORU,14416 +transformers/models/bridgetower/image_processing_bridgetower.py,sha256=i2kVb1f7aX2c_COgncZnLk3h-8OzDu6lVdK4Q7oQT_Y,26386 +transformers/models/bridgetower/image_processing_bridgetower_fast.py,sha256=_8sXWxBa3F4lO_X7rHyTTuWh2b4XxLR-rLwxEFcmCgk,13229 +transformers/models/bridgetower/modeling_bridgetower.py,sha256=GqzAth4n2YTxYjBWuLZgrlF9iynfYSkYWkcntDLktS8,84762 +transformers/models/bridgetower/processing_bridgetower.py,sha256=nZcPioEGlI_44mRW06QUyiLCBiTgNtcvu2lORnZ0gh0,4431 +transformers/models/bros/__init__.py,sha256=wT0avJ_J50-WK6jOB-6UbgN5kjHiBwG-NNT_iefMXr8,1024 +transformers/models/bros/__pycache__/__init__.cpython-310.pyc,, +transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc,, +transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc,, +transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc,, +transformers/models/bros/configuration_bros.py,sha256=9Vgmvk3hZ-VccsOGhB8OlUPjM5ojPufSIBHa2oY4I5I,6418 +transformers/models/bros/modeling_bros.py,sha256=a28uSFQ7cTiUjj-B4fTWvJINa0ImNHwMZSMu-9KTdSo,49250 
+transformers/models/bros/processing_bros.py,sha256=N_uGepqmaWw8DFCN38n7H77tYNCG-nLex9Onhh2wvBY,4217 +transformers/models/byt5/__init__.py,sha256=O7yXvHyqMZ7stkKX67knnddmJ81pPHoKrY_7NCAauU4,955 +transformers/models/byt5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc,, +transformers/models/byt5/tokenization_byt5.py,sha256=ALgzHke0kQEe_3bopDv8r2TXfkaq2tQAM61DmzmQ8MU,10046 +transformers/models/camembert/__init__.py,sha256=hfxYgJYchvXLwio03yWsATGmrU2hgKOoiw7gaNoVgj8,1129 +transformers/models/camembert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/camembert/__pycache__/configuration_camembert.cpython-310.pyc,, +transformers/models/camembert/__pycache__/modeling_camembert.cpython-310.pyc,, +transformers/models/camembert/__pycache__/modeling_tf_camembert.cpython-310.pyc,, +transformers/models/camembert/__pycache__/tokenization_camembert.cpython-310.pyc,, +transformers/models/camembert/__pycache__/tokenization_camembert_fast.cpython-310.pyc,, +transformers/models/camembert/configuration_camembert.py,sha256=kf91zHJLL5_C_1OnJwPthKDj_636CNffChcfRs-epqg,7429 +transformers/models/camembert/modeling_camembert.py,sha256=B5x1CROTIlzpUfzMsKjg2yeARxz6bYcTjnI6yTaXgU0,72667 +transformers/models/camembert/modeling_tf_camembert.py,sha256=onjWfIs2bL12BpsnRUkjmBlGUCbDJ_2zsFYd89q6uEQ,81596 +transformers/models/camembert/tokenization_camembert.py,sha256=OaoBvDD4XRNLdCgRGCJCFnjznbVNt_ApfKkGj76WxHM,14075 +transformers/models/camembert/tokenization_camembert_fast.py,sha256=dDsFP_EvbYbt3kDWcj5BXEf-mbYQ-i4VNordbhOk5_E,8159 +transformers/models/canine/__init__.py,sha256=ThkEqO6wPzWCnAplx0EWCUqVaKKsNYQKXQhWfTblEBU,1032 +transformers/models/canine/__pycache__/__init__.cpython-310.pyc,, +transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc,, +transformers/models/canine/__pycache__/modeling_canine.cpython-310.pyc,, +transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc,, 
+transformers/models/canine/configuration_canine.py,sha256=8Rlt-y-lkY4Jwzi4Aa7NXN4TJtDoQylbogUOjt_q9IA,6584 +transformers/models/canine/modeling_canine.py,sha256=qNGPHXpKMBxa9IdP5VLm7K2jmFIWkvsXS-i3X5QsfKw,68562 +transformers/models/canine/tokenization_canine.py,sha256=hgz1yAdqqYWg6LkijfpzgataobP19GiOqwcUhQJZvtI,8194 +transformers/models/chameleon/__init__.py,sha256=EJ5kOvTyCFQjjWv9h4CMGHDRjQZ3CJ6j9Ygk2bEylA0,1136 +transformers/models/chameleon/__pycache__/__init__.cpython-310.pyc,, +transformers/models/chameleon/__pycache__/configuration_chameleon.cpython-310.pyc,, +transformers/models/chameleon/__pycache__/image_processing_chameleon.cpython-310.pyc,, +transformers/models/chameleon/__pycache__/image_processing_chameleon_fast.cpython-310.pyc,, +transformers/models/chameleon/__pycache__/modeling_chameleon.cpython-310.pyc,, +transformers/models/chameleon/__pycache__/processing_chameleon.cpython-310.pyc,, +transformers/models/chameleon/configuration_chameleon.py,sha256=hZwZ8pOjKFZa-urI7HFe-Nsk_4rWDKCDxDP-o3NjzvE,13413 +transformers/models/chameleon/image_processing_chameleon.py,sha256=T7IXKarv_SdQ1UdaCSeUgC5wT9XOqOYNnwhLTg_4eC8,16884 +transformers/models/chameleon/image_processing_chameleon_fast.py,sha256=6ALWS-g82vjxaAYQ52EkPGd_zjsEe1o5zmoA9XOmfNc,4334 +transformers/models/chameleon/modeling_chameleon.py,sha256=8815J9afCG8adNXlEucdZv5u7WtDkfKAlnpkn99ppiY,50913 +transformers/models/chameleon/processing_chameleon.py,sha256=3I70gsSH-4I9kxSvW6spc6SUIYfLDsbA3ZZiZVUJxOc,10351 +transformers/models/chinese_clip/__init__.py,sha256=-koN80ZGdGEDnTkLDGSDlzQ3fZcahTtaOgjtl3sddSE,1202 +transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc,, +transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc,, +transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc,, 
+transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip_fast.cpython-310.pyc,, +transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc,, +transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc,, +transformers/models/chinese_clip/configuration_chinese_clip.py,sha256=aut4tOdmgvWV5LN3D75LJRhKTNDRHMt9KJ6bmJQDrEw,20310 +transformers/models/chinese_clip/feature_extraction_chinese_clip.py,sha256=hZDBWu4SqNaqbxgA6EE-WZd4Qs8tmqPgXQjveRB5bnU,1366 +transformers/models/chinese_clip/image_processing_chinese_clip.py,sha256=aXemdDs0TG7UtL6dfjZyEawug-KdzJbiicthNYVBD1k,15548 +transformers/models/chinese_clip/image_processing_chinese_clip_fast.py,sha256=XkNJybi8fcwuB7_uN33KN61wDRJcNvXSuY7yUKKSr2k,1347 +transformers/models/chinese_clip/modeling_chinese_clip.py,sha256=ZyxO6a22OPQwEMkywnMIP8hHYksdxaEAteB0pgGDvtw,53611 +transformers/models/chinese_clip/processing_chinese_clip.py,sha256=v5O6mx3FJr4r18yy7RkzB7kzdf8qUOZfIu0Sg6kosAk,7558 +transformers/models/clap/__init__.py,sha256=751udHbsD7FBLGAByjx_8Z4XPLly1MaQQ4wKN_9vbOY,1067 +transformers/models/clap/__pycache__/__init__.cpython-310.pyc,, +transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc,, +transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc,, +transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc,, +transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc,, +transformers/models/clap/configuration_clap.py,sha256=jTnfuJQv3FHs0rP7nfJPMRk4bz6MtsjHXdS1ZL_um-I,18343 +transformers/models/clap/feature_extraction_clap.py,sha256=b-WKqQwRdmTU9kC3mVwx4wpcBOTA4s3XiovfnvyyfZM,18828 +transformers/models/clap/modeling_clap.py,sha256=gOU7symHOjGxK7WfWyhsX9RpLkPqiU0Glla2pfIzBLI,83840 +transformers/models/clap/processing_clap.py,sha256=3jrZQROdgz6jA-kchTBHfYPVCkLtszIah1HQAHnK-yM,5708 +transformers/models/clip/__init__.py,sha256=bkfM4LH7u_ab8C6cctpvdgySHyQmUaSlWphG4CkcQtg,1307 
+transformers/models/clip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/image_processing_clip_fast.cpython-310.pyc,, +transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/tokenization_clip.cpython-310.pyc,, +transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc,, +transformers/models/clip/configuration_clip.py,sha256=mxIZbRQW7zJ0hcCTvI-95tMuJsrrr0-e8wOekflDfXw,18900 +transformers/models/clip/feature_extraction_clip.py,sha256=45gMszIrxGAwWmVEjEOF7GmpoWAkUnG9YQnb60wT_7I,1284 +transformers/models/clip/image_processing_clip.py,sha256=yCu3mXUS_6mqNlppQx9m3LYUof8-aZJkgXe1ZMkfF68,16976 +transformers/models/clip/image_processing_clip_fast.py,sha256=19Xm-DXHVL7IU8xmCwJJEuiiIMDFwWw0uBQFaVAi4So,1407 +transformers/models/clip/modeling_clip.py,sha256=ypRVFPgkI9I9IGo9xUdAJbvoC5CmKwSvcxZmwhNLCNY,52767 +transformers/models/clip/modeling_flax_clip.py,sha256=LrZPnOAh57jEZwhEMXzX1hsPf_RliNPYtjsnMYk_TOg,50791 +transformers/models/clip/modeling_tf_clip.py,sha256=Mcc1NH0iwm6sqsfznhjNCpXzaX105CnC7PID1GWndV0,60317 +transformers/models/clip/processing_clip.py,sha256=cQSYGJ1qCLa2zcxS2z1gkU3lhOcj1nzaspPNOUGa3Is,7206 +transformers/models/clip/tokenization_clip.py,sha256=-WrPr-t8Kr-HXnL3tdXbj_FFladaDPLMNk78I7ROK5Y,20554 +transformers/models/clip/tokenization_clip_fast.py,sha256=Z9_w8hpW0NwYpF-HZPP6-gLuKwrq_rcHYcliP5EU3VM,6766 +transformers/models/clipseg/__init__.py,sha256=12Y-b3sRDKM3Hy8-6rK4GUF2a91V1S3nLUF7559AALw,1033 
+transformers/models/clipseg/__pycache__/__init__.cpython-310.pyc,, +transformers/models/clipseg/__pycache__/configuration_clipseg.cpython-310.pyc,, +transformers/models/clipseg/__pycache__/modeling_clipseg.cpython-310.pyc,, +transformers/models/clipseg/__pycache__/processing_clipseg.cpython-310.pyc,, +transformers/models/clipseg/configuration_clipseg.py,sha256=VxzkSvhASsaxVNzM4Ypp5XygKiDO-eo2EhznIVVsRIc,18855 +transformers/models/clipseg/modeling_clipseg.py,sha256=cJ3CODleX4MPQ25DMyHYc6O6ZwEIBXBZqFwMYXQw5c4,59417 +transformers/models/clipseg/processing_clipseg.py,sha256=q-SmkH77BV2sM9t4mY_F0ZwiGpE16O5vw4ZGz1ABRsg,7850 +transformers/models/clvp/__init__.py,sha256=RRnPofxkr_llgSxCP9tcAhu3xCR7E_m1PkrHv7KLMzo,1104 +transformers/models/clvp/__pycache__/__init__.cpython-310.pyc,, +transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc,, +transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc,, +transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc,, +transformers/models/clvp/__pycache__/number_normalizer.cpython-310.pyc,, +transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc,, +transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc,, +transformers/models/clvp/configuration_clvp.py,sha256=-coBzCPB5o6HbwdQ-b0JWxMERxkAWPFM4mTI5fazcQs,20257 +transformers/models/clvp/feature_extraction_clvp.py,sha256=l6Jr-23VBpVr1cZVATLVxTh9vNeuA3cKZkqrL83sSPI,10995 +transformers/models/clvp/modeling_clvp.py,sha256=tfHZFlUZVJJEKFMwU6QHyMitIkuxynhVPRxhkEVEzZY,86889 +transformers/models/clvp/number_normalizer.py,sha256=0zNI1TWJCMJ4i9VxrirCDeX_wjEV02G0_Ig8xdmD-LY,8933 +transformers/models/clvp/processing_clvp.py,sha256=A1e_PBrUDIG6xRoX3nzwgkmkDHzpRsc8MEXSnJKzA_Q,3634 +transformers/models/clvp/tokenization_clvp.py,sha256=G5O6ykpxfuJLaej8mgkjty3UNEnTL47e5RfvVVJ7P8Q,14808 +transformers/models/code_llama/__init__.py,sha256=aZJA9qTifG-RGtJKMzfspfxuQkaBryVva7Ah_uGNMoM,1009 
+transformers/models/code_llama/__pycache__/__init__.cpython-310.pyc,, +transformers/models/code_llama/__pycache__/tokenization_code_llama.cpython-310.pyc,, +transformers/models/code_llama/__pycache__/tokenization_code_llama_fast.cpython-310.pyc,, +transformers/models/code_llama/tokenization_code_llama.py,sha256=BLEp72WT3fN8px65zzBn9bcwv30ThoDflE2267TJ8Xk,19314 +transformers/models/code_llama/tokenization_code_llama_fast.py,sha256=1ca69b8iic_Q61Fy9frmt6maw8DnQxxSBhBexILkltw,15832 +transformers/models/codegen/__init__.py,sha256=NeUIbS8szfu5R9-7CX_G6730RHOODzTfmrapJH2ApMk,1080 +transformers/models/codegen/__pycache__/__init__.cpython-310.pyc,, +transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc,, +transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc,, +transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc,, +transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc,, +transformers/models/codegen/configuration_codegen.py,sha256=c0KsyyBNKsPFMf2TL_5lOS_DztoS-cOphRasNOI2e5I,9574 +transformers/models/codegen/modeling_codegen.py,sha256=-H1tFORy1UjdT-fkTge3lQiufmu2Q44nfkHomqu9SP0,29576 +transformers/models/codegen/tokenization_codegen.py,sha256=ih2YU6A9RW2oPI6dyXsxx8WUHeKcibwmtyWOu3vbuHY,15365 +transformers/models/codegen/tokenization_codegen_fast.py,sha256=wiHxam7coCyQR5L1LAgcYCBgwCUUhaLZwyF2lr7WK7w,9650 +transformers/models/cohere/__init__.py,sha256=1Tg-6WGc5wgGduSR__N-jGZvPje9kNs92DW78vN0Auo,1037 +transformers/models/cohere/__pycache__/__init__.cpython-310.pyc,, +transformers/models/cohere/__pycache__/configuration_cohere.cpython-310.pyc,, +transformers/models/cohere/__pycache__/modeling_cohere.cpython-310.pyc,, +transformers/models/cohere/__pycache__/modular_cohere.cpython-310.pyc,, +transformers/models/cohere/__pycache__/tokenization_cohere_fast.cpython-310.pyc,, +transformers/models/cohere/configuration_cohere.py,sha256=cuNOpFFmlh-aqWbY7M771gr1Efo4ScohzJ7AXN1cXAs,11162 
+transformers/models/cohere/modeling_cohere.py,sha256=DS8Ds-lLgHYIqjucuK0gHgYX4HNbAN1qL0-tBcplsKA,24215 +transformers/models/cohere/modular_cohere.py,sha256=c4gnZFu1EQ4JKOWFkpIuzsmQ6OS4BdlapCD_jcdpFRY,16047 +transformers/models/cohere/tokenization_cohere_fast.py,sha256=SFaANeLr0Kw4zx-ItLTA13UBGZulMlBdO7d9FqJBxnU,28818 +transformers/models/cohere2/__init__.py,sha256=6Cx_c-uTSNopbO3NLWCgMmEB2-5hzkrunUWmMrb8YSU,1011 +transformers/models/cohere2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/cohere2/__pycache__/configuration_cohere2.cpython-310.pyc,, +transformers/models/cohere2/__pycache__/modeling_cohere2.cpython-310.pyc,, +transformers/models/cohere2/__pycache__/modular_cohere2.cpython-310.pyc,, +transformers/models/cohere2/configuration_cohere2.py,sha256=7tHWlR5FLhOWwR9d_JTMBs1sbFugq-NM6UN4UeOq1qk,13024 +transformers/models/cohere2/modeling_cohere2.py,sha256=gh6P5JnV8MpXVIdEgCLn9WargXSNnGe4EAbI48u8glI,23526 +transformers/models/cohere2/modular_cohere2.py,sha256=KDrZ94yp7NUXu5HZN4OwW6j4OD-G4nMgzMb4mLN9O0U,20731 +transformers/models/cohere2_vision/__init__.py,sha256=VZPnI0ugmeUt8tHNLXhSF1X4maFa0T4pqnm3VAPEyo0,1110 +transformers/models/cohere2_vision/__pycache__/__init__.cpython-310.pyc,, +transformers/models/cohere2_vision/__pycache__/configuration_cohere2_vision.cpython-310.pyc,, +transformers/models/cohere2_vision/__pycache__/image_processing_cohere2_vision_fast.cpython-310.pyc,, +transformers/models/cohere2_vision/__pycache__/modeling_cohere2_vision.cpython-310.pyc,, +transformers/models/cohere2_vision/__pycache__/modular_cohere2_vision.cpython-310.pyc,, +transformers/models/cohere2_vision/__pycache__/processing_cohere2_vision.cpython-310.pyc,, +transformers/models/cohere2_vision/configuration_cohere2_vision.py,sha256=wLz9OJCrFFCazMXgy1ntXWDUq-o8Q75B8L1eN3Y7QFg,3612 +transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py,sha256=JxYEOa1I5_SIF_MFDOIlDw0ullZWTDpPph4EqN4IyQs,13780 
+transformers/models/cohere2_vision/modeling_cohere2_vision.py,sha256=Bm5PE5FnK2ByDBC6yhWYE5SHfX7Y_xN9lh13v5GhKc8,19155 +transformers/models/cohere2_vision/modular_cohere2_vision.py,sha256=Q3-MgKrqqiCPt7l7Fofp2OIBghg75u4GgJocVv4glb0,13764 +transformers/models/cohere2_vision/processing_cohere2_vision.py,sha256=V12UAVRARxMMXrp9uQC16mkdYxGp_LanFeCoEc_Pzrw,9865 +transformers/models/colpali/__init__.py,sha256=eG-nOojo-DPkgZJACn6hbJqqfnGE97uKmLkpWVin66A,1033 +transformers/models/colpali/__pycache__/__init__.cpython-310.pyc,, +transformers/models/colpali/__pycache__/configuration_colpali.cpython-310.pyc,, +transformers/models/colpali/__pycache__/modeling_colpali.cpython-310.pyc,, +transformers/models/colpali/__pycache__/modular_colpali.cpython-310.pyc,, +transformers/models/colpali/__pycache__/processing_colpali.cpython-310.pyc,, +transformers/models/colpali/configuration_colpali.py,sha256=823j9kFkFOZCVfi0VVC7ActtstXkK2LDfD_Ra9wVv9c,4346 +transformers/models/colpali/modeling_colpali.py,sha256=I-kKuahUTZ1HAll9QjXp2f-UgxDHYm5EBlvfnnDep6s,8584 +transformers/models/colpali/modular_colpali.py,sha256=B4wbtgQEM5ay48NrcOxu5tIWQGhHwlS1-vRqsSjhGBY,16199 +transformers/models/colpali/processing_colpali.py,sha256=j3ncyUf6lgVTGHkNpTt1a2fVbNKeCuKNm6ZYhnQ8r0w,20774 +transformers/models/colqwen2/__init__.py,sha256=GBrOYGkXcXTOuCd6AhVMss6TVb2igEKFcrAkgJbbg-Q,1036 +transformers/models/colqwen2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/colqwen2/__pycache__/configuration_colqwen2.cpython-310.pyc,, +transformers/models/colqwen2/__pycache__/modeling_colqwen2.cpython-310.pyc,, +transformers/models/colqwen2/__pycache__/modular_colqwen2.cpython-310.pyc,, +transformers/models/colqwen2/__pycache__/processing_colqwen2.cpython-310.pyc,, +transformers/models/colqwen2/configuration_colqwen2.py,sha256=0mUQKsC-twaY24aSSmjydo2HmGtzW7MmXKxHO3e4IWk,3708 +transformers/models/colqwen2/modeling_colqwen2.py,sha256=_eVoPMhdPNXacujicVFNJ8qecYuhCVS4sOU9UtnbS2c,11193 
+transformers/models/colqwen2/modular_colqwen2.py,sha256=RYulE7OTUCVcV4qYjXyIGnmhJglbgoFqRvOMALNrJfM,18629 +transformers/models/colqwen2/processing_colqwen2.py,sha256=sRKJU2_jy5c7sIRh6fygrI48FJzYgMvbACaQ9Ol8nPg,20045 +transformers/models/conditional_detr/__init__.py,sha256=p8luCb38qMZvKdI7GLvBTx1eiKGFMv8Obd5iKaqoVe8,1179 +transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/conditional_detr/__pycache__/configuration_conditional_detr.cpython-310.pyc,, +transformers/models/conditional_detr/__pycache__/feature_extraction_conditional_detr.cpython-310.pyc,, +transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc,, +transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr_fast.cpython-310.pyc,, +transformers/models/conditional_detr/__pycache__/modeling_conditional_detr.cpython-310.pyc,, +transformers/models/conditional_detr/__pycache__/modular_conditional_detr.cpython-310.pyc,, +transformers/models/conditional_detr/configuration_conditional_detr.py,sha256=J0SMZqyKvQ0Bu_og74WVqwm2MuxmYOZQFe1Iw-fVliE,13739 +transformers/models/conditional_detr/feature_extraction_conditional_detr.py,sha256=QwZ7PwpcYVGjSFPluSXbT5oTGM4UgeYSL_q-sybwHgY,1676 +transformers/models/conditional_detr/image_processing_conditional_detr.py,sha256=eFEmjsNriiW0uYVG8465chS3lzlo23JFojUlmkxvyA4,85840 +transformers/models/conditional_detr/image_processing_conditional_detr_fast.py,sha256=oKAzLE4JaqjwANsgyp4nTq8-s7QxDfG_ESqnqeVIGW8,48262 +transformers/models/conditional_detr/modeling_conditional_detr.py,sha256=e52KPafYa_OYYmfS1CKG1Rcfu9noHg9kMI1UK9UaBgw,93088 +transformers/models/conditional_detr/modular_conditional_detr.py,sha256=_Nnn5kIrM4rE9KIqSjC7wBW2ETjE85ky3pVXEmmpv6c,6082 +transformers/models/convbert/__init__.py,sha256=x1Rv5-rurTKFifp3w8N_CNcZ3sHvuFwqpw_Zn1BAenw,1124 +transformers/models/convbert/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc,, +transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc,, +transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc,, +transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc,, +transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc,, +transformers/models/convbert/configuration_convbert.py,sha256=Ml8UytHYCv4-BAeyPTUzUY5ynnX8dIkCIGoumPTqaCc,6895 +transformers/models/convbert/modeling_convbert.py,sha256=H2kKGuwm0g-IiHa4vR7TRRgZLfnBdLfUSy1cVeCN0mA,58383 +transformers/models/convbert/modeling_tf_convbert.py,sha256=mvR9901jo5L-GOvp6QtoY0nORRpwRURQpSxEs3T5X5c,61487 +transformers/models/convbert/tokenization_convbert.py,sha256=nt9KjwTDvJV-IGV_7PsznPWF1_uZj2aR3oL9-Ie56VM,20170 +transformers/models/convbert/tokenization_convbert_fast.py,sha256=3TKalVIkHf4iWJ5mxhbdjS7s6svBsEQUWVdhLSK07G8,6686 +transformers/models/convnext/__init__.py,sha256=QAUm2k3PH0pqhHzPXIhkEmqzMWKYCs4bo0gNVnH_bBw,1179 +transformers/models/convnext/__pycache__/__init__.cpython-310.pyc,, +transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc,, +transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc,, +transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc,, +transformers/models/convnext/__pycache__/image_processing_convnext_fast.cpython-310.pyc,, +transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc,, +transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc,, +transformers/models/convnext/configuration_convnext.py,sha256=9m5oXY9Vx3BdADw0k7EeM4n3KrkCb3JA-WOPUlbtr74,6192 +transformers/models/convnext/feature_extraction_convnext.py,sha256=7oC8UpEVxpiIhXIC8Rc3I5YnJExAPEEezSdAUnn1hnw,1316 +transformers/models/convnext/image_processing_convnext.py,sha256=DLLgSmlBRKoZuOCIt3IHTqDqV4SS-jLpERtLmfvqnko,16015 
+transformers/models/convnext/image_processing_convnext_fast.py,sha256=40nvdfgAtQHGWNwNLa5MLvnqWKSEOd4MvoEtiZv8750,7200 +transformers/models/convnext/modeling_convnext.py,sha256=dJTGcpf5D1qLDKrGfD4SpCI6eT3-zz5qXujAEw8dWLU,19454 +transformers/models/convnext/modeling_tf_convnext.py,sha256=CQ3AIeUarAgYgMsI9_aWcMjD6ie-JYI22JSw9rGzo9E,27199 +transformers/models/convnextv2/__init__.py,sha256=kOl9JbYIk9ioImF_hd0BS_mGDC8SG2k5LvO0-7WroRo,1043 +transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc,, +transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc,, +transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc,, +transformers/models/convnextv2/configuration_convnextv2.py,sha256=bUeneirJLhh_eL1rQZ1Mk_ZSCCzJlg-CwcNNEk0fVjY,5564 +transformers/models/convnextv2/modeling_convnextv2.py,sha256=K3mGdY-MWyir9Fodtn4qnx_7Sjq7n2keHJ8TrxLRZ3w,21013 +transformers/models/convnextv2/modeling_tf_convnextv2.py,sha256=2cVHJDxFMYHHL-NEPTKF5MfsVNFxC5sqdu5bNHoK1M4,27605 +transformers/models/cpm/__init__.py,sha256=5Oz79wRruzXHciBLUAOGeo6PIH70Vs4ta8ffsMyT1Yg,995 +transformers/models/cpm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc,, +transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc,, +transformers/models/cpm/tokenization_cpm.py,sha256=FoFU7yUwJTGuny6ZYyjn97gCO3gGGjldWStRPjnKdPY,15114 +transformers/models/cpm/tokenization_cpm_fast.py,sha256=2-jf6ZXKnBM1x38CE2SIpHDkZ_aJs7PN0A-kM_dw5YA,10307 +transformers/models/cpmant/__init__.py,sha256=RfkbbhNqdbioJ5XVaTtxBLnZRt1GFnXugS3UFXHYV0c,1032 +transformers/models/cpmant/__pycache__/__init__.cpython-310.pyc,, +transformers/models/cpmant/__pycache__/configuration_cpmant.cpython-310.pyc,, +transformers/models/cpmant/__pycache__/modeling_cpmant.cpython-310.pyc,, 
+transformers/models/cpmant/__pycache__/tokenization_cpmant.cpython-310.pyc,, +transformers/models/cpmant/configuration_cpmant.py,sha256=RvgmQH8lQazRopzpfK5-Hf4eePtXXfvMJ3ar1VQC2vE,5145 +transformers/models/cpmant/modeling_cpmant.py,sha256=f-YmtWc4xvxMOYOaZqW9nfR9XnmeUmM0A70M7IVSH5Y,33794 +transformers/models/cpmant/tokenization_cpmant.py,sha256=v-Ra7sTNzfxA6TPHjglkvlx0D-awCHm9P5VSUoyaTF8,9747 +transformers/models/csm/__init__.py,sha256=n-AQHwxZwD8imEHipiQoTDRf_OMo5zJhQ0tKKWMCPYs,1021 +transformers/models/csm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/csm/__pycache__/configuration_csm.cpython-310.pyc,, +transformers/models/csm/__pycache__/generation_csm.cpython-310.pyc,, +transformers/models/csm/__pycache__/modeling_csm.cpython-310.pyc,, +transformers/models/csm/__pycache__/modular_csm.cpython-310.pyc,, +transformers/models/csm/__pycache__/processing_csm.cpython-310.pyc,, +transformers/models/csm/configuration_csm.py,sha256=xhRBdgZbc76V3OMJAcF4EL9wQEi9FVQV8g3f_ZuwxGo,23791 +transformers/models/csm/generation_csm.py,sha256=pYZltTDah__he6zoN70qda4YLH1wdaCHoxS4Whzsmkw,25692 +transformers/models/csm/modeling_csm.py,sha256=aFKydQaWSLFAd4CAqEHXswH-jyddjaRDa97mM4t-ph8,50819 +transformers/models/csm/modular_csm.py,sha256=wSk__r9pnfqmfGMsnb6phL-j6uK14vPb9qS_Szop4o0,35542 +transformers/models/csm/processing_csm.py,sha256=4lwcP9lFIjyS4dJ2IYf2zx0vbaeTzsbKWRSmertED04,16011 +transformers/models/ctrl/__init__.py,sha256=bVtGijL4n9ewNyhcJt7lpsRhXU8yo4nY0xIlRbpismk,1062 +transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc,, +transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc,, +transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc,, +transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc,, +transformers/models/ctrl/configuration_ctrl.py,sha256=Vg6ZFqal5MCr-t2K5pp5mtN2TJSeojgKL8IgbZkd81k,4684 
+transformers/models/ctrl/modeling_ctrl.py,sha256=vKrex6fs3ACnjUEIhoUlFKAvLGjnYm0oGF4eMpAggHY,32217 +transformers/models/ctrl/modeling_tf_ctrl.py,sha256=7C954SzmkF6m4uraWukQ2Q_vhD3slx7usn9AXI26YsE,39279 +transformers/models/ctrl/tokenization_ctrl.py,sha256=N6D_85X_YHRvhltFZMVdR5M4ahJWyUvCuUvcFYOL5Lw,8080 +transformers/models/cvt/__init__.py,sha256=i1847SsjrXEIbrXsDEAiUlrtgLZRHtCSVG0rvCPXE9I,1022 +transformers/models/cvt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/cvt/__pycache__/configuration_cvt.cpython-310.pyc,, +transformers/models/cvt/__pycache__/modeling_cvt.cpython-310.pyc,, +transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc,, +transformers/models/cvt/configuration_cvt.py,sha256=zVX0Ht69OHm8ttnbAYbzxtV0kNDKV_qpbSDwToqJMKI,6684 +transformers/models/cvt/modeling_cvt.py,sha256=0LKChJHfc7xzs8YU788h2H45rhcC7OFze_7Pwl-qUOM,25870 +transformers/models/cvt/modeling_tf_cvt.py,sha256=N7vzSNotCa0x2wKN28PrxWSoSX5VL0O4Jwl-qVCPQcg,43457 +transformers/models/d_fine/__init__.py,sha256=1gNscomeWytwZT7K2GJBwyXxDkfVNLhRjuDwyde2A0s,995 +transformers/models/d_fine/__pycache__/__init__.cpython-310.pyc,, +transformers/models/d_fine/__pycache__/configuration_d_fine.cpython-310.pyc,, +transformers/models/d_fine/__pycache__/modeling_d_fine.cpython-310.pyc,, +transformers/models/d_fine/__pycache__/modular_d_fine.cpython-310.pyc,, +transformers/models/d_fine/configuration_d_fine.py,sha256=mFg_xcD2t7-evibX5REKeCKg3RKZIdBQsSRtGWnYN6E,22689 +transformers/models/d_fine/modeling_d_fine.py,sha256=TrDHAJFuspAItmA9DN_gQRODKieL0keS-6GwKRCPxbA,105231 +transformers/models/d_fine/modular_d_fine.py,sha256=jQOO7gL-X5JZ9Uaij-SHPvGK61CYirBhNMSMs-ja58I,56809 +transformers/models/dab_detr/__init__.py,sha256=ZvNYPQyXWplaRQIxFR8CURcsnu_HRPXrwojF5nTmGd4,998 +transformers/models/dab_detr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dab_detr/__pycache__/configuration_dab_detr.cpython-310.pyc,, 
+transformers/models/dab_detr/__pycache__/modeling_dab_detr.cpython-310.pyc,, +transformers/models/dab_detr/configuration_dab_detr.py,sha256=LOpJZP2nJtYS9yU2tshuqgtEWWNIojY__pl6Vuekkgg,13756 +transformers/models/dab_detr/modeling_dab_detr.py,sha256=vdCs9jxbggsJ0hPPSYXs29Q4LLkyqTdMdvANFPIigVQ,74808 +transformers/models/dac/__init__.py,sha256=UpwXPmSOQOwvbIvklM21-y5HKY7MEIInmTt65xMX6Hw,1029 +transformers/models/dac/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dac/__pycache__/configuration_dac.cpython-310.pyc,, +transformers/models/dac/__pycache__/feature_extraction_dac.cpython-310.pyc,, +transformers/models/dac/__pycache__/modeling_dac.cpython-310.pyc,, +transformers/models/dac/configuration_dac.py,sha256=Exf0bhzmsEvxLxSJTOpdjPL6Pc30SHKW1MZ23aVdt1M,4581 +transformers/models/dac/feature_extraction_dac.py,sha256=isiE4djooNAEY3ddjom56y5mMW3g4e8Aa1iOono4eFk,7958 +transformers/models/dac/modeling_dac.py,sha256=0iudOtUa6B_ITe14UbT3sHOpV64YEkNxXKJGu0BOOL4,28594 +transformers/models/data2vec/__init__.py,sha256=-2iFF1Rb8eF9cccBNLA29zgeFV1ADYaSLoQgf6K6KB8,1238 +transformers/models/data2vec/__pycache__/__init__.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/configuration_data2vec_audio.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/configuration_data2vec_text.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/configuration_data2vec_vision.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/modeling_data2vec_audio.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/modeling_data2vec_text.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/modeling_data2vec_vision.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/modeling_tf_data2vec_vision.cpython-310.pyc,, +transformers/models/data2vec/__pycache__/modular_data2vec_audio.cpython-310.pyc,, +transformers/models/data2vec/configuration_data2vec_audio.py,sha256=SmJMa0tBoQZudlxPe1RvpSq0YwB10GjTCify0NwL6mg,16373 
+transformers/models/data2vec/configuration_data2vec_text.py,sha256=EJRVy6uJcVoDFVdMJfGaIOx_cIIW03A58N5X849i7G4,7361 +transformers/models/data2vec/configuration_data2vec_vision.py,sha256=8LsGPjPhFaBXpMF6LrtndMoXTE33pTnfZ1p9Lz06c7k,9314 +transformers/models/data2vec/modeling_data2vec_audio.py,sha256=q2RKNuqtdsxTCa3SKPoq7jG_J1ZxX0V5Aw5t8bvdvAA,59332 +transformers/models/data2vec/modeling_data2vec_text.py,sha256=Dr8jQxrEOJqvItQDYK59jMOzx8u-5oGXGx_EkVxAnPc,60471 +transformers/models/data2vec/modeling_data2vec_vision.py,sha256=OHQVk9dGzXTO1HsPG6kVnlOlPwwcICCyZbUyBp7K_Z8,59610 +transformers/models/data2vec/modeling_tf_data2vec_vision.py,sha256=GO5cK8C7P1jtULl88FYw0T8Thao7E09IMx4whTzq6H0,73392 +transformers/models/data2vec/modular_data2vec_audio.py,sha256=jnI7VbLCriZmsoogiycJklyWhHkOvWl3eVToViSr3rE,9350 +transformers/models/dbrx/__init__.py,sha256=Kzn3gm0QHW9RKEmog_IfdCGam5TXSCzkOs_WHC43sgM,989 +transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc,, +transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc,, +transformers/models/dbrx/configuration_dbrx.py,sha256=vSPYWWhW289qAU9iIOaMYadUlFcc5SMq5u8zmZ4AsCw,9928 +transformers/models/dbrx/modeling_dbrx.py,sha256=wgh2qMf1Cfjlh-wdkIUtLgyjtHTSpsRJsbsRUb1_dtQ,55104 +transformers/models/deberta/__init__.py,sha256=diL764eL8gu80XkBDQU9nI6Zy39ArO0d85MtcZ4_NPw,1119 +transformers/models/deberta/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc,, +transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc,, +transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc,, +transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc,, +transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc,, +transformers/models/deberta/configuration_deberta.py,sha256=GFkrPn9TgA3QRrRTYsxFnQRsFRzFHPlpDJwS_HCo9Go,9024 
+transformers/models/deberta/modeling_deberta.py,sha256=DLuOBnWZEyRw3p75kK1WRagzXTXReJsB3Er1AdPQ87o,48815 +transformers/models/deberta/modeling_tf_deberta.py,sha256=6rH8jdCWMoxKnurItq1ilWZ8KbaM-IbUGkPECrUMR9M,69231 +transformers/models/deberta/tokenization_deberta.py,sha256=sEbB4J8F0BdwyxfkOnkwPc4AWBHcwRyVE7Ql9AN8R-Q,15951 +transformers/models/deberta/tokenization_deberta_fast.py,sha256=uoJE9AssyGms5uRMxIy3awKj0W8nJ7sYFB0XiZXz47k,9121 +transformers/models/deberta_v2/__init__.py,sha256=N6wcSGakSmmHDW_QelFsn58zuDFTuvbctgkyC0OfQ5Y,1134 +transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc,, +transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc,, +transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc,, +transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc,, +transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc,, +transformers/models/deberta_v2/configuration_deberta_v2.py,sha256=96TOPpbrNSW2e-Mvs2D3S-AuKyb8r_kLtStv0TRH_rY,8964 +transformers/models/deberta_v2/modeling_deberta_v2.py,sha256=mPo5j2LkRuH2MD_2f6es7drE90ahpgEyJolsP6TmzdY,56783 +transformers/models/deberta_v2/modeling_tf_deberta_v2.py,sha256=3eJKDG8GQCcDXSKO8AWikBsCQuUZzOOC-tYO1j3yaL0,81610 +transformers/models/deberta_v2/tokenization_deberta_v2.py,sha256=WhRgGdG440RsyEZp6yV9IudKT0F2W8uXf2zLuTvcdY4,19731 +transformers/models/deberta_v2/tokenization_deberta_v2_fast.py,sha256=_i28i-MLIDWhqACnNKhrDcDOopjy2ulJb3R0HYqxBtc,8593 +transformers/models/decision_transformer/__init__.py,sha256=8XAHnFrFv8IFz495cQLTeaAk2G1AVRT7roauVHCGoJs,1021 +transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc,, 
+transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc,, +transformers/models/decision_transformer/configuration_decision_transformer.py,sha256=hKOOb_TuM0XU7fDQu9sl2o6iNYYt16Dll3JUCKecFB4,7029 +transformers/models/decision_transformer/modeling_decision_transformer.py,sha256=zxcx59F2rW6okAi5u2Qpd4slB7cV7o4kzmxnFw2YGCY,43242 +transformers/models/deepseek_v2/__init__.py,sha256=cRpNT946KLnKXl4i2mGlImi9QLOe2a1ocnWNjBSbK68,1005 +transformers/models/deepseek_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deepseek_v2/__pycache__/configuration_deepseek_v2.cpython-310.pyc,, +transformers/models/deepseek_v2/__pycache__/modeling_deepseek_v2.cpython-310.pyc,, +transformers/models/deepseek_v2/__pycache__/modular_deepseek_v2.cpython-310.pyc,, +transformers/models/deepseek_v2/configuration_deepseek_v2.py,sha256=8snqY9qX7eRSMd28UfvOdVe8o19ABxcE21ak1WW1XCw,12231 +transformers/models/deepseek_v2/modeling_deepseek_v2.py,sha256=2FkuX0nFR-CXFdH23zzUdE9fsyC7UsfZ497fq_Wp-ME,27426 +transformers/models/deepseek_v2/modular_deepseek_v2.py,sha256=75TD8UR--4VhHX1QLbSa9wY3DC5M0KN3sJJqdL8QUVM,23383 +transformers/models/deepseek_v3/__init__.py,sha256=t-ejxAfULC_tUrUucNLt-x3hbTEIqUQp96m2DRFeaTg,1008 +transformers/models/deepseek_v3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deepseek_v3/__pycache__/configuration_deepseek_v3.cpython-310.pyc,, +transformers/models/deepseek_v3/__pycache__/modeling_deepseek_v3.cpython-310.pyc,, +transformers/models/deepseek_v3/__pycache__/modular_deepseek_v3.cpython-310.pyc,, +transformers/models/deepseek_v3/configuration_deepseek_v3.py,sha256=VEcrAJDOUBn4NmPDqAXDZ47gCRGGjZOaKsGL0qSH5xE,12703 +transformers/models/deepseek_v3/modeling_deepseek_v3.py,sha256=jZxbv_Pz8Kcj3h4iZyRGOfWOzTBrZjVQOxI1tKwklRg,29907 +transformers/models/deepseek_v3/modular_deepseek_v3.py,sha256=9kyA4SxGOMlr5wCVHY76fkrsiUJ7TTdaDkzX1u3rmNs,15280 
+transformers/models/deepseek_vl/__init__.py,sha256=ZjpL2NkjCM_tsGMz_fY7bOQ2HQwimj3RL4cocjrEtHI,1162 +transformers/models/deepseek_vl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deepseek_vl/__pycache__/configuration_deepseek_vl.cpython-310.pyc,, +transformers/models/deepseek_vl/__pycache__/image_processing_deepseek_vl.cpython-310.pyc,, +transformers/models/deepseek_vl/__pycache__/image_processing_deepseek_vl_fast.cpython-310.pyc,, +transformers/models/deepseek_vl/__pycache__/modeling_deepseek_vl.cpython-310.pyc,, +transformers/models/deepseek_vl/__pycache__/modular_deepseek_vl.cpython-310.pyc,, +transformers/models/deepseek_vl/__pycache__/processing_deepseek_vl.cpython-310.pyc,, +transformers/models/deepseek_vl/configuration_deepseek_vl.py,sha256=gGJkOSpesfQyuwzj74EWC6p0TXnGs_74-cStk3aqDpE,4554 +transformers/models/deepseek_vl/image_processing_deepseek_vl.py,sha256=XOtNCfHXr45RpEghL3x18AslM_h4oPF7kTUHrBjW55k,21020 +transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py,sha256=IibhJug4LYi9XgXdIRKK5jaoyZx2y3y6HoSQvkXqyZE,8171 +transformers/models/deepseek_vl/modeling_deepseek_vl.py,sha256=K52u_PsOx6wu83sWUQudSiTH4mIYja2vg3zrdGTnErQ,15754 +transformers/models/deepseek_vl/modular_deepseek_vl.py,sha256=_Ms7kuIdv67vb5nVnF4UCAoyysPJ2XdEFPrLKpODw-Y,13664 +transformers/models/deepseek_vl/processing_deepseek_vl.py,sha256=blO0K-YBes3BPpi_ndSNNpnU8rweOUdjofu4wixdEGc,8026 +transformers/models/deepseek_vl_hybrid/__init__.py,sha256=FjSY1MYTEUP8BbAqIG2DZSAzmLh17w8aZr7pUkC_Gto,1257 +transformers/models/deepseek_vl_hybrid/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deepseek_vl_hybrid/__pycache__/configuration_deepseek_vl_hybrid.cpython-310.pyc,, +transformers/models/deepseek_vl_hybrid/__pycache__/image_processing_deepseek_vl_hybrid.cpython-310.pyc,, +transformers/models/deepseek_vl_hybrid/__pycache__/image_processing_deepseek_vl_hybrid_fast.cpython-310.pyc,, 
+transformers/models/deepseek_vl_hybrid/__pycache__/modeling_deepseek_vl_hybrid.cpython-310.pyc,, +transformers/models/deepseek_vl_hybrid/__pycache__/modular_deepseek_vl_hybrid.cpython-310.pyc,, +transformers/models/deepseek_vl_hybrid/__pycache__/processing_deepseek_vl_hybrid.cpython-310.pyc,, +transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py,sha256=0wPzB5AuZjiPr3RYugMklRgxOw2QrKVFFHJWfWnlpw4,5448 +transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py,sha256=c7wA632hfX6XDfPeX6ynwsrRINIwnqpUvHbNapUbs_M,25982 +transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py,sha256=oNyM6cnv8dARIv5xoiHWFCX2NKdM0oEwuhyYoKwblrQ,14765 +transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py,sha256=_NnTgr6oETeihLaMiwYht7fYd2uFZJ2MQ06iXBlYsNg,23072 +transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py,sha256=MBRKGrwyFe6fV0jB0OmD0ORYH-c_m2CSzxlXyhAA2TQ,47344 +transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py,sha256=pTv1I8tlnUcyEQcmWpCnMUMLC4CjK43G_zfj1o-BtUU,8236 +transformers/models/deformable_detr/__init__.py,sha256=_ae-sABBY17hOT28SN_d0GLeRVjya0W4aqniH8u8Bcw,1176 +transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc,, +transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc,, +transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc,, +transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr_fast.cpython-310.pyc,, +transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc,, +transformers/models/deformable_detr/__pycache__/modular_deformable_detr.cpython-310.pyc,, +transformers/models/deformable_detr/configuration_deformable_detr.py,sha256=5CRV4cdMS_5N4NS8Qa9ymAKocEaGIegCQ0xFmwA-Ijw,14794 
+transformers/models/deformable_detr/feature_extraction_deformable_detr.py,sha256=ifv-_D_b2_5GsavP72mH6etQoobhFYmf2NB4Fyl9nP0,1668 +transformers/models/deformable_detr/image_processing_deformable_detr.py,sha256=eS60y_h2tGzXerEYCiROlAj1Dnnuw-9BOAbgPwkvS5E,73296 +transformers/models/deformable_detr/image_processing_deformable_detr_fast.py,sha256=pandJq9-TtViJxMULhZgiwxEzeioQQ3ut5KbRVMANGI,36237 +transformers/models/deformable_detr/modeling_deformable_detr.py,sha256=O7WbRGoQMlCbYfq0GWlg_oxWpvfFWXvM1sJpSQVSiTA,88407 +transformers/models/deformable_detr/modular_deformable_detr.py,sha256=pFFZknbkPCW6EaA8JC_vJv8weqzwtUfQ7aHa4SpCscg,6573 +transformers/models/deit/__init__.py,sha256=8S1h-sIvhRy1EiQ7DKXHqqNEgR0_juhrAyQZ2AU1rVw,1155 +transformers/models/deit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deit/__pycache__/configuration_deit.cpython-310.pyc,, +transformers/models/deit/__pycache__/feature_extraction_deit.cpython-310.pyc,, +transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc,, +transformers/models/deit/__pycache__/image_processing_deit_fast.cpython-310.pyc,, +transformers/models/deit/__pycache__/modeling_deit.cpython-310.pyc,, +transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc,, +transformers/models/deit/configuration_deit.py,sha256=qd4pscfJKPWbxhmdCzj1Fdv19o8KPIuXwJpUI04Puf4,6375 +transformers/models/deit/feature_extraction_deit.py,sha256=0kfS_x_-B8O9b6ECuj3kosuPP9bwHKO_ZzjuvkBnPsc,1284 +transformers/models/deit/image_processing_deit.py,sha256=5n6oeHNAsl8GrCKNLO6mR6qhQlFkUOBWxKkzE_wzlE8,15332 +transformers/models/deit/image_processing_deit_fast.py,sha256=DrJaX0I_Pu2tihvqPrsUZRdIWFTtH4BrTKT22RqVfYU,1399 +transformers/models/deit/modeling_deit.py,sha256=BtokKLqCuuWvXBW9WFK97gwyBbTkrYSJzqTcwYMt9PM,38385 +transformers/models/deit/modeling_tf_deit.py,sha256=iGVk1mPqdtOAbd6jo5j3gbueqXA30JVy7XBs2tE9kRY,51676 +transformers/models/deprecated/__init__.py,sha256=upBgfMVSzFMxNZYSd4AXNGvd0IkwHZ-ygfdf34srafo,1596 
+transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/bort/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/deta/__init__.py,sha256=WNvQWU-pO4wBtGZPE5TAHF0OF1RPjEIgql1GC9wnmf8,1032 +transformers/models/deprecated/deta/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/deta/__pycache__/configuration_deta.cpython-310.pyc,, +transformers/models/deprecated/deta/__pycache__/image_processing_deta.cpython-310.pyc,, +transformers/models/deprecated/deta/__pycache__/modeling_deta.cpython-310.pyc,, +transformers/models/deprecated/deta/configuration_deta.py,sha256=Hvr4QZAIruQw-s7C2JarGwAzrXzqg8FU40Co6g7Hhlc,14198 +transformers/models/deprecated/deta/image_processing_deta.py,sha256=HnnwUSDq_e5aIugcapay6VaE7qVWqQenj0W4Nk64-9M,54964 +transformers/models/deprecated/deta/modeling_deta.py,sha256=9tphDXNvMmTTvslFPnn5kx9Yb7Q6EE6s_QWiNP-aJrU,135354 +transformers/models/deprecated/efficientformer/__init__.py,sha256=RIMtCzn7AGYDfv279AZxapQ7tM7FFguknlC5CShrV3M,1112 +transformers/models/deprecated/efficientformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc,, +transformers/models/deprecated/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc,, +transformers/models/deprecated/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc,, +transformers/models/deprecated/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc,, +transformers/models/deprecated/efficientformer/configuration_efficientformer.py,sha256=liR9COZM1WNt1Cp7LtY0hBm4HQxcFcTSML5Sokq8Jwc,7739 +transformers/models/deprecated/efficientformer/image_processing_efficientformer.py,sha256=6U_fds9OmhfaRV3Oz21sSMjD370AM_mQ9JA11O3DzYw,15772 
+transformers/models/deprecated/efficientformer/modeling_efficientformer.py,sha256=L70Z_gB2hgmxfjkYezlKRbQbOZnKdoqUChXAkc12wlA,33763 +transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py,sha256=VBiFapA65P5jM1IblTLuZDWStd609KRYM1g5eRwrf1A,49408 +transformers/models/deprecated/ernie_m/__init__.py,sha256=LlPR0I3qUe-L3t0xeakW3FKohvQgcWBgRMxENjy6_Ew,1047 +transformers/models/deprecated/ernie_m/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc,, +transformers/models/deprecated/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc,, +transformers/models/deprecated/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc,, +transformers/models/deprecated/ernie_m/configuration_ernie_m.py,sha256=jPmSWmo38ovIiyrzIcHvnj-CdbnzqkogCTcCkPPsf0o,5889 +transformers/models/deprecated/ernie_m/modeling_ernie_m.py,sha256=kIup5qyTCvw1dAmPoWZ8n0TvKW-0j0F7yNs-bixQEgM,47199 +transformers/models/deprecated/ernie_m/tokenization_ernie_m.py,sha256=se3eYEzFrfa1Z_Xnla9l7c4WsXBeF4gePXVh0jf81K4,16250 +transformers/models/deprecated/gptsan_japanese/__init__.py,sha256=Q0KI_MuMRbQNKBzYOEsDgNZLktGUjTUWlm-1-TmdAeE,1061 +transformers/models/deprecated/gptsan_japanese/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc,, +transformers/models/deprecated/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc,, +transformers/models/deprecated/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc,, +transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py,sha256=UQbzr2Yr4EkjZgUE0hj8HYuelQDZZi0b7nw_vwtOvvk,7169 +transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py,sha256=8yTwr1pjXAgo27_3J9OL7MKvLFSHIOG34j_PUu5slw8,64863 
+transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py,sha256=5SgFlrtT8qCXUay6paKiEIW8h6bh9NN9rq5BJ-iScMI,23347 +transformers/models/deprecated/graphormer/__init__.py,sha256=qvmWWqa8KkAItGYVAHgjatAQlmjcF0bovLch0U0ubc8,1003 +transformers/models/deprecated/graphormer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/graphormer/__pycache__/collating_graphormer.cpython-310.pyc,, +transformers/models/deprecated/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc,, +transformers/models/deprecated/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc,, +transformers/models/deprecated/graphormer/algos_graphormer.pyx,sha256=b_Qlm1hKCHnAqx6oOLGC9LkivAV0K_AZRGgXT9MmBas,3635 +transformers/models/deprecated/graphormer/collating_graphormer.py,sha256=DFDjr0s7B_hG4J3foSOQDoy5mFMsrsK1MCTq_HwXA-I,6088 +transformers/models/deprecated/graphormer/configuration_graphormer.py,sha256=vg6O_wY-Xn_aTVTg5XTYqNREozAomSLCHVo_diH9Pas,10480 +transformers/models/deprecated/graphormer/modeling_graphormer.py,sha256=QbQNmiKzedpILarXhoFiZR9McA29QJ1w_eJ2DVaMdpQ,37118 +transformers/models/deprecated/jukebox/__init__.py,sha256=5boFy1Eld2ll-ZpGhar77TZp4gVN5m-Ks8QumIZeAcI,1037 +transformers/models/deprecated/jukebox/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc,, +transformers/models/deprecated/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc,, +transformers/models/deprecated/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc,, +transformers/models/deprecated/jukebox/configuration_jukebox.py,sha256=etr-yMp1t851E6omXV9ZrZdruQL7NkA1lifBu9h6f6A,26837 +transformers/models/deprecated/jukebox/modeling_jukebox.py,sha256=TsKdCCiOrpbuoEc2cDZV518JyOgV7mkoFUobjcFr0VA,119621 +transformers/models/deprecated/jukebox/tokenization_jukebox.py,sha256=bwpU60IQUNrPoZ0Mg4cobVe9_04tnda-6KU0koLpAC0,17366 
+transformers/models/deprecated/mctct/__init__.py,sha256=oL2eRCmC1eKqGcN2nn7WWmVh4Lyq6zvfTK8Fbcct-Cc,1073 +transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc,, +transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc,, +transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc,, +transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc,, +transformers/models/deprecated/mctct/configuration_mctct.py,sha256=se5nTkdBsiWJT9eGIbsAju4olV_f-GddUDtba3HUSEk,9101 +transformers/models/deprecated/mctct/feature_extraction_mctct.py,sha256=n6JOh7Mp6IqfGTTZKpNJx9EDyatyoxP5DMFhvEkojO8,13492 +transformers/models/deprecated/mctct/modeling_mctct.py,sha256=w2slXI67ndyGsWRCk9SQ6MyO0Nd3B4dLTe6ggeOIYXE,32550 +transformers/models/deprecated/mctct/processing_mctct.py,sha256=H7zMpvwQ_cjGwBl9NjkplynPo8L-uhY4zaCh9iq2JUc,5962 +transformers/models/deprecated/mega/__init__.py,sha256=MAxMoZtbT_fdVUYgMGeBlgwYRYVz07EeK5RyL2GB-ic,991 +transformers/models/deprecated/mega/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/mega/__pycache__/configuration_mega.cpython-310.pyc,, +transformers/models/deprecated/mega/__pycache__/modeling_mega.cpython-310.pyc,, +transformers/models/deprecated/mega/configuration_mega.py,sha256=nWdk-zPvpSI7UmUeXnClAwk-hEXCM5f5vt4xxJyAE_E,12642 +transformers/models/deprecated/mega/modeling_mega.py,sha256=DYDjSv0S2qOqgvEDtUdtyHzajYC2XKnCCVpXRlFvlvo,109441 +transformers/models/deprecated/mmbt/__init__.py,sha256=X5f5OKVKnz-mOSV_v9IbfPsDFzOpYCf2yU4ktLWWmOA,991 +transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc,, +transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc,, 
+transformers/models/deprecated/mmbt/configuration_mmbt.py,sha256=UNksVsSmP6e_52vlf5pa9ETgiQw6M2pM2ocVxq52fWY,1624 +transformers/models/deprecated/mmbt/modeling_mmbt.py,sha256=xil7uW5Q1fHo-0yo4eC0K6egN-sO0LasqBxe2wp9nTE,18983 +transformers/models/deprecated/nat/__init__.py,sha256=Ggl4KcqVEX5Ub66NyyA7fyMz_oBLHOMUlqRTVrYwAYs,989 +transformers/models/deprecated/nat/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/nat/__pycache__/configuration_nat.cpython-310.pyc,, +transformers/models/deprecated/nat/__pycache__/modeling_nat.cpython-310.pyc,, +transformers/models/deprecated/nat/configuration_nat.py,sha256=7ZZXfsex0BfTQ5HMdINit2aAC1j_6me30ctX4IDM35o,7001 +transformers/models/deprecated/nat/modeling_nat.py,sha256=ykeIInNhtHJ5JYit22j5sk5ND2fbQwcHNcvMLSGSPDA,39841 +transformers/models/deprecated/nezha/__init__.py,sha256=3WxwqDdNckh4KfXKV4gxIeKvkr_U1GBDA-MdEHux3JM,993 +transformers/models/deprecated/nezha/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/nezha/__pycache__/configuration_nezha.cpython-310.pyc,, +transformers/models/deprecated/nezha/__pycache__/modeling_nezha.cpython-310.pyc,, +transformers/models/deprecated/nezha/configuration_nezha.py,sha256=gZvb3NVibiLmMrTrjzlKcChmBET6dOhyAQCjFFDyp0Y,4845 +transformers/models/deprecated/nezha/modeling_nezha.py,sha256=KYidlGB1Aa9P8VdUgVzHqDF0D6ZJSiRDaCoMcnRhP24,73783 +transformers/models/deprecated/open_llama/__init__.py,sha256=hhWBBxouawhwSYkuWi7Co_dO86xNFofKrtxacOlcmiM,1023 +transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc,, +transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc,, +transformers/models/deprecated/open_llama/configuration_open_llama.py,sha256=Iwpxsxa85jUukrVctVnhz1zOswDR889ZYcXbW6-bxuA,7800 
+transformers/models/deprecated/open_llama/modeling_open_llama.py,sha256=mWbX7s6MlxpO-txXdhtNJyz2z1shBtbjtGl6PilnNE8,42632 +transformers/models/deprecated/qdqbert/__init__.py,sha256=0sVNCbOvGXfJhrGbtQ7zV4v8rctY5pMzYKUvngVcvRg,1020 +transformers/models/deprecated/qdqbert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc,, +transformers/models/deprecated/qdqbert/__pycache__/modeling_qdqbert.cpython-310.pyc,, +transformers/models/deprecated/qdqbert/configuration_qdqbert.py,sha256=HCvSo5NpospAUVinJu8NEGtDo4Oa2KHQX-_1kTkFA6g,5719 +transformers/models/deprecated/qdqbert/modeling_qdqbert.py,sha256=Gfuw-tq7W442wi_oacfR4AuyoxwFkDAr29lZxy593T4,76658 +transformers/models/deprecated/realm/__init__.py,sha256=Cqg86mvi125eaBzeoP10ykpvXvHD-InC6JYTDJXM3Ik,1109 +transformers/models/deprecated/realm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/realm/__pycache__/configuration_realm.cpython-310.pyc,, +transformers/models/deprecated/realm/__pycache__/modeling_realm.cpython-310.pyc,, +transformers/models/deprecated/realm/__pycache__/retrieval_realm.cpython-310.pyc,, +transformers/models/deprecated/realm/__pycache__/tokenization_realm.cpython-310.pyc,, +transformers/models/deprecated/realm/__pycache__/tokenization_realm_fast.cpython-310.pyc,, +transformers/models/deprecated/realm/configuration_realm.py,sha256=1CmnEKCJyYUmedP5pXcvLwrT2ThND3YHeuHfaEokz3M,7585 +transformers/models/deprecated/realm/modeling_realm.py,sha256=RPWqkWaE7P8yh-26lo-qhaKHl_-c9UpFIVtlRR5JWcg,83315 +transformers/models/deprecated/realm/retrieval_realm.py,sha256=bGzuAOl8j59toVMwzUHZbpKkNBuAeP-qo1kg8Wdh0q8,7012 +transformers/models/deprecated/realm/tokenization_realm.py,sha256=c_H4sBrcnU_bXnQZrHyPuPRvMJx_7Coc3jB0Skkyxzs,21995 +transformers/models/deprecated/realm/tokenization_realm_fast.py,sha256=vzueU81dDD3DLcdK5nhZIRJH6avUkPAB2lJ5e8F9jbA,9858 
+transformers/models/deprecated/retribert/__init__.py,sha256=bitEp-fOvn6_HvMY2CUlvJCGV5-baV6Bvl7EcbBh1jM,1090 +transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc,, +transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc,, +transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc,, +transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc,, +transformers/models/deprecated/retribert/configuration_retribert.py,sha256=nAqsKCL46N2eJwlcyfDs2ijqCWo7SDsOInq4rOsDAhs,5232 +transformers/models/deprecated/retribert/modeling_retribert.py,sha256=PzP_j4PTRb0wLpYvS3A6D9CjRFNT-gc99MZos1LwBR8,9349 +transformers/models/deprecated/retribert/tokenization_retribert.py,sha256=Uo7UQxxwhM-SonyGQtjXvpTZK7a9FLs2UTENCOXYIfo,19536 +transformers/models/deprecated/retribert/tokenization_retribert_fast.py,sha256=EXkSqeK6zEF8yng9UPSXjrxKaTHnhyGHWqlRugUsEYQ,6730 +transformers/models/deprecated/speech_to_text_2/__init__.py,sha256=gpV3g4cmZOc1rTvOVZQN9dY1eGoXQvVnSq0LMzYYJm0,1111 +transformers/models/deprecated/speech_to_text_2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/speech_to_text_2/__pycache__/configuration_speech_to_text_2.cpython-310.pyc,, +transformers/models/deprecated/speech_to_text_2/__pycache__/modeling_speech_to_text_2.cpython-310.pyc,, +transformers/models/deprecated/speech_to_text_2/__pycache__/processing_speech_to_text_2.cpython-310.pyc,, +transformers/models/deprecated/speech_to_text_2/__pycache__/tokenization_speech_to_text_2.cpython-310.pyc,, +transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py,sha256=0nf8Vrheuc_3fpZhc3fNFfd05fXLfKvCe6n9B5hAcNA,6052 +transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py,sha256=1_Aq5i_BwHCi0uZ8LbpqR9RPISZ4Tq_qaQDy4JzswkE,43222 
+transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py,sha256=24rIZ8aH2NxHMa9ZDleVpVrKUeOF1yJao5983ZxsBG4,4830 +transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py,sha256=vTNJBHuH4CwO6SvtOgx24mWtZzjjdHpY1ee7TZw1XxQ,8424 +transformers/models/deprecated/tapex/__init__.py,sha256=YDgKE4wAmqYPQ9U94PaZXHGDiLkZQAoMt4mNiO3QrXg,958 +transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc,, +transformers/models/deprecated/tapex/tokenization_tapex.py,sha256=mrRkwgPKHyVUpOKYSI12x9_Pgr3Yyv8uWcePFxKWuI8,64396 +transformers/models/deprecated/trajectory_transformer/__init__.py,sha256=qhJ78kxJOG5Q5d_NDrIiH5_btuaAKfluEzKD_nuESPw,1027 +transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc,, +transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc,, +transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py,sha256=EEkSTX_sw2eYmNqYutj1acxjZnlM-jFldEtqycOwJko,7105 +transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py,sha256=g5IJaL5QZ1ZDza3GDjD5MpqRV0J2TKgs--PLMOAbqgI,25472 +transformers/models/deprecated/transfo_xl/__init__.py,sha256=_wXu1dOeNxgJemZTynDRPmYOWcMQpYwkxbHIC_070_M,1088 +transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc,, +transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc,, +transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc,, +transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc,, 
+transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc,, +transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc,, +transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py,sha256=5lUsNUzrmw3wWw_35s4TQgXQhf7C-QovFDkLs7R74Io,7905 +transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py,sha256=j4yoV9YRQ6xroWV1XXTIYH46EEKXgFA9_x8YIIGu76Q,46054 +transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py,sha256=Dlv3ZzRduWFBnZZHn8RegbW45XeCecuYCzzzZC3bDXs,7633 +transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py,sha256=yBTvTAF0qBlqPU5U4mQ9J733wzzYmfibMgTlgnya1U8,56094 +transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py,sha256=L1l4K7sj8rwXzvhn7_-RK2UbOnYtfDUF0VdFr4L8nxA,10859 +transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py,sha256=PcUEB2E_ZkrCDiyWhxtVaHC3oa6DHEBQfja72VZ65bA,32193 +transformers/models/deprecated/tvlt/__init__.py,sha256=5kgH30TJlq9WEsRw6f5Bs7U4MxNFKzw2dvlENb-ZsPM,674 +transformers/models/deprecated/tvlt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/tvlt/__pycache__/configuration_tvlt.cpython-310.pyc,, +transformers/models/deprecated/tvlt/__pycache__/feature_extraction_tvlt.cpython-310.pyc,, +transformers/models/deprecated/tvlt/__pycache__/image_processing_tvlt.cpython-310.pyc,, +transformers/models/deprecated/tvlt/__pycache__/modeling_tvlt.cpython-310.pyc,, +transformers/models/deprecated/tvlt/__pycache__/processing_tvlt.cpython-310.pyc,, +transformers/models/deprecated/tvlt/configuration_tvlt.py,sha256=PaJXPttCw4t832Pqo1pV0MBYa9f-oDljfmc2SBMFXCI,8650 +transformers/models/deprecated/tvlt/feature_extraction_tvlt.py,sha256=DnmB8RIloaTfmfYIKTL-9hMGuVtWMgwazIJihx-6dxc,10591 +transformers/models/deprecated/tvlt/image_processing_tvlt.py,sha256=zwfD6pq_rpAKl0Ns_XIlmsNXVMrfPZ8KPzy4a_PMHT4,20296 
+transformers/models/deprecated/tvlt/modeling_tvlt.py,sha256=lYo_4VgLoKtIcGmwH5Q5hMg-tyA0vKOrrZWoYDBhTKc,56305 +transformers/models/deprecated/tvlt/processing_tvlt.py,sha256=YJ_YbqLKY3l34sbomN9U57z_CjhCKC411QBqy1zSNJs,3537 +transformers/models/deprecated/van/__init__.py,sha256=zH2jgRuTkGqz0fzogoEi1HhRMNmg8BbWjhZ9dwVWyM0,989 +transformers/models/deprecated/van/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/van/__pycache__/configuration_van.cpython-310.pyc,, +transformers/models/deprecated/van/__pycache__/modeling_van.cpython-310.pyc,, +transformers/models/deprecated/van/configuration_van.py,sha256=fTfaChmHuw2qoi_mZxIQxUH7JoVRBdkA38R_qPbrc3E,4683 +transformers/models/deprecated/van/modeling_van.py,sha256=AwO4-4nC0WktnXGICoO5HbuAKEVX6rMCUHQyBeRO0wc,21234 +transformers/models/deprecated/vit_hybrid/__init__.py,sha256=9OIBt-kLfL3VHtfpoj3rVLFzXbpwFu1F5QotHqQAUuM,1050 +transformers/models/deprecated/vit_hybrid/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/vit_hybrid/__pycache__/configuration_vit_hybrid.cpython-310.pyc,, +transformers/models/deprecated/vit_hybrid/__pycache__/image_processing_vit_hybrid.cpython-310.pyc,, +transformers/models/deprecated/vit_hybrid/__pycache__/modeling_vit_hybrid.cpython-310.pyc,, +transformers/models/deprecated/vit_hybrid/configuration_vit_hybrid.py,sha256=Eof6B__quu-2A2SDw7erhD6goglbdVsgB3wqJGEp9WE,8477 +transformers/models/deprecated/vit_hybrid/image_processing_vit_hybrid.py,sha256=LSZhAqrSMnyD-6z-aGFumKc_YeWwgwgC-tl74e9SnyU,16347 +transformers/models/deprecated/vit_hybrid/modeling_vit_hybrid.py,sha256=JCcbErRV1fNL-ENnf0vGb85cyjnu4SOl_HjG-7wTaQg,32420 +transformers/models/deprecated/xlm_prophetnet/__init__.py,sha256=q9zJIXoPqoGPw0x9PQXvpTrpSCw1y1WyYoNCFz-X554,1058 +transformers/models/deprecated/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/deprecated/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc,, 
+transformers/models/deprecated/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc,, +transformers/models/deprecated/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc,, +transformers/models/deprecated/xlm_prophetnet/configuration_xlm_prophetnet.py,sha256=eFMoiTH5qFKL6R0aLhV9-uqUvB2oVVcgrAia6wuLnAY,8968 +transformers/models/deprecated/xlm_prophetnet/modeling_xlm_prophetnet.py,sha256=ttnBHBgwOMlbeQU6urNmkKZdplY72ISgEGd-vCwt9VE,114219 +transformers/models/deprecated/xlm_prophetnet/tokenization_xlm_prophetnet.py,sha256=5m053u9Y9NEbmCUJlhprGGdC8ZrXy9VkZF3UGg3fsmo,13153 +transformers/models/depth_anything/__init__.py,sha256=Jbd8LXt-fU3_cTF7jBrkBBw-Kzscv6o7O0YiZy0R8-A,1009 +transformers/models/depth_anything/__pycache__/__init__.cpython-310.pyc,, +transformers/models/depth_anything/__pycache__/configuration_depth_anything.cpython-310.pyc,, +transformers/models/depth_anything/__pycache__/modeling_depth_anything.cpython-310.pyc,, +transformers/models/depth_anything/configuration_depth_anything.py,sha256=s_lwOMcrASuIud4jMw8rBn54vNn2zBzb1RTVVm_1KDU,8189 +transformers/models/depth_anything/modeling_depth_anything.py,sha256=_nNAnZwSPqgXq0wXwlTIdWrRMevkFx_gTGzt883Ip4Y,16684 +transformers/models/depth_pro/__init__.py,sha256=5R4N4IVUQuK8bCFtg9qGvJFceJaHXNj4HdCWkcsyELc,1096 +transformers/models/depth_pro/__pycache__/__init__.cpython-310.pyc,, +transformers/models/depth_pro/__pycache__/configuration_depth_pro.cpython-310.pyc,, +transformers/models/depth_pro/__pycache__/image_processing_depth_pro.cpython-310.pyc,, +transformers/models/depth_pro/__pycache__/image_processing_depth_pro_fast.cpython-310.pyc,, +transformers/models/depth_pro/__pycache__/modeling_depth_pro.cpython-310.pyc,, +transformers/models/depth_pro/configuration_depth_pro.py,sha256=s6k1XPk6kjjT_nMBTB2pO7sxYuxC5ZdHB1697Wu7dJs,10715 +transformers/models/depth_pro/image_processing_depth_pro.py,sha256=GTpvelj8rKZDmEQ9OL7zzVTzDvWA1C-b-O5qyZQZwkU,18952 
+transformers/models/depth_pro/image_processing_depth_pro_fast.py,sha256=H_0Ta-dtsYv0nX17bwP9A9hGfdicUP79l2ePq0JKJ5I,6947 +transformers/models/depth_pro/modeling_depth_pro.py,sha256=H8MZ8hN9TD2zTCR1_bSkF8olNAyRw4qpWGWFNDeiTbQ,43092 +transformers/models/detr/__init__.py,sha256=YEWZnoCCgWt4KZNfbSi-v4KNDOJT2-ii2sxanyVDkvY,1120 +transformers/models/detr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc,, +transformers/models/detr/__pycache__/feature_extraction_detr.cpython-310.pyc,, +transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc,, +transformers/models/detr/__pycache__/image_processing_detr_fast.cpython-310.pyc,, +transformers/models/detr/__pycache__/modeling_detr.cpython-310.pyc,, +transformers/models/detr/configuration_detr.py,sha256=wSdE-pszgelpLPMBREhmjXk77t8sPYthn9Cj4Qo02Y8,13918 +transformers/models/detr/feature_extraction_detr.py,sha256=VudvO9SXjwtxL9PPT8vM3vFKcpiOGOe6Mt8zbZuIV1I,1586 +transformers/models/detr/image_processing_detr.py,sha256=szTS50ZKG0sZHKe1c66sAga7ORp-EZ5WhNMIvX614bY,94088 +transformers/models/detr/image_processing_detr_fast.py,sha256=qBiqOfvNOZmH6vtcHv_sXagwYmehecn4w1-5V33R11s,59745 +transformers/models/detr/modeling_detr.py,sha256=mMW5P2vEGmzPbgUTNoWXIlgIBr7AtgCmHg2DqA-E5iY,77631 +transformers/models/dia/__init__.py,sha256=fvBcwJ7FAFDO6RNyUUMGrdSlUtowciNo3YYv7R2Qz1c,1133 +transformers/models/dia/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dia/__pycache__/configuration_dia.cpython-310.pyc,, +transformers/models/dia/__pycache__/feature_extraction_dia.cpython-310.pyc,, +transformers/models/dia/__pycache__/generation_dia.cpython-310.pyc,, +transformers/models/dia/__pycache__/modeling_dia.cpython-310.pyc,, +transformers/models/dia/__pycache__/modular_dia.cpython-310.pyc,, +transformers/models/dia/__pycache__/processing_dia.cpython-310.pyc,, +transformers/models/dia/__pycache__/tokenization_dia.cpython-310.pyc,, 
+transformers/models/dia/configuration_dia.py,sha256=6rARpGLt2jq6fUInltwlEzORfqT-Yt-DdU57qrHy1P0,20607 +transformers/models/dia/feature_extraction_dia.py,sha256=B4cSUOa-nXRyzZEPcJxkhlT52pTAt3ys4xQ3-EJWEo4,8503 +transformers/models/dia/generation_dia.py,sha256=lB35jIqH7vxAKR9zaunyi1F0mnJ0gUMzeb8L3VfMo6U,21821 +transformers/models/dia/modeling_dia.py,sha256=t3B8ZaGvodoYgQsPv8xDIlOvbq6gSe_Cj_N36rWXfEs,42391 +transformers/models/dia/modular_dia.py,sha256=m_QltzooNLg_YoNlWeJri5oUxS4VhDyzKlEhWRprfcw,33435 +transformers/models/dia/processing_dia.py,sha256=qsB6oHO-WNwqhfTLI-cXIhEz9SuZ84kCZWPolQffSj0,20461 +transformers/models/dia/tokenization_dia.py,sha256=O4dTQjoErcFLHF5IbLHp2IdlPj_8POhLfkiRr0p0BNI,4511 +transformers/models/dialogpt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/diffllama/__init__.py,sha256=Yosk5eQ82PblntLff-bL3pfJZ-AVKp5jbQK5R2SLVc8,1004 +transformers/models/diffllama/__pycache__/__init__.cpython-310.pyc,, +transformers/models/diffllama/__pycache__/configuration_diffllama.cpython-310.pyc,, +transformers/models/diffllama/__pycache__/modeling_diffllama.cpython-310.pyc,, +transformers/models/diffllama/__pycache__/modular_diffllama.cpython-310.pyc,, +transformers/models/diffllama/configuration_diffllama.py,sha256=SQr6FM8h6EQCI-NNYYksvK0JSy2WN8q6aGQNDFbJAgA,10688 +transformers/models/diffllama/modeling_diffllama.py,sha256=lDogNzW2s60hBMYQL0S8vi9Z_7uj1qaw-BCrApVeZeM,35130 +transformers/models/diffllama/modular_diffllama.py,sha256=pXXM4bZD7EicjaCqrMYCMM1ZmCtXeJdNi-RhKpaxnNI,20373 +transformers/models/dinat/__init__.py,sha256=N0HykajUSY5KsvPQNUxc8jAuuJntmDJ-Dz8Qa8_sJ9E,991 +transformers/models/dinat/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dinat/__pycache__/configuration_dinat.cpython-310.pyc,, +transformers/models/dinat/__pycache__/modeling_dinat.cpython-310.pyc,, 
+transformers/models/dinat/configuration_dinat.py,sha256=fhGXUqRCEkTgWL6rpPUF7J-W7usE4e7gl3DS_J99wMc,7356 +transformers/models/dinat/modeling_dinat.py,sha256=puqCPPJJoKKLD6cl6EU0lvH_I48iVO9fUYytuaWF36w,34939 +transformers/models/dinov2/__init__.py,sha256=fDyp5N-KcJzO-vUeT3fZA8UbC21FfGEhDOlYNvXHHDc,1033 +transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc,, +transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc,, +transformers/models/dinov2/__pycache__/modeling_flax_dinov2.cpython-310.pyc,, +transformers/models/dinov2/configuration_dinov2.py,sha256=020F55Jhk4nwbpzxfDxzi77Poe-5OpuGZ-f-mxEDYFg,8291 +transformers/models/dinov2/modeling_dinov2.py,sha256=6V2-1AeN9msv3sjRU0XonnvRhS5rsFdWQigNeBY0t2Y,33624 +transformers/models/dinov2/modeling_flax_dinov2.py,sha256=dfmeUz3KQ9d7eoX0X0QqJPKLHZv7c8GOrT2VF9_g7zk,31050 +transformers/models/dinov2_with_registers/__init__.py,sha256=s0cefgSRnlIVcdZYV0qz3Q9X3IEChU7mkGbbnr2IH6E,1023 +transformers/models/dinov2_with_registers/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dinov2_with_registers/__pycache__/configuration_dinov2_with_registers.cpython-310.pyc,, +transformers/models/dinov2_with_registers/__pycache__/modeling_dinov2_with_registers.cpython-310.pyc,, +transformers/models/dinov2_with_registers/__pycache__/modular_dinov2_with_registers.cpython-310.pyc,, +transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py,sha256=Jv2lhwSt3lSvN8BQuhsK6rmW1IKtSqm4un5Qsvdm6DI,8633 +transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py,sha256=CzOF-6k-V9TsBznvM-R75KPJln0P9PyXIAwPRrtNBG8,35833 +transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py,sha256=g5SrRwqglPO9wdKAPmVqzBGSnWoeKMtmHfF01Meu2eA,21882 +transformers/models/distilbert/__init__.py,sha256=dKwCe9QsyAaNsdUJFMUa-vcuHPSQSuLKFoFBvK3cLEY,1178 
+transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc,, +transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc,, +transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc,, +transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc,, +transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc,, +transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc,, +transformers/models/distilbert/configuration_distilbert.py,sha256=h2rBKH_a_aEdRDo-5JEHAbVwUf1-6Sy6xpNRjdLvhnE,6055 +transformers/models/distilbert/modeling_distilbert.py,sha256=cDxXicUpdfta3tn79Umlw-A0-GXuRRhrgySV_GlZJpQ,57139 +transformers/models/distilbert/modeling_flax_distilbert.py,sha256=U0jH7nehL1vEt1gPTDwA882r8erT6Ol_QnOhgv7owro,32922 +transformers/models/distilbert/modeling_tf_distilbert.py,sha256=tOX6XGcxJXcy3lPurE3r0pRjX0U4TMWExb-EQbFc4eA,48995 +transformers/models/distilbert/tokenization_distilbert.py,sha256=hpvjVFbpRKQ1rFKW9QoGzUf2Ds3DAYLe_ZRLWi2EdCo,20999 +transformers/models/distilbert/tokenization_distilbert_fast.py,sha256=GfYGXroocFvwRahr1KHTowy-qtYwoqeqmt31MWlf2E0,6827 +transformers/models/dit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +transformers/models/dit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/doge/__init__.py,sha256=SU7Ea0BtJ8psgsDda2RcXdb1466OYN0l5IK7aOiuanY,1024 +transformers/models/doge/__pycache__/__init__.cpython-310.pyc,, +transformers/models/doge/__pycache__/configuration_doge.cpython-310.pyc,, +transformers/models/doge/__pycache__/modeling_doge.cpython-310.pyc,, +transformers/models/doge/__pycache__/modular_doge.cpython-310.pyc,, +transformers/models/doge/configuration_doge.py,sha256=ru7TCBPp-DU7sEJj91O_LDVlmDOIG1dANiUIChhBe7k,13906 
+transformers/models/doge/modeling_doge.py,sha256=1TfTq_ReRwOGlkGTkQUc6odQTUtBhVomq_KRmR9g_hU,36111 +transformers/models/doge/modular_doge.py,sha256=AjO5fGiJOiUzDqZgGJeZWf2tUVNPFgMCkgGTk6YXKeM,37037 +transformers/models/donut/__init__.py,sha256=O1JOQtPxtjcxSfWgC_PUZkmfvcKzjPgNAakujNra1PA,1170 +transformers/models/donut/__pycache__/__init__.cpython-310.pyc,, +transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc,, +transformers/models/donut/__pycache__/feature_extraction_donut.cpython-310.pyc,, +transformers/models/donut/__pycache__/image_processing_donut.cpython-310.pyc,, +transformers/models/donut/__pycache__/image_processing_donut_fast.cpython-310.pyc,, +transformers/models/donut/__pycache__/modeling_donut_swin.cpython-310.pyc,, +transformers/models/donut/__pycache__/processing_donut.cpython-310.pyc,, +transformers/models/donut/configuration_donut_swin.py,sha256=mHg0P4MRxMOw_IsHKFtBIuSuuY0tINGN3FmUImMqST8,5785 +transformers/models/donut/feature_extraction_donut.py,sha256=JfpHRB_aTYyBkySWUgofHHwGxIA8hpaS8NilnFsgIAU,1292 +transformers/models/donut/image_processing_donut.py,sha256=IlcGmiUtmw4WbiIFNfmx4-6hpxfRUCYP0A4vydTHyDQ,22469 +transformers/models/donut/image_processing_donut_fast.py,sha256=W9qwI8vceBSJonSm1_gs1T0ct2BjJIMTnDxKS8VALVY,10653 +transformers/models/donut/modeling_donut_swin.py,sha256=koP6hyXSj5jywJRfleWOv_iRV-gTtOhcR8fF22nM_wE,45759 +transformers/models/donut/processing_donut.py,sha256=jc_ZsmudftkQs7VH1-2XkAZgnsrmweSf4KDkN0mI7ow,9220 +transformers/models/dots1/__init__.py,sha256=A2jXARtNWOrbWAW2SIrsvvydm2_2keRyUBbNEz0By-I,991 +transformers/models/dots1/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dots1/__pycache__/configuration_dots1.cpython-310.pyc,, +transformers/models/dots1/__pycache__/modeling_dots1.cpython-310.pyc,, +transformers/models/dots1/__pycache__/modular_dots1.cpython-310.pyc,, +transformers/models/dots1/configuration_dots1.py,sha256=5sZgR6-n_wmbpEd6_DBNXf-GQT7haPQdEsgYyTMLbIY,10080 
+transformers/models/dots1/modeling_dots1.py,sha256=oBSAvksffNdmQEOsPOisFSX2RmGeqbUXJwYHIXG-nUc,27372 +transformers/models/dots1/modular_dots1.py,sha256=8XKPMQTryHp4hfezgNwgdsM0Xn83wWwv5vzmU4ZxgxM,3260 +transformers/models/dpr/__init__.py,sha256=z4FocLkQ_ckWtBZctTh-aeV1haJJY-lXF0ZRKuVbVkc,1099 +transformers/models/dpr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dpr/__pycache__/configuration_dpr.cpython-310.pyc,, +transformers/models/dpr/__pycache__/modeling_dpr.cpython-310.pyc,, +transformers/models/dpr/__pycache__/modeling_tf_dpr.cpython-310.pyc,, +transformers/models/dpr/__pycache__/tokenization_dpr.cpython-310.pyc,, +transformers/models/dpr/__pycache__/tokenization_dpr_fast.cpython-310.pyc,, +transformers/models/dpr/configuration_dpr.py,sha256=-DGEGi7rH0bSlWSh2WCFvB6cdZ6bJ8kO3u8xvhDS8mk,6432 +transformers/models/dpr/modeling_dpr.py,sha256=L0Kl_E2Cu740zGQPZCT7wpmLBECZNGaS3V3DzRN2a7A,22826 +transformers/models/dpr/modeling_tf_dpr.py,sha256=VpzB67gC0sMnnuVcAxUhHxxj-Lxjnf658LIOFvhKLRc,33857 +transformers/models/dpr/tokenization_dpr.py,sha256=KuNEnPczArBcq36g3p_eueA2Z5AG4Y7vtgqMFHstzE4,15834 +transformers/models/dpr/tokenization_dpr_fast.py,sha256=ClKoqvgOkO4nnQX8R0KwwMTl5gZhh1VLMURD_9nEN-o,16215 +transformers/models/dpt/__init__.py,sha256=7i4wNFCo8NTFsJQi7TO9u0VGxpZ8xg1_QNvD862jNk4,1114 +transformers/models/dpt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/dpt/__pycache__/configuration_dpt.cpython-310.pyc,, +transformers/models/dpt/__pycache__/feature_extraction_dpt.cpython-310.pyc,, +transformers/models/dpt/__pycache__/image_processing_dpt.cpython-310.pyc,, +transformers/models/dpt/__pycache__/image_processing_dpt_fast.cpython-310.pyc,, +transformers/models/dpt/__pycache__/modeling_dpt.cpython-310.pyc,, +transformers/models/dpt/__pycache__/modular_dpt.cpython-310.pyc,, +transformers/models/dpt/configuration_dpt.py,sha256=jCbSEFTvEZ5V7iHmjL_SxTZ9azTI4N2qEcd9CcGxPGo,14909 
+transformers/models/dpt/feature_extraction_dpt.py,sha256=oCMnm3Pf3cDqtuENmJyqiT0F6OOFKKC5AjwldSpx7t8,1276 +transformers/models/dpt/image_processing_dpt.py,sha256=VUTVk0cd4riKrr8Nle4lftL3RGnOgvLc_Id1AgPrXAk,31707 +transformers/models/dpt/image_processing_dpt_fast.py,sha256=Q5y6_Qc4XHD_yBaO63aaqrfC2SJpBv5SE0C1Qmtv3RI,17606 +transformers/models/dpt/modeling_dpt.py,sha256=RL-HARIOGje3Eu7mSprMG3M0kY78E0JQwHn8Sp_SNe4,54852 +transformers/models/dpt/modular_dpt.py,sha256=4Lyd8XTEvlF123LGl5n7zWid9fGyupgGab1xKu4HPQs,12357 +transformers/models/efficientloftr/__init__.py,sha256=KLzDB5R1BeMf0LOP0Ypp-RZhDrtn7RhjGk_UfAoq2O0,1060 +transformers/models/efficientloftr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/efficientloftr/__pycache__/configuration_efficientloftr.cpython-310.pyc,, +transformers/models/efficientloftr/__pycache__/image_processing_efficientloftr.cpython-310.pyc,, +transformers/models/efficientloftr/__pycache__/modeling_efficientloftr.cpython-310.pyc,, +transformers/models/efficientloftr/configuration_efficientloftr.py,sha256=EGFjtJ-uo99Yq14qqnI7sD76W6qrrDC6Y7fyX5uYHRA,10714 +transformers/models/efficientloftr/image_processing_efficientloftr.py,sha256=zaQwY0TonpYoXe7HrvnDWId7GPsg32j7FCoHHDb4ttI,21489 +transformers/models/efficientloftr/modeling_efficientloftr.py,sha256=-ST4nly8ODwZPP-ynmplVyRF2h-7eMsbfayiW76Lf90,57676 +transformers/models/efficientnet/__init__.py,sha256=0wxLCBxWBCh8uj4nH1syYJ76kRvUlMS6EUN5E2L2Qwc,1108 +transformers/models/efficientnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/efficientnet/__pycache__/configuration_efficientnet.cpython-310.pyc,, +transformers/models/efficientnet/__pycache__/image_processing_efficientnet.cpython-310.pyc,, +transformers/models/efficientnet/__pycache__/image_processing_efficientnet_fast.cpython-310.pyc,, +transformers/models/efficientnet/__pycache__/modeling_efficientnet.cpython-310.pyc,, 
+transformers/models/efficientnet/configuration_efficientnet.py,sha256=pzLFg-QlskKos0hEVJI55TihTkR0Kn_WvxRAtHQEN_E,7660 +transformers/models/efficientnet/image_processing_efficientnet.py,sha256=O8YZLO1cvFSg12m7as3zAcaDkWz19p8b2lEcGAVbFko,18440 +transformers/models/efficientnet/image_processing_efficientnet_fast.py,sha256=etc5FO3clqtf7xbBRYfwf3MhZmXQ1Hs8Sl5SiYYgSSY,8142 +transformers/models/efficientnet/modeling_efficientnet.py,sha256=zt2IRIpvWUbJsX9B9c0eYdbEx0ungj0mOU6Vn255g_o,21370 +transformers/models/electra/__init__.py,sha256=e6DkZL6cjtWVsTx7tamR-zsyv0tuRYLbuYn-r-04P84,1160 +transformers/models/electra/__pycache__/__init__.cpython-310.pyc,, +transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc,, +transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc,, +transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc,, +transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc,, +transformers/models/electra/__pycache__/tokenization_electra.cpython-310.pyc,, +transformers/models/electra/__pycache__/tokenization_electra_fast.cpython-310.pyc,, +transformers/models/electra/configuration_electra.py,sha256=Q8udyq_AymQzkwnlrPurj4mdl9f4QU2l0YODKH523uc,9170 +transformers/models/electra/modeling_electra.py,sha256=psYcfxMrH0BsiTGefV7dbBPZVZJbqWJoezeTmUgZjDY,69468 +transformers/models/electra/modeling_flax_electra.py,sha256=I8aa02ZkCCKL6ch_L38BFxYmQK-EWuMnSEUsT_qUxPE,62613 +transformers/models/electra/modeling_tf_electra.py,sha256=dW-ED6HQKsLpr_QzQy3OqL-nMZBzmT_lhrlRz2bic08,78430 +transformers/models/electra/tokenization_electra.py,sha256=n0A4Wm-kdEhLKUkbn3dFM8nGWPV0vV71AzpH-YigFmk,20108 +transformers/models/electra/tokenization_electra_fast.py,sha256=vIX5oBKDTWD2Vn_CHNOUFX9Y3PB4QTUfxGyZNUswEVY,6590 +transformers/models/emu3/__init__.py,sha256=VEBLADqeToacty2xd3Zu0F_fLQRxvhfiKPkuB9jwcFM,1070 +transformers/models/emu3/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/emu3/__pycache__/configuration_emu3.cpython-310.pyc,, +transformers/models/emu3/__pycache__/image_processing_emu3.cpython-310.pyc,, +transformers/models/emu3/__pycache__/modeling_emu3.cpython-310.pyc,, +transformers/models/emu3/__pycache__/modular_emu3.cpython-310.pyc,, +transformers/models/emu3/__pycache__/processing_emu3.cpython-310.pyc,, +transformers/models/emu3/configuration_emu3.py,sha256=J1uT2xTI4-w4Z-z6jxPaGucY13t9wASKJD6_A-ee2rI,16175 +transformers/models/emu3/image_processing_emu3.py,sha256=lM5Md7MIAxIUcPB0G3wRcGLh0NUir9hOjwBb6sUCsxc,27844 +transformers/models/emu3/modeling_emu3.py,sha256=EhgGzzqKym0xhQYoNDsE2dtEe_cRUvkRgpQxuipn34g,64961 +transformers/models/emu3/modular_emu3.py,sha256=pIesdHoxD7zBqH3GIrluhZvk70U7sdfL7Aa-aZWcJXc,46417 +transformers/models/emu3/processing_emu3.py,sha256=MhJN-cLaDZPDh9kBjmJO2w3P4_mPLybVNzAD3S9ygFU,12681 +transformers/models/encodec/__init__.py,sha256=QbO9yEfCaRwYKbK0vvmwKMbqRAToyos-HTHhRmf7n5s,1041 +transformers/models/encodec/__pycache__/__init__.cpython-310.pyc,, +transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc,, +transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc,, +transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc,, +transformers/models/encodec/configuration_encodec.py,sha256=yi016nmXOdgM9NgSSWJW6rDDHpzTIPJJ8gG9OHZK83w,8705 +transformers/models/encodec/feature_extraction_encodec.py,sha256=ANwwpLKhArrcXtqC4eLpFFeJ2fZReWsaAtvdDuCcUYg,9944 +transformers/models/encodec/modeling_encodec.py,sha256=2I7D21VSVGBtfSJ5o0KAET3MLmen9j7QCCcZ12aXiLM,34636 +transformers/models/encoder_decoder/__init__.py,sha256=wxXN9-4nCvYICfq8pE592rdRiQXK7S69V2cWGVQyIkw,1107 +transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc,, +transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc,, +transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc,, 
+transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc,, +transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc,, +transformers/models/encoder_decoder/configuration_encoder_decoder.py,sha256=3RReXpXx5UcFH8EEfzAsyQrbJ9FmHuHfZ3vx-Br1-54,4596 +transformers/models/encoder_decoder/modeling_encoder_decoder.py,sha256=LhA8--3hT5Onu2GKOgltzwz7DWJ6FuhTSW0eBTSt_ds,30087 +transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py,sha256=3ypsQNbXgNqFOAfeP2yHphNGNmUeSGZX1fJmHef6hB0,43595 +transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py,sha256=2X1IjAql3oKDcdD7ckPkKt8CwelCszl0LjnyCDQ0okY,34246 +transformers/models/eomt/__init__.py,sha256=yXVhFSLXI_umGHqTtRHHaTevf4KwrA9LOldvY7DfuFk,1076 +transformers/models/eomt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/eomt/__pycache__/configuration_eomt.cpython-310.pyc,, +transformers/models/eomt/__pycache__/image_processing_eomt.cpython-310.pyc,, +transformers/models/eomt/__pycache__/image_processing_eomt_fast.cpython-310.pyc,, +transformers/models/eomt/__pycache__/modeling_eomt.cpython-310.pyc,, +transformers/models/eomt/__pycache__/modular_eomt.cpython-310.pyc,, +transformers/models/eomt/configuration_eomt.py,sha256=nvH6qjar0r7ejH9yM_WWvz8aiVQJG188ZdLedGMTOUw,8069 +transformers/models/eomt/image_processing_eomt.py,sha256=WNllkpsr2RT4BjQ0zVZH6E28-_l8cFli9bJbXmb6jdc,41099 +transformers/models/eomt/image_processing_eomt_fast.py,sha256=WMizVDxGmG_mOWZh-PTSC6T--34uc_J-6QRksG0WvmQ,22152 +transformers/models/eomt/modeling_eomt.py,sha256=pTCowZpK7lcMhO_HomcZhCql8lZYESIA6JBWVPpgxTA,55692 +transformers/models/eomt/modular_eomt.py,sha256=N0G8SF6297fNB8esls3UsBTloEOvnqqrwoNWKELQ1Xs,25207 +transformers/models/ernie/__init__.py,sha256=TyzaXpzGwu-WqsIn1tavDqa7BCV9X-mPho4JDa9gk0I,991 +transformers/models/ernie/__pycache__/__init__.cpython-310.pyc,, +transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc,, 
+transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc,, +transformers/models/ernie/configuration_ernie.py,sha256=_1shyRgpTVMQS2z7kEW8FF7spluVDc-azldGq8Clr4Y,7719 +transformers/models/ernie/modeling_ernie.py,sha256=CStj_LVJZHfjjDqfNT3F6KxVEogczVPt82eK8Q__-y0,76442 +transformers/models/ernie4_5/__init__.py,sha256=5tqpitaOWvT1CdXTgMtMLCKIkUG683y3jdcTZ8yuwfM,997 +transformers/models/ernie4_5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/ernie4_5/__pycache__/configuration_ernie4_5.cpython-310.pyc,, +transformers/models/ernie4_5/__pycache__/modeling_ernie4_5.cpython-310.pyc,, +transformers/models/ernie4_5/__pycache__/modular_ernie4_5.cpython-310.pyc,, +transformers/models/ernie4_5/configuration_ernie4_5.py,sha256=Sg3qqYRBjErH7dPcyPdfe3CKYnaxSpjo5d2tI5RAVvs,10671 +transformers/models/ernie4_5/modeling_ernie4_5.py,sha256=y2zI4OpAYikTb2yPjjQhxzsnKvEwdM2KE1Z2cvDFQ9M,20405 +transformers/models/ernie4_5/modular_ernie4_5.py,sha256=atxPkLgNJUxLZxe40oyyltpdG_nhYMrf1Q5znZnA04w,5598 +transformers/models/ernie4_5_moe/__init__.py,sha256=MJaAQxyB3YypXN79FGJpkSvnX6KWt86iunOFXjiA7a4,1005 +transformers/models/ernie4_5_moe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/ernie4_5_moe/__pycache__/configuration_ernie4_5_moe.cpython-310.pyc,, +transformers/models/ernie4_5_moe/__pycache__/modeling_ernie4_5_moe.cpython-310.pyc,, +transformers/models/ernie4_5_moe/__pycache__/modular_ernie4_5_moe.cpython-310.pyc,, +transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py,sha256=HpcWwQdyQFpvYwFzMBptIsUq90mRDxNVDQEXpJGJUc8,13489 +transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py,sha256=Vo-Kdar91VTuxC6pZXSWmOTVdoPYfYOAUZvuxkwKI2Q,33394 +transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py,sha256=9HMvylJQlXby-YDX6uiRYzxO0G2UlZtyJFZGS5LJ-kY,14432 +transformers/models/esm/__init__.py,sha256=muSqvVMt6mySkoAm7MjweiFHJVBSj70LlakjHmZ6PEE,1094 +transformers/models/esm/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/esm/__pycache__/configuration_esm.cpython-310.pyc,, +transformers/models/esm/__pycache__/modeling_esm.cpython-310.pyc,, +transformers/models/esm/__pycache__/modeling_esmfold.cpython-310.pyc,, +transformers/models/esm/__pycache__/modeling_tf_esm.cpython-310.pyc,, +transformers/models/esm/__pycache__/tokenization_esm.cpython-310.pyc,, +transformers/models/esm/configuration_esm.py,sha256=ehwkp9UcXcXXp6dphMO7cqdn-G1Bv1LUB4sohOvWy6Y,14436 +transformers/models/esm/modeling_esm.py,sha256=gvzoehX55ZlQfj-w_Xpf1YmMHfc7SWz1s6DwJIEssJU,52926 +transformers/models/esm/modeling_esmfold.py,sha256=0gclXC5xNu3gPjBV78lYQ6_6Cqsnpb_Y0PuUsdILd0I,86050 +transformers/models/esm/modeling_tf_esm.py,sha256=5GsQDy8NmoHzk0s9rr1PhBQXmGke7FgtKYxd6XN0k_k,68985 +transformers/models/esm/openfold_utils/__init__.py,sha256=Xy2uqvFsLC8Ax-OOce5PgoBDiZgEJgJPqs__p5SBWUY,446 +transformers/models/esm/openfold_utils/__pycache__/__init__.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/chunk_utils.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/data_transforms.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/loss.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/protein.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/residue_constants.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/rigid_utils.cpython-310.pyc,, +transformers/models/esm/openfold_utils/__pycache__/tensor_utils.cpython-310.pyc,, +transformers/models/esm/openfold_utils/chunk_utils.py,sha256=RYj0ZXiDGTBbO2b2aBKkyUoJlf6dlvD18aFRnY1FquM,14394 +transformers/models/esm/openfold_utils/data_transforms.py,sha256=Q5J_BpJ_8Fa5fZ8nP6kPB5ops-Y4MydSQkwZ-_yMDBA,3688 +transformers/models/esm/openfold_utils/feats.py,sha256=QCYupsVINo5jJuwYk38TejNYkPlGm6Kfc1YpNUxpI8s,8355 
+transformers/models/esm/openfold_utils/loss.py,sha256=sndbYMMXuL0KIHlzq7ZJUlQoIRMy2Q3ZGl3h20BR1rg,3692 +transformers/models/esm/openfold_utils/protein.py,sha256=rx3YMO93zal9R6F9equP6DgCIURENBu_943N-gho8R8,11499 +transformers/models/esm/openfold_utils/residue_constants.py,sha256=-B0kLYqC9xO75yTmIB4JJsajPm9doeNin-sRg1Z56_w,37940 +transformers/models/esm/openfold_utils/rigid_utils.py,sha256=oM1q5gGBukDtpQqrJosTmfASUEQRjM7Lo-u2PR-W6Cs,41006 +transformers/models/esm/openfold_utils/tensor_utils.py,sha256=94wNOGOftULOVB_WsyH6b-Sv38Ny1QCa4R6he3iRyl8,4763 +transformers/models/esm/tokenization_esm.py,sha256=HcbVQ9J-e7NjuhVSqcHMj-2PIGTtok-5zRRT-YhffdE,5379 +transformers/models/evolla/__init__.py,sha256=pOj8KGoc9jqtS_PYTeNCxZUtQrlFR_txE-kdZpiAkCw,1030 +transformers/models/evolla/__pycache__/__init__.cpython-310.pyc,, +transformers/models/evolla/__pycache__/configuration_evolla.cpython-310.pyc,, +transformers/models/evolla/__pycache__/modeling_evolla.cpython-310.pyc,, +transformers/models/evolla/__pycache__/modular_evolla.cpython-310.pyc,, +transformers/models/evolla/__pycache__/processing_evolla.cpython-310.pyc,, +transformers/models/evolla/configuration_evolla.py,sha256=dVQ0KACXeZOH8cY-oLyP_DrK9JWeHCX_J4mlDKcAplw,13876 +transformers/models/evolla/modeling_evolla.py,sha256=0snFeO-3wTdP4ijhGmvWUkuSRLl6SI8QLCFEXZyrsUU,76447 +transformers/models/evolla/modular_evolla.py,sha256=H-uDh2X8hSw1CTp5-VEF_BzKuv6eMvZeBPEB7MXIeLw,40255 +transformers/models/evolla/processing_evolla.py,sha256=scRGnauCc4EzWjSeVIh4sPgQpwKPUVF6KORn7OQz_40,11480 +transformers/models/exaone4/__init__.py,sha256=gUDbb0olRjqxaPnB3APYKYKRlDPG2phGkfrmf7mIVD4,1018 +transformers/models/exaone4/__pycache__/__init__.cpython-310.pyc,, +transformers/models/exaone4/__pycache__/configuration_exaone4.cpython-310.pyc,, +transformers/models/exaone4/__pycache__/modeling_exaone4.cpython-310.pyc,, +transformers/models/exaone4/__pycache__/modular_exaone4.cpython-310.pyc,, 
+transformers/models/exaone4/configuration_exaone4.py,sha256=H0T9yMltFro-TsEpDDRJdu_DIcSPvKBQJatrPRWKiX8,12610 +transformers/models/exaone4/modeling_exaone4.py,sha256=L58Wi-x95eQls3bSI72qBUrCXWlcMWmn0QGKO-MNkwI,23677 +transformers/models/exaone4/modular_exaone4.py,sha256=DTCaskhGWGiEMoKztVlcQsCGdMV9t483cAezIXY5eYE,23695 +transformers/models/falcon/__init__.py,sha256=qmBlF_xusyrueKMfriC2ldVrHzeLIT7ruSdduMODuE4,993 +transformers/models/falcon/__pycache__/__init__.cpython-310.pyc,, +transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc,, +transformers/models/falcon/__pycache__/modeling_falcon.cpython-310.pyc,, +transformers/models/falcon/configuration_falcon.py,sha256=5vh10LAkioX6y3qJh84u3SlXmu3gPArXNMe1apM7f9g,10917 +transformers/models/falcon/modeling_falcon.py,sha256=cbmAelXbGIjqTPfVpjJ9RR6l9RVsu-jym9mdnA4p4M0,64352 +transformers/models/falcon_h1/__init__.py,sha256=cpix3f3f_xMDLf2OLuyYZULnb7enZl3UZapPQuf0YZc,1012 +transformers/models/falcon_h1/__pycache__/__init__.cpython-310.pyc,, +transformers/models/falcon_h1/__pycache__/configuration_falcon_h1.cpython-310.pyc,, +transformers/models/falcon_h1/__pycache__/modeling_falcon_h1.cpython-310.pyc,, +transformers/models/falcon_h1/__pycache__/modular_falcon_h1.cpython-310.pyc,, +transformers/models/falcon_h1/configuration_falcon_h1.py,sha256=ql8WPS_TKtf2KPz0Ko_WxJpf7NVnyufn-B-j20kewfc,13894 +transformers/models/falcon_h1/modeling_falcon_h1.py,sha256=rfKC6eKO15co3I1KmIUVVMlEuyxkUekPEyvDlywIk_U,74352 +transformers/models/falcon_h1/modular_falcon_h1.py,sha256=PBE3oN6pDTxKFxqX4sYTF145gODbBcNGqF63zPvW0S8,61728 +transformers/models/falcon_mamba/__init__.py,sha256=Czo-T_Nt73nvRbK-yJEZAYsU3Bxu4i1fOxFuPosiFPw,1005 +transformers/models/falcon_mamba/__pycache__/__init__.cpython-310.pyc,, +transformers/models/falcon_mamba/__pycache__/configuration_falcon_mamba.cpython-310.pyc,, +transformers/models/falcon_mamba/__pycache__/modeling_falcon_mamba.cpython-310.pyc,, 
+transformers/models/falcon_mamba/__pycache__/modular_falcon_mamba.cpython-310.pyc,, +transformers/models/falcon_mamba/configuration_falcon_mamba.py,sha256=xbRMQ4ZHRpJmNHxVJh2tIYtnMJWAME0aDbKe1oMY_34,8846 +transformers/models/falcon_mamba/modeling_falcon_mamba.py,sha256=Kgi8wE3btuRVqd42YVVbYgBXN3vSTY4JzopyI-tLZ4w,41956 +transformers/models/falcon_mamba/modular_falcon_mamba.py,sha256=WlSEN1gxSqdzb1DxJEuAtjDaCaLM5qeLv8hPQ15aY9k,24518 +transformers/models/fastspeech2_conformer/__init__.py,sha256=pILmX51CcqSiFGtl_dsX1yW2S_QugA3UHAT8f4psOtA,1077 +transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc,, +transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc,, +transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc,, +transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py,sha256=TZ6a2rSWE3ikugOBx_sr4tULm2FpX8Qtj2S7MLBdnNQ,24656 +transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py,sha256=H1ysWNoXQ_noF121qoeyRa9n5glBrwGx2rRGhziTZXQ,68521 +transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py,sha256=x8b0G-lsibtRBg-I3FzLBHC1YhiTmr8A2o6V8LbEz6M,6258 +transformers/models/flaubert/__init__.py,sha256=LdGmxq7pcDPVcvqO1ol7VYtpjKKCAQuiJ1ISrNT9nEs,1078 +transformers/models/flaubert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/flaubert/__pycache__/configuration_flaubert.cpython-310.pyc,, +transformers/models/flaubert/__pycache__/modeling_flaubert.cpython-310.pyc,, +transformers/models/flaubert/__pycache__/modeling_tf_flaubert.cpython-310.pyc,, +transformers/models/flaubert/__pycache__/tokenization_flaubert.cpython-310.pyc,, +transformers/models/flaubert/configuration_flaubert.py,sha256=920NSmtA4I1NbeTk642E8OvKEWD9TnwBggtaIGyx70U,11250 
+transformers/models/flaubert/modeling_flaubert.py,sha256=-SzMlu8FYCiKg97RLc8q5eRvvvNE691zP-c40FxA7IU,81204 +transformers/models/flaubert/modeling_tf_flaubert.py,sha256=zeZWpnYi56nPKArmP43f6TTDPOxxObhgjd0CXJ4Y3Qo,57170 +transformers/models/flaubert/tokenization_flaubert.py,sha256=ACYpzkElWcCyW9lJX9mRqh1-uEI9qqV2IQIXSr8JhPk,20970 +transformers/models/flava/__init__.py,sha256=UZ-PnfpalIOh2pPXWj_WSjsxjLgMBh2kKVyyLsNTUOk,1160 +transformers/models/flava/__pycache__/__init__.cpython-310.pyc,, +transformers/models/flava/__pycache__/configuration_flava.cpython-310.pyc,, +transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc,, +transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc,, +transformers/models/flava/__pycache__/image_processing_flava_fast.cpython-310.pyc,, +transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc,, +transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc,, +transformers/models/flava/configuration_flava.py,sha256=bYaFpEYjHxYp07JhrUvr9ds5kFubfw51DAy-AAjIGwo,34125 +transformers/models/flava/feature_extraction_flava.py,sha256=fZzf449ea7VNw1xyNfCuoa_e2pMEfGSxqNTX9YdoE5I,1314 +transformers/models/flava/image_processing_flava.py,sha256=9_9CvPqhUtFXrkvtXFppD_Su6xS81IQC4wToozdKt_U,37658 +transformers/models/flava/image_processing_flava_fast.py,sha256=BL68CIlZHfYpx3HstI3cLVGpjsTxhYIIpYykXWjmml0,22270 +transformers/models/flava/modeling_flava.py,sha256=aximt-49IR_DEUGRjwklRojhpmuDXkl9Q0jI17ww23U,94780 +transformers/models/flava/processing_flava.py,sha256=4UWlall0AJqLU9cpNn85o4u6EHzHmHJ8e7z-P1NH4yc,6857 +transformers/models/fnet/__init__.py,sha256=V3nuz_DsD_K5-RuL-Gt4hr5FVtNz12s46O_Vtx_xvCY,1068 +transformers/models/fnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/fnet/__pycache__/configuration_fnet.cpython-310.pyc,, +transformers/models/fnet/__pycache__/modeling_fnet.cpython-310.pyc,, +transformers/models/fnet/__pycache__/tokenization_fnet.cpython-310.pyc,, 
+transformers/models/fnet/__pycache__/tokenization_fnet_fast.cpython-310.pyc,, +transformers/models/fnet/configuration_fnet.py,sha256=oZVGszdEYsE-nJnpSlmU3r4tENCfwHnNKaL4NmrD7N4,5567 +transformers/models/fnet/modeling_fnet.py,sha256=DD9ndiUWGbr7ikjckfblZGcmJ4xfRqtixb70Bq8MgL8,44223 +transformers/models/fnet/tokenization_fnet.py,sha256=1oHKKZ05BkW9gY2Ibq__USJVrfxIL6ee2_kJK3vTH_Y,13537 +transformers/models/fnet/tokenization_fnet_fast.py,sha256=Ed77wG8t5cE351Rx2shX98ysFjQFasEor4-U0zj2wYk,6841 +transformers/models/focalnet/__init__.py,sha256=kFk7pYv4troBIWdCYosHMKh8PAnpXqjlxaRRQ5adkG0,997 +transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc,, +transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc,, +transformers/models/focalnet/configuration_focalnet.py,sha256=L6CS3mcLLDZTIFeiTqweu8W1MogNQq8ZMrIiD_-g1x4,8057 +transformers/models/focalnet/modeling_focalnet.py,sha256=C0b31uipxCOBa_-yx7VmY-WHYo8HBt1uSwqhvV-atBA,38624 +transformers/models/fsmt/__init__.py,sha256=u_Xx7d3qDicqwR_W0js1h2wPiLKWM1RlMu7fsBdIHy4,1026 +transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc,, +transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc,, +transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc,, +transformers/models/fsmt/configuration_fsmt.py,sha256=aObyXA-L5avgtlYwtkng6IaNzgeCyp9OyruBNHeM25M,10292 +transformers/models/fsmt/modeling_fsmt.py,sha256=OHn-tbwO9UTeooo_a5vfP5OkI8zhzDwmWYNOVH537aQ,53210 +transformers/models/fsmt/tokenization_fsmt.py,sha256=86Txz3pYk6fL5nWcqfbpBSo_EuaC-O6tqqHo5zu9GUw,17944 +transformers/models/funnel/__init__.py,sha256=087Y3Xz6y0HA5SgKe-s2z-ZzUIq1u_axxCRh2__gVro,1182 +transformers/models/funnel/__pycache__/__init__.cpython-310.pyc,, +transformers/models/funnel/__pycache__/configuration_funnel.cpython-310.pyc,, 
+transformers/models/funnel/__pycache__/modeling_funnel.cpython-310.pyc,, +transformers/models/funnel/__pycache__/modeling_tf_funnel.cpython-310.pyc,, +transformers/models/funnel/__pycache__/tokenization_funnel.cpython-310.pyc,, +transformers/models/funnel/__pycache__/tokenization_funnel_fast.cpython-310.pyc,, +transformers/models/funnel/configuration_funnel.py,sha256=b53gi5CW7KpmzFFAM2klOVODwb1Jq30XbzX1rINu7x8,7682 +transformers/models/funnel/modeling_funnel.py,sha256=k5rkMNmHEkJ2YON2bDDmb5a9krS9z-ZjxZR-1OSIuMo,61585 +transformers/models/funnel/modeling_tf_funnel.py,sha256=hgLjudqsqKDO5ary0v3CQvfMysW7w9S-hX0nWkPO11I,80339 +transformers/models/funnel/tokenization_funnel.py,sha256=2VRzAH-LPCQcL_gm0LFKsoIPdAKajzCC3gUhrBWuPRE,22685 +transformers/models/funnel/tokenization_funnel_fast.py,sha256=dMo_pnTLyD926bjcPiormC4L_l6oV3W-Xt7xy858Mfs,8666 +transformers/models/fuyu/__init__.py,sha256=NcygIhTFvIZzXPZUReC1WYReGAVINSpG0xW7KqEmd8c,1065 +transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc,, +transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc,, +transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc,, +transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc,, +transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc,, +transformers/models/fuyu/configuration_fuyu.py,sha256=7jgdM3jnzy-Z-IZGjx3VgAhuS5_bnEa7FIO8y6kVTaw,10198 +transformers/models/fuyu/image_processing_fuyu.py,sha256=Wit3xBHJVI1-pqsdXHqxHa1kv8LlRaP4DBrvtp8Z9e0,33509 +transformers/models/fuyu/modeling_fuyu.py,sha256=T2MuNiISVCxmcbSV_4j8DNmxlEtDPMpAY13-rZsqAyk,18116 +transformers/models/fuyu/processing_fuyu.py,sha256=PiI50oOXHvE999M4fTaKaHKJeQbbP8hN7uJkH21gOlk,36574 +transformers/models/gemma/__init__.py,sha256=xXoIfeCXNQOEnARxU3QucfH5mn-a_AE4wp69YkykT50,1111 +transformers/models/gemma/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gemma/__pycache__/configuration_gemma.cpython-310.pyc,, 
+transformers/models/gemma/__pycache__/modeling_flax_gemma.cpython-310.pyc,, +transformers/models/gemma/__pycache__/modeling_gemma.cpython-310.pyc,, +transformers/models/gemma/__pycache__/modular_gemma.cpython-310.pyc,, +transformers/models/gemma/__pycache__/tokenization_gemma.cpython-310.pyc,, +transformers/models/gemma/__pycache__/tokenization_gemma_fast.cpython-310.pyc,, +transformers/models/gemma/configuration_gemma.py,sha256=LWnoaGz53xxPIbZT1WzQEuF7Rt_MplPVQ9NvP55XE9I,8375 +transformers/models/gemma/modeling_flax_gemma.py,sha256=rRGlPaBXI_lxtLz47GfwZabrdzM8EHctNgitlXNjz4Q,32439 +transformers/models/gemma/modeling_gemma.py,sha256=Glbxs_M6B3HTx39kNhErJMDiJfpSlamuQaVOWASo7t8,20953 +transformers/models/gemma/modular_gemma.py,sha256=47KbtBRgtAaiRJPcOARYVj-A9fmybNzoluhK-bOBE10,20320 +transformers/models/gemma/tokenization_gemma.py,sha256=AcVuuIvQS7kCoS03rX8VkC86S_ywQYKPUGq4ouFXdUY,14229 +transformers/models/gemma/tokenization_gemma_fast.py,sha256=iEJm0bejSYb1DmmXbb6UuRZaeGX0SLG1uCx5v625hTI,8097 +transformers/models/gemma2/__init__.py,sha256=H0jWJX-AcGRTjdzkGJagKnjB6GnpqVUG4ODFhMF9OWM,993 +transformers/models/gemma2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gemma2/__pycache__/configuration_gemma2.cpython-310.pyc,, +transformers/models/gemma2/__pycache__/modeling_gemma2.cpython-310.pyc,, +transformers/models/gemma2/__pycache__/modular_gemma2.cpython-310.pyc,, +transformers/models/gemma2/configuration_gemma2.py,sha256=d3AKBGul20cPAuq6zFgUlexHzjWPK5GBDW1IcQZv_E8,9578 +transformers/models/gemma2/modeling_gemma2.py,sha256=8NMG7X8O4UG4vfU6DUiJfkd36F54UZ1cYlhDNShFA8U,25411 +transformers/models/gemma2/modular_gemma2.py,sha256=jhpjeTvjGIUEQpa31YJAzxFier7Q1rk_LcLTV_srZuc,24914 +transformers/models/gemma3/__init__.py,sha256=yDt-ADg8e57SlRlpfsC7KzQCeYYgUrTz9ZO5VC5v_W4,1121 +transformers/models/gemma3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gemma3/__pycache__/configuration_gemma3.cpython-310.pyc,, 
+transformers/models/gemma3/__pycache__/image_processing_gemma3.cpython-310.pyc,, +transformers/models/gemma3/__pycache__/image_processing_gemma3_fast.cpython-310.pyc,, +transformers/models/gemma3/__pycache__/modeling_gemma3.cpython-310.pyc,, +transformers/models/gemma3/__pycache__/modular_gemma3.cpython-310.pyc,, +transformers/models/gemma3/__pycache__/processing_gemma3.cpython-310.pyc,, +transformers/models/gemma3/configuration_gemma3.py,sha256=fQ6Dor-5nOBhLaXRKruClkic6hK4CJ3oe1PMNyuJQNc,17748 +transformers/models/gemma3/image_processing_gemma3.py,sha256=1viHUF1_eWySzyurhknfiKjCXNmuuCieB3zp7_7MRgA,20087 +transformers/models/gemma3/image_processing_gemma3_fast.py,sha256=8vjmB29Dsx9vvgi26xd-QjJvKseLtQuirqetgieXDmI,11337 +transformers/models/gemma3/modeling_gemma3.py,sha256=uPzCs7s8kctKNvzmsvDj07DoAlnmAu2y3cVsPK_YRKY,57527 +transformers/models/gemma3/modular_gemma3.py,sha256=1OJ3nfGHjCbJx8Y23_7UVoC2g_iBJwGDMmCXNxes364,52429 +transformers/models/gemma3/processing_gemma3.py,sha256=khZzOnWTlGnxY2911Ydz9PHIKOmKG-kZj1aa8ElT-04,8471 +transformers/models/gemma3n/__init__.py,sha256=ZSrv5oSiULGXY7Vszb--vaJh1l7FBe1lrZD_3LX6cj4,1079 +transformers/models/gemma3n/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gemma3n/__pycache__/configuration_gemma3n.cpython-310.pyc,, +transformers/models/gemma3n/__pycache__/feature_extraction_gemma3n.cpython-310.pyc,, +transformers/models/gemma3n/__pycache__/modeling_gemma3n.cpython-310.pyc,, +transformers/models/gemma3n/__pycache__/modular_gemma3n.cpython-310.pyc,, +transformers/models/gemma3n/__pycache__/processing_gemma3n.cpython-310.pyc,, +transformers/models/gemma3n/configuration_gemma3n.py,sha256=N0cVBUp0GBUlzjPzysrEjagc8RBk3iN3jPkvUz4FtJQ,36370 +transformers/models/gemma3n/feature_extraction_gemma3n.py,sha256=5DZTmnacaWDU3cUEvyPtVdhoZ0jnvllVrwlBAHk6qGw,15120 +transformers/models/gemma3n/modeling_gemma3n.py,sha256=zv_KadvnJXpz_IYQN6SeVeFYOe7SzeKPHv6p7zrQJi8,112936 
+transformers/models/gemma3n/modular_gemma3n.py,sha256=ZMbppg0GbR1x5RS-itTRHJqsgzH_ERdQsG9xN3bEiGI,130330 +transformers/models/gemma3n/processing_gemma3n.py,sha256=ZKVDBzpX_Mgx0v7EXhlTcUoj0O9ekPkWvZDH9W_yn0Q,8397 +transformers/models/git/__init__.py,sha256=jY1iLd7UMOmcCfrKgzoUJawLa0DQ55wHN26L09YSwhc,1021 +transformers/models/git/__pycache__/__init__.cpython-310.pyc,, +transformers/models/git/__pycache__/configuration_git.cpython-310.pyc,, +transformers/models/git/__pycache__/modeling_git.cpython-310.pyc,, +transformers/models/git/__pycache__/processing_git.cpython-310.pyc,, +transformers/models/git/configuration_git.py,sha256=SNcI2qHfnAuwDcYWfiP8Sb_TQXPtosHlw1vDY8bEl04,10447 +transformers/models/git/modeling_git.py,sha256=J4ojdtWxwkwRjfMquGkvMK4Oy82_jc_6Rl5BJOcC768,62452 +transformers/models/git/processing_git.py,sha256=xaj5J-K1zQic8f9NjUP3Gotg_J88hnVIJ_6UqxB6zh8,6044 +transformers/models/glm/__init__.py,sha256=fIafw6FAflbbeG_nEM_VPJyMJHnu_NbWHTHjECIAvIs,987 +transformers/models/glm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/glm/__pycache__/configuration_glm.cpython-310.pyc,, +transformers/models/glm/__pycache__/modeling_glm.cpython-310.pyc,, +transformers/models/glm/__pycache__/modular_glm.cpython-310.pyc,, +transformers/models/glm/configuration_glm.py,sha256=0i7hGoGPrP308WOqZ2ZbGCw2-06GRiDvxAv_m2Fd-Fg,7535 +transformers/models/glm/modeling_glm.py,sha256=129Y03dRSaQuvShu5oj7YRlXueTWbstFjAdVJRfKwxw,21206 +transformers/models/glm/modular_glm.py,sha256=rOSXBsyECZhENwaJ8M9bBxJ1NK2Lwv7POZu67Uu3xgQ,4093 +transformers/models/glm4/__init__.py,sha256=okqViVxR-MUlkyIdKmSwrDKA7u8pGG49OIKtW9X1hvU,989 +transformers/models/glm4/__pycache__/__init__.cpython-310.pyc,, +transformers/models/glm4/__pycache__/configuration_glm4.cpython-310.pyc,, +transformers/models/glm4/__pycache__/modeling_glm4.cpython-310.pyc,, +transformers/models/glm4/__pycache__/modular_glm4.cpython-310.pyc,, 
+transformers/models/glm4/configuration_glm4.py,sha256=lFRWkK1kw_GDnhi0w0BViKnQ9FBpRp0uMEyjLxNW7dY,7551 +transformers/models/glm4/modeling_glm4.py,sha256=bD3iuiCze1h3mI04iO0J6u9rag8o32R4l_qFaCRblNI,22082 +transformers/models/glm4/modular_glm4.py,sha256=li9zndSmb4dWKeG5WrqaSMIdnJ4mQtCH6UBfsIZ9WR0,5234 +transformers/models/glm4_moe/__init__.py,sha256=dfmB1kPUzq5-xfXh3zFtfGdSJu7CDDbfL401u_EayjM,997 +transformers/models/glm4_moe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/glm4_moe/__pycache__/configuration_glm4_moe.cpython-310.pyc,, +transformers/models/glm4_moe/__pycache__/modeling_glm4_moe.cpython-310.pyc,, +transformers/models/glm4_moe/__pycache__/modular_glm4_moe.cpython-310.pyc,, +transformers/models/glm4_moe/configuration_glm4_moe.py,sha256=uhNU910obiitL0_IAnbO_STy7uuIBY_uugscT0caAJE,13422 +transformers/models/glm4_moe/modeling_glm4_moe.py,sha256=o9mhOeCp9zGB88GPcac40vhxcL9QdFvdLIqO8Yf9GHk,26751 +transformers/models/glm4_moe/modular_glm4_moe.py,sha256=q6Q4Y6VFMA4ATfcE3Y2OKaktkB84hyGL7j1X-OXH8YM,15681 +transformers/models/glm4v/__init__.py,sha256=czqsAA98MYCyclV5YncS0xt0pnQcYUZc7jFgxZUEKmQ,1027 +transformers/models/glm4v/__pycache__/__init__.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/configuration_glm4v.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/image_processing_glm4v.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/image_processing_glm4v_fast.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/modeling_glm4v.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/modular_glm4v.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/processing_glm4v.cpython-310.pyc,, +transformers/models/glm4v/__pycache__/video_processing_glm4v.cpython-310.pyc,, +transformers/models/glm4v/configuration_glm4v.py,sha256=D16ls4Gmb8Htu301V3YxVwzkXJAIY0WIMLRAJkRrR_U,17657 +transformers/models/glm4v/image_processing_glm4v.py,sha256=HGr4zD0SOL5VQSchmpD4C4hnm5Vc9oOQ1l6CNWZSlgY,23749 
+transformers/models/glm4v/image_processing_glm4v_fast.py,sha256=ScRIbyI9N_bZi7xpD7Yf9j_MtYWrLa9ybZ2WKLWMHlA,7122 +transformers/models/glm4v/modeling_glm4v.py,sha256=qCjE1OssB9U2_zHuKy8WOzlVLXR389EN4mOBAl95rwU,79099 +transformers/models/glm4v/modular_glm4v.py,sha256=9IaYtJn2Y8rT0pccVkx_Y1oRmT-9DxeiZsCPjmpvz40,79520 +transformers/models/glm4v/processing_glm4v.py,sha256=iPXfnUuSW6jZGblm6HlBVrrBmHHkVOtkCwb7C3ImrKM,15496 +transformers/models/glm4v/video_processing_glm4v.py,sha256=9UK2Smidv1CIdHINvyF-0vRdpO5yoBVYvsU2fJeFIsE,10197 +transformers/models/glpn/__init__.py,sha256=YYoaugUj0un_FnfusrkzFfT_UtvUJEjMDaRDS8IcYAE,1073 +transformers/models/glpn/__pycache__/__init__.cpython-310.pyc,, +transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc,, +transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc,, +transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc,, +transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc,, +transformers/models/glpn/configuration_glpn.py,sha256=psEiatDZRceSeLe24Ch77es0_ugLEjzmzP81QthIXcI,5998 +transformers/models/glpn/feature_extraction_glpn.py,sha256=QC_SmxGijm3KyJtR_hEGG16TXPHpvv5pa5_0YrQLq0c,1284 +transformers/models/glpn/image_processing_glpn.py,sha256=pgSdZE05rAFDQsObYqEkDB3Pf_1ME11t-cd5AuAqFWs,12748 +transformers/models/glpn/modeling_glpn.py,sha256=qOE70YSKbsquwBdSiXD2En1oefwUtfZaNOWaVcH_IoM,29086 +transformers/models/got_ocr2/__init__.py,sha256=LBVZP8CBfOxaD9NLC2ZbZpLloHLIX7uDyM8m1-W2m6g,1138 +transformers/models/got_ocr2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/got_ocr2/__pycache__/configuration_got_ocr2.cpython-310.pyc,, +transformers/models/got_ocr2/__pycache__/image_processing_got_ocr2.cpython-310.pyc,, +transformers/models/got_ocr2/__pycache__/image_processing_got_ocr2_fast.cpython-310.pyc,, +transformers/models/got_ocr2/__pycache__/modeling_got_ocr2.cpython-310.pyc,, +transformers/models/got_ocr2/__pycache__/modular_got_ocr2.cpython-310.pyc,, 
+transformers/models/got_ocr2/__pycache__/processing_got_ocr2.cpython-310.pyc,, +transformers/models/got_ocr2/configuration_got_ocr2.py,sha256=n9a3boLFZN7HMCyFSrZhcZH-ceWk3_Ut9XJz1F_cEkI,9455 +transformers/models/got_ocr2/image_processing_got_ocr2.py,sha256=fCjvt5lD1zKSrEBa0w4WeeobmBuy5mgzzZ9OLVVDAbw,25532 +transformers/models/got_ocr2/image_processing_got_ocr2_fast.py,sha256=_L6ACaJcQ64AznSIYVrSen7sturWys2iFEm4SAwC7Xc,10753 +transformers/models/got_ocr2/modeling_got_ocr2.py,sha256=QtK2wLPvpBmZ2WiKX3aev4AeR3JnNZ659vWiowbGPtk,36667 +transformers/models/got_ocr2/modular_got_ocr2.py,sha256=9TxZTd7QlZ5RHErXT1WRtKbcsHdunr32JcU24nwajhU,19508 +transformers/models/got_ocr2/processing_got_ocr2.py,sha256=iqFs8SEHxSxEKZ3rMjTgdLIoqEr9XAyi2ojr4HCSIWY,13470 +transformers/models/gpt2/__init__.py,sha256=NRi7aYu3gezDPsiXiiG6dgSpCMHSIvFpC3iI0w-JMA0,1182 +transformers/models/gpt2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/configuration_gpt2.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/modeling_flax_gpt2.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/modeling_gpt2.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/modeling_tf_gpt2.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/tokenization_gpt2.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/tokenization_gpt2_fast.cpython-310.pyc,, +transformers/models/gpt2/__pycache__/tokenization_gpt2_tf.cpython-310.pyc,, +transformers/models/gpt2/configuration_gpt2.py,sha256=oWdrBVDmgPqQJ2orzzELbqkbE_hvKk9Op4pwtEXN3hY,12059 +transformers/models/gpt2/modeling_flax_gpt2.py,sha256=IBlHlVFZw-O_BArk5pqMJWk-wsTVNLQLef6MGNDlCRk,32109 +transformers/models/gpt2/modeling_gpt2.py,sha256=ZKFsmGDn2xy59abPQnsDZXrIVOwyJ5Vux2aB705W8aU,74593 +transformers/models/gpt2/modeling_tf_gpt2.py,sha256=xWPpjaGyLDObGq4-3zwsb5gQVpuXUZ2ayTZRI4wLB2c,56440 +transformers/models/gpt2/tokenization_gpt2.py,sha256=3bLgxaap-6YpehzZI-DR5s0FU8PM0riJjsmaiqKH_3Q,13154 
+transformers/models/gpt2/tokenization_gpt2_fast.py,sha256=pKJ2PVaSzWUSIjXWoLp0r8LiIupthk_8oaTNhD-PhAw,5274 +transformers/models/gpt2/tokenization_gpt2_tf.py,sha256=Ma7Z4lkXDyeJeTw5-wwkJwttvIdUnGptzmnhmvzCX7A,4071 +transformers/models/gpt_bigcode/__init__.py,sha256=KQNb7PO57eZpP345wSbe_C3iL-N4VPscw1GY2mv81uE,1003 +transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc,, +transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc,, +transformers/models/gpt_bigcode/configuration_gpt_bigcode.py,sha256=5pL1meyCVQZXvco9WsIFNvDhEdtpAEVgTOg-xg2cWrw,6375 +transformers/models/gpt_bigcode/modeling_gpt_bigcode.py,sha256=drd0ivvqpWJtXVnvIOu7mL2WGbMkA8-yE94MmhHugwI,40574 +transformers/models/gpt_neo/__init__.py,sha256=b25qxianvucgAd3OxuI00Rr5324o-CRes0zrcEIOCZI,1036 +transformers/models/gpt_neo/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt_neo/__pycache__/configuration_gpt_neo.cpython-310.pyc,, +transformers/models/gpt_neo/__pycache__/modeling_flax_gpt_neo.cpython-310.pyc,, +transformers/models/gpt_neo/__pycache__/modeling_gpt_neo.cpython-310.pyc,, +transformers/models/gpt_neo/configuration_gpt_neo.py,sha256=zRidKD8M7zf-YbaDWUYw8ScjQesO5BSI6d7YoHnyjwU,11907 +transformers/models/gpt_neo/modeling_flax_gpt_neo.py,sha256=fD4XijeKuru5evmV7NcPQAtgvQha8H-oEyJm2uNlE4Y,28175 +transformers/models/gpt_neo/modeling_gpt_neo.py,sha256=RDCP3oFnQTRJcmH1Uj1FpIK0-KwVn5sQNy4bQhBQZKA,51852 +transformers/models/gpt_neox/__init__.py,sha256=6CL92CuqBTIDJ-YH_doFwb-oRylAffw7pwxedv3a-40,1043 +transformers/models/gpt_neox/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt_neox/__pycache__/configuration_gpt_neox.cpython-310.pyc,, +transformers/models/gpt_neox/__pycache__/modeling_gpt_neox.cpython-310.pyc,, +transformers/models/gpt_neox/__pycache__/modular_gpt_neox.cpython-310.pyc,, 
+transformers/models/gpt_neox/__pycache__/tokenization_gpt_neox_fast.cpython-310.pyc,, +transformers/models/gpt_neox/configuration_gpt_neox.py,sha256=-z9ztrlOAgyorPeZKTp-fOvSVWOoZuqhiAfq0VTBuNM,10982 +transformers/models/gpt_neox/modeling_gpt_neox.py,sha256=X9Ym_93fMVXVKAtFTySkxmTY6fANDP-kVZ1u5hfHE-U,34577 +transformers/models/gpt_neox/modular_gpt_neox.py,sha256=g0-7sSzMePu1bbwnoA6hLHHBVuiITGr0OHlN0g_WqVE,28904 +transformers/models/gpt_neox/tokenization_gpt_neox_fast.py,sha256=iivrluP4OTkQlBQjqz1kpn0NhIrarqPafDbCgHGc80o,8986 +transformers/models/gpt_neox_japanese/__init__.py,sha256=z4kbUmZSjE-Hs9ba8ul3Yncc9ZJy7ePufbwwRlfqWqw,1065 +transformers/models/gpt_neox_japanese/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt_neox_japanese/__pycache__/configuration_gpt_neox_japanese.cpython-310.pyc,, +transformers/models/gpt_neox_japanese/__pycache__/modeling_gpt_neox_japanese.cpython-310.pyc,, +transformers/models/gpt_neox_japanese/__pycache__/tokenization_gpt_neox_japanese.cpython-310.pyc,, +transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py,sha256=Mae05uoCqq3q20e-da1CoCxCegR_Ng6q5R-1hrcacVI,9122 +transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py,sha256=zPq5C4A0URFXetD0768P5eiRBgFvv1cyE5vaz8xy4ws,32826 +transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py,sha256=0k_Len46JM8p50cubkBuklZtiAwcZJTaSOys0k8N4SI,16944 +transformers/models/gpt_oss/__init__.py,sha256=a3dnVKgP6RwbuxBJW3kodYKj8oVF5Y6pLJixMthP1yA,995 +transformers/models/gpt_oss/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt_oss/__pycache__/configuration_gpt_oss.cpython-310.pyc,, +transformers/models/gpt_oss/__pycache__/modeling_gpt_oss.cpython-310.pyc,, +transformers/models/gpt_oss/__pycache__/modular_gpt_oss.cpython-310.pyc,, +transformers/models/gpt_oss/configuration_gpt_oss.py,sha256=2xtqnLoCecRH2yTwEH2LNvM4W98-Qtzjz9wIe0BMueQ,4731 
+transformers/models/gpt_oss/modeling_gpt_oss.py,sha256=O9tvFRVPVVXCZHIOAhtO8fho3en75Ha7NRBlADDr-_U,31652 +transformers/models/gpt_oss/modular_gpt_oss.py,sha256=FqvR0dfounTp3zABx-h39brXWAZicyZ1UbvlbnjNx6E,19594 +transformers/models/gpt_sw3/__init__.py,sha256=-g6WlJ6EhhrJKCCsPf78cgvGD7oWvfeW9GBGBpW6wcM,958 +transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc,, +transformers/models/gpt_sw3/tokenization_gpt_sw3.py,sha256=6z6Yd9eLqFgEKb_Z4ow8kFOuhZVTD0ejrboc9aktNIc,12565 +transformers/models/gptj/__init__.py,sha256=rgFDJcsxcq1ytl7BTZthr7sSmaxqggSbvrIseycmE-s,1063 +transformers/models/gptj/__pycache__/__init__.cpython-310.pyc,, +transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc,, +transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc,, +transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc,, +transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc,, +transformers/models/gptj/configuration_gptj.py,sha256=wJU2oz2LYuleopQEzA2soQauHwae7JinCxJi_hGz2YM,8860 +transformers/models/gptj/modeling_flax_gptj.py,sha256=zod4lQZEi_H8sy4zbtmb2Gn5mEiFSmPZjYbffpztX_8,28620 +transformers/models/gptj/modeling_gptj.py,sha256=lluJBVsZSGwtj-dd8k4f5hSAi_sjAyBF3Q09RjdYMgM,54281 +transformers/models/gptj/modeling_tf_gptj.py,sha256=RHLHR65JSAH9IC-Rp3ZUVZKFkJfahNJZzlVwW--HMxE,47831 +transformers/models/granite/__init__.py,sha256=cDxmZNuphkDCs2U8W5C95Vhu577kdZHKHUWWaQ3vk5U,1015 +transformers/models/granite/__pycache__/__init__.cpython-310.pyc,, +transformers/models/granite/__pycache__/configuration_granite.cpython-310.pyc,, +transformers/models/granite/__pycache__/modeling_granite.cpython-310.pyc,, +transformers/models/granite/__pycache__/modular_granite.cpython-310.pyc,, +transformers/models/granite/configuration_granite.py,sha256=U1CQ2gvTYGx753UX0t5IOfcLllikDU7Jupj7NlOX3Dk,9348 
+transformers/models/granite/modeling_granite.py,sha256=dkpM-z1ZgCCmPUvZWfqxuM57lbfPGg7zNiRWQ1ijekM,24823 +transformers/models/granite/modular_granite.py,sha256=ViN37-5Q2So2sE4WvuDV1jaPH66LDglYNsvczakm5iM,11778 +transformers/models/granite_speech/__init__.py,sha256=xD_zbTTnBiaB6EEG4yinaWd-yza1waa01GNKVhsGL1M,1107 +transformers/models/granite_speech/__pycache__/__init__.cpython-310.pyc,, +transformers/models/granite_speech/__pycache__/configuration_granite_speech.cpython-310.pyc,, +transformers/models/granite_speech/__pycache__/feature_extraction_granite_speech.cpython-310.pyc,, +transformers/models/granite_speech/__pycache__/modeling_granite_speech.cpython-310.pyc,, +transformers/models/granite_speech/__pycache__/processing_granite_speech.cpython-310.pyc,, +transformers/models/granite_speech/configuration_granite_speech.py,sha256=EYXvGbPHGa6rTZa4FeeR-kg98I_gW9JTiI1MT8XyZwg,8565 +transformers/models/granite_speech/feature_extraction_granite_speech.py,sha256=CrQoQlWFlY3B3vM0mDdZCFM8t_xp21vjGieZl65gErE,7395 +transformers/models/granite_speech/modeling_granite_speech.py,sha256=oK4qCVR0mdBMhMdNa1RRQNK8Pa2oMzvDQEa5xiYtzw4,26506 +transformers/models/granite_speech/processing_granite_speech.py,sha256=pMT_ByZPVZC3xBkCfFoW8CSLkY-uMdNW_4ufPbTxFP0,3922 +transformers/models/granitemoe/__init__.py,sha256=e4KKtNT7YFkYkPBfcS0VyhpT_1vF0JkR2qdYKPqRUcE,1001 +transformers/models/granitemoe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/granitemoe/__pycache__/configuration_granitemoe.cpython-310.pyc,, +transformers/models/granitemoe/__pycache__/modeling_granitemoe.cpython-310.pyc,, +transformers/models/granitemoe/configuration_granitemoe.py,sha256=bZMMl3W8IDz9VfbN98Y39K12K7C2Mm0D1-41vngyxfU,9513 +transformers/models/granitemoe/modeling_granitemoe.py,sha256=cLo-Z9HEUKduA9Y_IxyOxd5MgW6M9qn0CBz8c_nAVRM,44627 +transformers/models/granitemoehybrid/__init__.py,sha256=yiZusdNxb3DK3MNKdwcVNM2bFfeASr76tKQwQwmSJ68,1043 
+transformers/models/granitemoehybrid/__pycache__/__init__.cpython-310.pyc,, +transformers/models/granitemoehybrid/__pycache__/configuration_granitemoehybrid.cpython-310.pyc,, +transformers/models/granitemoehybrid/__pycache__/modeling_granitemoehybrid.cpython-310.pyc,, +transformers/models/granitemoehybrid/__pycache__/modular_granitemoehybrid.cpython-310.pyc,, +transformers/models/granitemoehybrid/configuration_granitemoehybrid.py,sha256=ePlwYSexpKg8rXCl3X66ccnsejRSAQLrIsQcKR2m26U,12559 +transformers/models/granitemoehybrid/modeling_granitemoehybrid.py,sha256=XKqz8LsNBLqwvYnCE9CMYvT_fWrZ6sj4nVl2bkM0AN4,84634 +transformers/models/granitemoehybrid/modular_granitemoehybrid.py,sha256=xcXqBkyBtWGfVZeQY2Qq6abDEep6vB8qSIHLlpq9owI,16614 +transformers/models/granitemoeshared/__init__.py,sha256=vmY98tLts1c_yvkLn9X-xk6CFtXIKskzYvFGMqQAskc,1013 +transformers/models/granitemoeshared/__pycache__/__init__.cpython-310.pyc,, +transformers/models/granitemoeshared/__pycache__/configuration_granitemoeshared.cpython-310.pyc,, +transformers/models/granitemoeshared/__pycache__/modeling_granitemoeshared.cpython-310.pyc,, +transformers/models/granitemoeshared/__pycache__/modular_granitemoeshared.cpython-310.pyc,, +transformers/models/granitemoeshared/configuration_granitemoeshared.py,sha256=lYbrzbYRHxR5Xf8xb_7GVlw3NAkWGaMe_cVq8H6jIH8,9942 +transformers/models/granitemoeshared/modeling_granitemoeshared.py,sha256=tj91bp1seFvOIOiH_PtKZ0ge7WVkXA6021knLRE-nwY,46986 +transformers/models/granitemoeshared/modular_granitemoeshared.py,sha256=Qf-MNipor8eFIm6rWZPcCcnYaFvshZ-UKKQXAnNUndE,7976 +transformers/models/grounding_dino/__init__.py,sha256=nTxZfZioCpS8hj_L80qZQkgPviMZrTxkz14B9sQQJjk,1161 +transformers/models/grounding_dino/__pycache__/__init__.cpython-310.pyc,, +transformers/models/grounding_dino/__pycache__/configuration_grounding_dino.cpython-310.pyc,, +transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino.cpython-310.pyc,, 
+transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino_fast.cpython-310.pyc,, +transformers/models/grounding_dino/__pycache__/modeling_grounding_dino.cpython-310.pyc,, +transformers/models/grounding_dino/__pycache__/modular_grounding_dino.cpython-310.pyc,, +transformers/models/grounding_dino/__pycache__/processing_grounding_dino.cpython-310.pyc,, +transformers/models/grounding_dino/configuration_grounding_dino.py,sha256=v0jseFxOwfmdOxes3oL75e-GmIdjIEngnJdMNPCES_Q,15246 +transformers/models/grounding_dino/image_processing_grounding_dino.py,sha256=4H8zRmNstHClKrV5CF1YqSAt2AqjJixh99uS5HA7G1M,72265 +transformers/models/grounding_dino/image_processing_grounding_dino_fast.py,sha256=3UeabVTAmZXbhnAcvJvvbpy3Hqq8RPyignJy7kMK2-U,34809 +transformers/models/grounding_dino/modeling_grounding_dino.py,sha256=lRCCe9aBnYxXcd1gNAsla76BjDdxSJ3MWRGA0EgbOpY,130759 +transformers/models/grounding_dino/modular_grounding_dino.py,sha256=rLbksNDxVIZnhL-3vQD8-ITqeJtBHwhgnn6IrgYF1Rw,5286 +transformers/models/grounding_dino/processing_grounding_dino.py,sha256=HuksLX9pwWIFWCQfYTO_kGHbSyOhUYc1wVRyeyjWejU,14059 +transformers/models/groupvit/__init__.py,sha256=vrJ-tBa1XOd1CloHhXKMCIlggMxOS4M7jCcqlLQxMo4,1037 +transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc,, +transformers/models/groupvit/__pycache__/modeling_groupvit.cpython-310.pyc,, +transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc,, +transformers/models/groupvit/configuration_groupvit.py,sha256=XzBlYKFnyHGVmGdQN22pgWdPer0OGbvPWnCZvouQL_M,18684 +transformers/models/groupvit/modeling_groupvit.py,sha256=cVYDDVMUkRAZqXkX2XNOi_ikP1wXuA89ThWafIBGLRQ,61501 +transformers/models/groupvit/modeling_tf_groupvit.py,sha256=m65pNT0kRTVJZySd6tWeqY6Zx7KayxucNHSj_B3BSA4,90064 +transformers/models/helium/__init__.py,sha256=b1Senw5Mr129rzZSd1sW6-Ies2kIAUHfplpzgGeuTFE,993 
+transformers/models/helium/__pycache__/__init__.cpython-310.pyc,, +transformers/models/helium/__pycache__/configuration_helium.cpython-310.pyc,, +transformers/models/helium/__pycache__/modeling_helium.cpython-310.pyc,, +transformers/models/helium/__pycache__/modular_helium.cpython-310.pyc,, +transformers/models/helium/configuration_helium.py,sha256=kctXqQTceihfsqRx0vImc_urU3Ii5hyp-cUGy8TUT0E,7380 +transformers/models/helium/modeling_helium.py,sha256=h0GSWeGMWI_ajg6jyQ70UZy8-HP4wWsgNsM62Mt6C20,20730 +transformers/models/helium/modular_helium.py,sha256=UhpSRdXdaOGzV2jp4BqoRxYQ5PKWMhdgc93gtp1XLrs,5484 +transformers/models/herbert/__init__.py,sha256=3i5hlRANc-OFP86y2qzb_OCWVjJQ9XQswiglh5KbU7Y,1003 +transformers/models/herbert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc,, +transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc,, +transformers/models/herbert/tokenization_herbert.py,sha256=-1q4wQyllXCSydUXL0DO8Nm9OqmAcIGdNhCvgb6rxeg,23808 +transformers/models/herbert/tokenization_herbert_fast.py,sha256=S_47DZCpq7SK9d21QKf8jF5FZvVkRFERzD_iRV2IMg0,4919 +transformers/models/hgnet_v2/__init__.py,sha256=sBFNC0RNpS-oEnOiwtxy2SkUPAJgmI5uXXq2WjSHRd8,999 +transformers/models/hgnet_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/hgnet_v2/__pycache__/configuration_hgnet_v2.cpython-310.pyc,, +transformers/models/hgnet_v2/__pycache__/modeling_hgnet_v2.cpython-310.pyc,, +transformers/models/hgnet_v2/__pycache__/modular_hgnet_v2.cpython-310.pyc,, +transformers/models/hgnet_v2/configuration_hgnet_v2.py,sha256=Gr8N48fXLivF_jKgpm62-1mTvqo5QDfw-h0JceCXbjY,8823 +transformers/models/hgnet_v2/modeling_hgnet_v2.py,sha256=b8VfH5Im-Df8REz-dG55YQIkZnESM9WNHOdVBTdcTqc,18994 +transformers/models/hgnet_v2/modular_hgnet_v2.py,sha256=1dqZuZfYR5Yxx43zSfCT3QIbIhr_qlCU7lk5qlUGQZw,25434 +transformers/models/hiera/__init__.py,sha256=b1kwKtpZVISJZ5Pri421uvH2v3IoRQ6XXHzxFOPHN-g,991 
+transformers/models/hiera/__pycache__/__init__.cpython-310.pyc,, +transformers/models/hiera/__pycache__/configuration_hiera.cpython-310.pyc,, +transformers/models/hiera/__pycache__/modeling_hiera.cpython-310.pyc,, +transformers/models/hiera/configuration_hiera.py,sha256=QbF2S73pDapMC0_AoQVnPZTqWgs0tXvWWgSAvjNxEFE,9319 +transformers/models/hiera/modeling_hiera.py,sha256=JgEE7y4yUCT9Ktfuahv_Dq53qOIl6QTqgXecQt2Kr-Y,63049 +transformers/models/hubert/__init__.py,sha256=ai560JtgkksShocy0zcDejelkRZnK4IZPVKaTHCOxPQ,1031 +transformers/models/hubert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc,, +transformers/models/hubert/__pycache__/modeling_hubert.cpython-310.pyc,, +transformers/models/hubert/__pycache__/modeling_tf_hubert.cpython-310.pyc,, +transformers/models/hubert/__pycache__/modular_hubert.cpython-310.pyc,, +transformers/models/hubert/configuration_hubert.py,sha256=XFh70tUL-ITYtn-RMt-lZl_Ej2qQ24vJLZZyBYF3PwA,14962 +transformers/models/hubert/modeling_hubert.py,sha256=D0WYbO8xGUHMYntJ7ZNyObytNc4DosDc_A3E73ByYgM,54623 +transformers/models/hubert/modeling_tf_hubert.py,sha256=ED9pJy2PK08C_dRaPOdWgkLvd09BZ6sQmKSuqQbOYok,70566 +transformers/models/hubert/modular_hubert.py,sha256=fhDox-l7V90dmRmOWoJ0bGwhkJZHNmEzAlPLP5BdD-E,11949 +transformers/models/ibert/__init__.py,sha256=UMTcE54y6O9UNF8l9VV2rrTlJSAHooxeNeHNzPSgr_E,991 +transformers/models/ibert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/ibert/__pycache__/configuration_ibert.cpython-310.pyc,, +transformers/models/ibert/__pycache__/modeling_ibert.cpython-310.pyc,, +transformers/models/ibert/__pycache__/quant_modules.cpython-310.pyc,, +transformers/models/ibert/configuration_ibert.py,sha256=nLRgpOzXz8rNprkGfnz5LVuPKWbYnQfkAubJp9sJYyE,7120 +transformers/models/ibert/modeling_ibert.py,sha256=Z8as-nMx-iaAnfgynZ3JmdVs2kHo02hqHi2OAeouIpw,51526 
+transformers/models/ibert/quant_modules.py,sha256=IRq4JOfDn8BBDan2zDy8Fa70bMJ8Wa2gorNDeNVB6uc,30076 +transformers/models/idefics/__init__.py,sha256=zc4m1Vd6-Szs7Urt0Ry6eUScpza8iD-QPG4cq4xX34g,1116 +transformers/models/idefics/__pycache__/__init__.cpython-310.pyc,, +transformers/models/idefics/__pycache__/configuration_idefics.cpython-310.pyc,, +transformers/models/idefics/__pycache__/image_processing_idefics.cpython-310.pyc,, +transformers/models/idefics/__pycache__/modeling_idefics.cpython-310.pyc,, +transformers/models/idefics/__pycache__/modeling_tf_idefics.cpython-310.pyc,, +transformers/models/idefics/__pycache__/perceiver.cpython-310.pyc,, +transformers/models/idefics/__pycache__/perceiver_tf.cpython-310.pyc,, +transformers/models/idefics/__pycache__/processing_idefics.cpython-310.pyc,, +transformers/models/idefics/__pycache__/vision.cpython-310.pyc,, +transformers/models/idefics/__pycache__/vision_tf.cpython-310.pyc,, +transformers/models/idefics/configuration_idefics.py,sha256=4j7sAul74adsu3fXPiq34FePCqJJaafCg2dmHU9h_GU,15304 +transformers/models/idefics/image_processing_idefics.py,sha256=yUJE14jVPmH4RwuQlTX7XU878wBz6P2VsSbpNO8DO1w,9222 +transformers/models/idefics/modeling_idefics.py,sha256=akObLaDYFed4iNFlW5irmX0I2DEumwc71uxgi3RlplY,69639 +transformers/models/idefics/modeling_tf_idefics.py,sha256=odMifoYG8AO7YoME7OgCtP7GzuLPBHEyeJLt39HZ000,79345 +transformers/models/idefics/perceiver.py,sha256=MkJ34X4dgVNJddcn8wUWyDf0rTioVl4WG3dP5GLXR0Q,9426 +transformers/models/idefics/perceiver_tf.py,sha256=XGRP3FaYcbHbxQa9_NoaLkipFfy9tiymgfx2w1GBT6E,9999 +transformers/models/idefics/processing_idefics.py,sha256=x6_c3HsP5blXeNyg2hE4r0eMiIRbCKD4gBMK_KiTgRk,23564 +transformers/models/idefics/vision.py,sha256=ArKhUWBWkCP1fVn8HVcxXaIVZ_rHNuDsOmABUlxK6W0,21794 +transformers/models/idefics/vision_tf.py,sha256=_NmxcrJPfFVHC5mSl4oPI2IMa44ZrKjvojilfqyPeLw,26013 +transformers/models/idefics2/__init__.py,sha256=YmU2OQi-BTXESv52a4jtTwWC2ingparNU4-rXVCPWzQ,1131 
+transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc,, +transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc,, +transformers/models/idefics2/__pycache__/image_processing_idefics2_fast.cpython-310.pyc,, +transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc,, +transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc,, +transformers/models/idefics2/configuration_idefics2.py,sha256=TUpFg-9WZ_ISWYfKXlVXT3Z5oTh0gxQ4BY-4fbHcRf0,12223 +transformers/models/idefics2/image_processing_idefics2.py,sha256=jou_cXD3NnR4mA3qqGVEFdQUGSjMOW0DXP2Kvypr3OQ,26485 +transformers/models/idefics2/image_processing_idefics2_fast.py,sha256=vVxdMrUrChWQFaRCKZJ5dkWJkSMCIKbV_JbFcy-eMj0,12169 +transformers/models/idefics2/modeling_idefics2.py,sha256=d3G1WYR2XVL37OwXf9Lc6sIXkyVALIrQSRfts8rn4HI,58951 +transformers/models/idefics2/processing_idefics2.py,sha256=z1ATjFITTlGVOoydJmbypd-Z_JPpZoanRXgm5BLBsdQ,12903 +transformers/models/idefics3/__init__.py,sha256=zLsOtUFi074lvfGbwZEMVSvV08TZgoq0DVhJJagYoRo,1131 +transformers/models/idefics3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/idefics3/__pycache__/configuration_idefics3.cpython-310.pyc,, +transformers/models/idefics3/__pycache__/image_processing_idefics3.cpython-310.pyc,, +transformers/models/idefics3/__pycache__/image_processing_idefics3_fast.cpython-310.pyc,, +transformers/models/idefics3/__pycache__/modeling_idefics3.cpython-310.pyc,, +transformers/models/idefics3/__pycache__/processing_idefics3.cpython-310.pyc,, +transformers/models/idefics3/configuration_idefics3.py,sha256=fQYZ0Eo-q7InVjNFat2RjL9RZQmR-9nAIlK9v_XNkZE,8566 +transformers/models/idefics3/image_processing_idefics3.py,sha256=BRylPU8Vg-f7pYUXA5V-FMq-VZYPNU77Im7NDFDubXs,43567 +transformers/models/idefics3/image_processing_idefics3_fast.py,sha256=aghb3FZx5pZbn-bFF6YYR8wIwJn-R132CrOmKMv-ccU,23711 
+transformers/models/idefics3/modeling_idefics3.py,sha256=tbW82Kpf6Sa5Kvr2E9OmsSeF3lml4h2qgqn_KdvFtpw,48258 +transformers/models/idefics3/processing_idefics3.py,sha256=RWq1jTlNkjipz44wgleGrUjUr-HUlYZNblrviJhNDnw,20203 +transformers/models/ijepa/__init__.py,sha256=O0_Jqpy8kmorYC-x0QsoMYSHdqQt3E1j-UZGLQ9aCv0,991 +transformers/models/ijepa/__pycache__/__init__.cpython-310.pyc,, +transformers/models/ijepa/__pycache__/configuration_ijepa.cpython-310.pyc,, +transformers/models/ijepa/__pycache__/modeling_ijepa.cpython-310.pyc,, +transformers/models/ijepa/__pycache__/modular_ijepa.cpython-310.pyc,, +transformers/models/ijepa/configuration_ijepa.py,sha256=8lO360USWRUnrnBXO2SeiZN0ozKHJNb2K2D0_vCKeX8,5445 +transformers/models/ijepa/modeling_ijepa.py,sha256=9pugWZG0ixK56mIm6WA0I4HHpbGgvl_710h7ePS4P3U,28287 +transformers/models/ijepa/modular_ijepa.py,sha256=S38Aag9mcQMCKRBVAluCUArJ5CXUeOvCt76pqU14Sx0,9504 +transformers/models/imagegpt/__init__.py,sha256=XxwI4UaVyyvTcGuJQGruvLi-dHHl8MdOvhAum3FXaGo,1089 +transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/imagegpt/__pycache__/configuration_imagegpt.cpython-310.pyc,, +transformers/models/imagegpt/__pycache__/feature_extraction_imagegpt.cpython-310.pyc,, +transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc,, +transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc,, +transformers/models/imagegpt/configuration_imagegpt.py,sha256=bp6I42shNZUoPmwpTOWEiyUH3-UQkDzK7AkSLgsMZCo,8799 +transformers/models/imagegpt/feature_extraction_imagegpt.py,sha256=sU7HaHR9bGhzHYLuRDnvcHRCnxlJHkfTVItQYn7ZS5E,1316 +transformers/models/imagegpt/image_processing_imagegpt.py,sha256=Y_wk7hz3Ew6MmdRCesrPu64cW2VYIX3Dyhb8CGbociQ,14448 +transformers/models/imagegpt/modeling_imagegpt.py,sha256=aa76_8Z3i_JKFCQxnVW02tw5XVKHV27_QMv5vQNEhN0,46993 +transformers/models/informer/__init__.py,sha256=L-BwVQfdq5ve06VJJ-OnTh-m_YqSMNcpDQ1z6sbDtNI,997 
+transformers/models/informer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/informer/__pycache__/configuration_informer.cpython-310.pyc,, +transformers/models/informer/__pycache__/modeling_informer.cpython-310.pyc,, +transformers/models/informer/__pycache__/modular_informer.cpython-310.pyc,, +transformers/models/informer/configuration_informer.py,sha256=w_Si5k5ZakGLXANemPDBY7RX_j3rfcLQr_NawdVURcA,12488 +transformers/models/informer/modeling_informer.py,sha256=xfIiponoY7XZDIdp8WfV3PXkg85cPoG4FmIFPx_Bvq4,106918 +transformers/models/informer/modular_informer.py,sha256=lB4aH2GvLgcm2nhaQIQy04mjiJZRKAY05KagERw5Zl0,48429 +transformers/models/instructblip/__init__.py,sha256=gI7F0N1dRSYdZtTumtuoPcIJcuBI8PO4DEOQS4_nWuc,1048 +transformers/models/instructblip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/instructblip/__pycache__/configuration_instructblip.cpython-310.pyc,, +transformers/models/instructblip/__pycache__/modeling_instructblip.cpython-310.pyc,, +transformers/models/instructblip/__pycache__/processing_instructblip.cpython-310.pyc,, +transformers/models/instructblip/configuration_instructblip.py,sha256=_EYzBIHXuM6DThV5EfpJVuglsL7OCu-axvN0u_Yfk2M,15823 +transformers/models/instructblip/modeling_instructblip.py,sha256=x5rot-3VjAOydqv2a1CZQggIgBhUKAOqhegvLee-YqA,78121 +transformers/models/instructblip/processing_instructblip.py,sha256=-13GeeZODXWl3irxiqkMUKXlLEyvLoirXX9anIfwpaI,9706 +transformers/models/instructblipvideo/__init__.py,sha256=sgK7MEwrqKB6mQyEvhxcgOQc_OAtMDc9tAZqKF0sxfM,1171 +transformers/models/instructblipvideo/__pycache__/__init__.cpython-310.pyc,, +transformers/models/instructblipvideo/__pycache__/configuration_instructblipvideo.cpython-310.pyc,, +transformers/models/instructblipvideo/__pycache__/image_processing_instructblipvideo.cpython-310.pyc,, +transformers/models/instructblipvideo/__pycache__/modeling_instructblipvideo.cpython-310.pyc,, 
+transformers/models/instructblipvideo/__pycache__/modular_instructblipvideo.cpython-310.pyc,, +transformers/models/instructblipvideo/__pycache__/processing_instructblipvideo.cpython-310.pyc,, +transformers/models/instructblipvideo/__pycache__/video_processing_instructblipvideo.cpython-310.pyc,, +transformers/models/instructblipvideo/configuration_instructblipvideo.py,sha256=RWJu0fOE-UcvZRLYLCZB0MHmXOUReRv3YhN9lsliQ68,16931 +transformers/models/instructblipvideo/image_processing_instructblipvideo.py,sha256=Y-UrJ6sREhk2eEznUyfMLc5Ehhnng1VT0LC8K9IqO-k,17032 +transformers/models/instructblipvideo/modeling_instructblipvideo.py,sha256=WSGTMq6cY-vyqG9hsxjeeBS0Fw_7VmfIzdv-C7PEPnQ,79171 +transformers/models/instructblipvideo/modular_instructblipvideo.py,sha256=padfqy1AVSpogxtwGRM3uxRizt4SLCRoSjkxjBgob5E,27319 +transformers/models/instructblipvideo/processing_instructblipvideo.py,sha256=7fots7t9Fo4Fpbw-thVNDYVDjKnq9vFo3YOBaiDtQU4,10641 +transformers/models/instructblipvideo/video_processing_instructblipvideo.py,sha256=-C4e7w5jHwXQ8OspMxe7kzNfbPp0zX3HYdh8P3oZuXI,5337 +transformers/models/internvl/__init__.py,sha256=tNXeZ8TIWlY70CelRiihyPOudKQtRBZp-c9WqglJ8ss,1081 +transformers/models/internvl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/internvl/__pycache__/configuration_internvl.cpython-310.pyc,, +transformers/models/internvl/__pycache__/modeling_internvl.cpython-310.pyc,, +transformers/models/internvl/__pycache__/modular_internvl.cpython-310.pyc,, +transformers/models/internvl/__pycache__/processing_internvl.cpython-310.pyc,, +transformers/models/internvl/__pycache__/video_processing_internvl.cpython-310.pyc,, +transformers/models/internvl/configuration_internvl.py,sha256=pHPwAbDBs-a2mCoadTGA-86y8w9io-9rn-mhFSMFe20,10622 +transformers/models/internvl/modeling_internvl.py,sha256=5J-z6OWW7C5rbjA2Xvh47ajQYbRIrFB_pPJ1dfHiXyw,42091 +transformers/models/internvl/modular_internvl.py,sha256=S4ONWu0QE1OonNjGwG-GPF9ucFfyQmaHmFq-3DjRCDY,27971 
+transformers/models/internvl/processing_internvl.py,sha256=eVuASRYHbKcL0RqS8_BJkFsa9x_7c9PDiWSZmxOcANY,16266 +transformers/models/internvl/video_processing_internvl.py,sha256=d6wMz2hHEl0K2pZpHBoXPNywy_eaoEurT2fIFdgLkpY,8168 +transformers/models/jamba/__init__.py,sha256=zN7Rmr--d5GCEJzMA7gxIz-BYFydPN3cyuif85YU0Fk,991 +transformers/models/jamba/__pycache__/__init__.cpython-310.pyc,, +transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc,, +transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc,, +transformers/models/jamba/configuration_jamba.py,sha256=_tAnWFB7DxGv4MF3dq2rSmzJ3Ys-8jamkEtN67mTPWQ,11745 +transformers/models/jamba/modeling_jamba.py,sha256=xbPEDFrFvNxkL9miNoYgtFDZCuhxlB5XmsHBcggyovM,68929 +transformers/models/janus/__init__.py,sha256=rTnJnHMmmoPxIVasip4sdS5aZ1wvetOtZUobIMYMHX4,1132 +transformers/models/janus/__pycache__/__init__.cpython-310.pyc,, +transformers/models/janus/__pycache__/configuration_janus.cpython-310.pyc,, +transformers/models/janus/__pycache__/image_processing_janus.cpython-310.pyc,, +transformers/models/janus/__pycache__/image_processing_janus_fast.cpython-310.pyc,, +transformers/models/janus/__pycache__/modeling_janus.cpython-310.pyc,, +transformers/models/janus/__pycache__/modular_janus.cpython-310.pyc,, +transformers/models/janus/__pycache__/processing_janus.cpython-310.pyc,, +transformers/models/janus/configuration_janus.py,sha256=7fL58_kFc-hWwhpq-6N_BJG82194A3uHz9QEmRK0A14,14908 +transformers/models/janus/image_processing_janus.py,sha256=kDOV-UMsqTcVsmdfv3gOPmu6EUpCmcewPPRaiI4Jyf0,25665 +transformers/models/janus/image_processing_janus_fast.py,sha256=FOt17pcWVFhG8awWA-NgZb4gcs7ztLQKpejWaNCHZvY,9327 +transformers/models/janus/modeling_janus.py,sha256=LOUIHksbip4Q006puDiZphs1njXG4dNPvuvUGWiITrA,61460 +transformers/models/janus/modular_janus.py,sha256=LLWC1U0cSK_s9BJWR7209rTqsvUnsYD-hdHQ1uyDkBI,69482 +transformers/models/janus/processing_janus.py,sha256=ynGyHeifrMd6LTs836qWj8jrvKVIDu1MFnYWsBjHmuM,8408 
+transformers/models/jetmoe/__init__.py,sha256=zhqtP2ZDCCl3Fp3VBnnuaA044Ztbh7fsUKogAKABOt0,993 +transformers/models/jetmoe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/jetmoe/__pycache__/configuration_jetmoe.cpython-310.pyc,, +transformers/models/jetmoe/__pycache__/modeling_jetmoe.cpython-310.pyc,, +transformers/models/jetmoe/configuration_jetmoe.py,sha256=jVvNefILiJpDnH0QcMd4SP8L_5-0xS1eUAa-S43dNG0,6803 +transformers/models/jetmoe/modeling_jetmoe.py,sha256=34c5IMGre15TWdRYbwZz9Z-DvQ3nVzEyUamQETYrR3Q,54495 +transformers/models/kosmos2/__init__.py,sha256=Ow8cLelhxl6fm5XvXzNQtPLt1xjIdVmGUwz5NoVVVto,1033 +transformers/models/kosmos2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/kosmos2/__pycache__/configuration_kosmos2.cpython-310.pyc,, +transformers/models/kosmos2/__pycache__/modeling_kosmos2.cpython-310.pyc,, +transformers/models/kosmos2/__pycache__/processing_kosmos2.cpython-310.pyc,, +transformers/models/kosmos2/configuration_kosmos2.py,sha256=J8MCJ8SzDJ03_8DiSMNiYyU46i0tl6LEc2K_u1dnUY8,11888 +transformers/models/kosmos2/modeling_kosmos2.py,sha256=MuBMkoV8888F2DG6PGtaC1hDJ_aUgvEQlyJGvqMqI2g,83095 +transformers/models/kosmos2/processing_kosmos2.py,sha256=mjrTT2YEQS-XbOOOgvUHBH1iGZaKfGIGeDcgc1cJH2k,31841 +transformers/models/kyutai_speech_to_text/__init__.py,sha256=KxatXD7pSOmwZWzs7nFOXG9Hc2wxaAS3CYwxg54lq9g,1135 +transformers/models/kyutai_speech_to_text/__pycache__/__init__.cpython-310.pyc,, +transformers/models/kyutai_speech_to_text/__pycache__/configuration_kyutai_speech_to_text.cpython-310.pyc,, +transformers/models/kyutai_speech_to_text/__pycache__/feature_extraction_kyutai_speech_to_text.cpython-310.pyc,, +transformers/models/kyutai_speech_to_text/__pycache__/modeling_kyutai_speech_to_text.cpython-310.pyc,, +transformers/models/kyutai_speech_to_text/__pycache__/modular_kyutai_speech_to_text.cpython-310.pyc,, +transformers/models/kyutai_speech_to_text/__pycache__/processing_kyutai_speech_to_text.cpython-310.pyc,, 
+transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py,sha256=79_lAwlhuNgITTfiaG1jtXj5hndcZMfFK6XEdeZy9eM,9014 +transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py,sha256=d70GfDJOpBy6_mbq6XkN0DXXm7-hv19WbxGA7AJBu5A,11809 +transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py,sha256=r8LLKu-tKgur0UgI3JQ4DxQiQSzxYxZr0j_7l2rPx3Q,64696 +transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py,sha256=ubPeru0865L0Nm9-SlF0svGqm5vXRIQonQV6x93T4vM,23356 +transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py,sha256=4_9VxAjsbHaYOho_gEhFyJs7WPiWgvE6ZgUiVquCgvs,4142 +transformers/models/layoutlm/__init__.py,sha256=Mv-k01_9_SxbADuSx2pWoNGBxgUe4IH15Kcg-vc_0OI,1124 +transformers/models/layoutlm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/layoutlm/__pycache__/configuration_layoutlm.cpython-310.pyc,, +transformers/models/layoutlm/__pycache__/modeling_layoutlm.cpython-310.pyc,, +transformers/models/layoutlm/__pycache__/modeling_tf_layoutlm.cpython-310.pyc,, +transformers/models/layoutlm/__pycache__/tokenization_layoutlm.cpython-310.pyc,, +transformers/models/layoutlm/__pycache__/tokenization_layoutlm_fast.cpython-310.pyc,, +transformers/models/layoutlm/configuration_layoutlm.py,sha256=dXx_U1hCr7OEKkX1KmnvX28_YFG-3YfhyoohtIKJvuA,9585 +transformers/models/layoutlm/modeling_layoutlm.py,sha256=ME6tc8MDHxIkgLONgWRTOYYkpHTiPbKea9BuZcJaQuM,47621 +transformers/models/layoutlm/modeling_tf_layoutlm.py,sha256=nbnFaMwHF6o4H37KjumFss3PkBG1JvR9bqSaCUgeQO0,73207 +transformers/models/layoutlm/tokenization_layoutlm.py,sha256=xmr9ECm3iKIekmGUFJCh6doTOuto43pZoGRJtOaAsnc,20141 +transformers/models/layoutlm/tokenization_layoutlm_fast.py,sha256=LQl2LTmosc0mhrdir332AMepzHmuwxCDrM8RxYxUrJM,6691 +transformers/models/layoutlmv2/__init__.py,sha256=8f9dWBf1riaQI2KAw-gIrQGNKP4f2uUdJxyRKNVD2lI,1281 +transformers/models/layoutlmv2/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/layoutlmv2/__pycache__/configuration_layoutlmv2.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/feature_extraction_layoutlmv2.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/image_processing_layoutlmv2.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/image_processing_layoutlmv2_fast.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/modeling_layoutlmv2.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/processing_layoutlmv2.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/tokenization_layoutlmv2.cpython-310.pyc,, +transformers/models/layoutlmv2/__pycache__/tokenization_layoutlmv2_fast.cpython-310.pyc,, +transformers/models/layoutlmv2/configuration_layoutlmv2.py,sha256=LmhbEBUesm93DINDXZRfWIKrB0Agy3cflhMh7UCMtqY,10914 +transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py,sha256=C-PxmCgb7CMj-nrs7-yOEvGwLcW1okgKjK2OvIsKjHE,1313 +transformers/models/layoutlmv2/image_processing_layoutlmv2.py,sha256=LGi3r1DbOk-Qi7O8Xw-etrkNM6zt_Zp2ucYomH1FaxA,13605 +transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py,sha256=VBZrKwU79g-NQEfy9RyiVTQxcgRtYbGLg0vqeBnhBVc,5841 +transformers/models/layoutlmv2/modeling_layoutlmv2.py,sha256=x3DUWXBaYRXDaOlKoLfHlEbOj8plIyZ6Ut0ljfROTms,61956 +transformers/models/layoutlmv2/processing_layoutlmv2.py,sha256=n-QBUDkZQKBysGkY_yvCIOzzYEeFNH0lkTgGmpvyI80,9332 +transformers/models/layoutlmv2/tokenization_layoutlmv2.py,sha256=NPwb7mXxwLzRHqfDHqcKww8QEXePE8W-25cyJXYpeSg,72137 +transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py,sha256=kszD7kD1-Zi6tos9t3gSTja-wWCi0k8e3D5uhZjR3ws,37064 +transformers/models/layoutlmv3/__init__.py,sha256=N-Ty2DqEyDqyd5i-k89LMi2qSbojaasZ-ozSTxs1GHo,1323 +transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc,, 
+transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3_fast.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3.cpython-310.pyc,, +transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc,, +transformers/models/layoutlmv3/configuration_layoutlmv3.py,sha256=-FaapM8qkEUrlkfsFAG27v5hPJh-SmkQkXSYb5OiBjU,13288 +transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py,sha256=G2oB86aN-ebGJjw6YMsuEVuSfnIAylsteMWpZR3Gcvs,1313 +transformers/models/layoutlmv3/image_processing_layoutlmv3.py,sha256=myp6OC50cdh60lwE2BX2fj2iPMsjo4SFaAtHtTngG7Y,18571 +transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py,sha256=HaT1eiU3fvt0m4pdZXaidIPMTNTpcRnLaxKLRdRspRc,6428 +transformers/models/layoutlmv3/modeling_layoutlmv3.py,sha256=As83lqRUQ_wwK8bBy-RTMcjeCVffss-CzZoyyFT6Fo8,53596 +transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py,sha256=QMYgY31NUVETlCDaX9WIwSnPZwb5ZfHl8N0JfwVIEJc,76633 +transformers/models/layoutlmv3/processing_layoutlmv3.py,sha256=3dcr9b7u4KP8Ynox7t6Zu6ZNkAKuXfFS5AVH70D0X1g,9183 +transformers/models/layoutlmv3/tokenization_layoutlmv3.py,sha256=vazUk6NCQAVRanYrmHJ4LduEwW_RNuLkepmUi25j8QY,73237 +transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py,sha256=l7JmjZ3uMA3fRimszuxqpQF5_0AFB-wjXwTwSwj7Rcg,39917 +transformers/models/layoutxlm/__init__.py,sha256=djfI2YGJISwww_XDfyf4kCj3a_HiC6Hld1rlaHRtHPg,1047 +transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc,, +transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc,, +transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc,, +transformers/models/layoutxlm/processing_layoutxlm.py,sha256=f43xcpnPILBjHTMeUJ0EbMhFmBPZO5wIShi2tHQSpGo,9263 +transformers/models/layoutxlm/tokenization_layoutxlm.py,sha256=BLbZIliyQIRcXAtExyWN0sIV2ZRf1FqFdsnlKsB-iIo,58329 +transformers/models/layoutxlm/tokenization_layoutxlm_fast.py,sha256=1J_rW1fGdTsQsJRbLfVhbWngXnKecX-xELBtYmu98QA,40471 +transformers/models/led/__init__.py,sha256=KaOht9jIet9WQrPRli8DwD7q5fzTWsffxf7LK-sQuw4,1099 +transformers/models/led/__pycache__/__init__.cpython-310.pyc,, +transformers/models/led/__pycache__/configuration_led.cpython-310.pyc,, +transformers/models/led/__pycache__/modeling_led.cpython-310.pyc,, +transformers/models/led/__pycache__/modeling_tf_led.cpython-310.pyc,, +transformers/models/led/__pycache__/tokenization_led.cpython-310.pyc,, +transformers/models/led/__pycache__/tokenization_led_fast.cpython-310.pyc,, +transformers/models/led/configuration_led.py,sha256=wuLDY2wIywEU_23WuDfcoGW8-bg_X8SmP843xnYFyZQ,7455 +transformers/models/led/modeling_led.py,sha256=1bRkBYwiO6PylqTbLlGUtAeSH8umadKRVf3Cj-m_n_g,125327 +transformers/models/led/modeling_tf_led.py,sha256=04PA5IKphAodzyV234v2sZ825j_xY9ob_2REtmu1e-s,123119 +transformers/models/led/tokenization_led.py,sha256=m7FhNIvAQer9k9t_WqYMGSE3k59yn7L5tIuB3JH7uzE,19843 +transformers/models/led/tokenization_led_fast.py,sha256=yO0G0Q6yGmsPCyEII9t-AnYQeEaJ-HQDbLjfAcGI9Cs,14170 +transformers/models/levit/__init__.py,sha256=acEjEeDtpQ9q3a-hf90z6TZ0js04BtZvbCcn4HGWCyk,1124 +transformers/models/levit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/levit/__pycache__/configuration_levit.cpython-310.pyc,, +transformers/models/levit/__pycache__/feature_extraction_levit.cpython-310.pyc,, 
+transformers/models/levit/__pycache__/image_processing_levit.cpython-310.pyc,, +transformers/models/levit/__pycache__/image_processing_levit_fast.cpython-310.pyc,, +transformers/models/levit/__pycache__/modeling_levit.cpython-310.pyc,, +transformers/models/levit/configuration_levit.py,sha256=UOtUDcZK6i4kPrtjYgCoWpZsxf7A1BQZeZjCYoXnMek,5772 +transformers/models/levit/feature_extraction_levit.py,sha256=sR1MZBqvbep8KdqX45Sw3V--ZqCe3fePzC1CT9cv4Js,1317 +transformers/models/levit/image_processing_levit.py,sha256=XHpdCc_BJVuKz8AfDUMObT6cJovSyW06eWri77KprwU,16718 +transformers/models/levit/image_processing_levit_fast.py,sha256=kyf4vXAG0LMRRXpQRZ8swvedueDYr6BrLbI8NRXQrPY,3946 +transformers/models/levit/modeling_levit.py,sha256=YLK0NAfGhQZfUWKDEu7dLAX4lu2qQvo7b4VFzEFOvUM,26365 +transformers/models/lfm2/__init__.py,sha256=9fNMRtqveDp18iRtUjNqCu0XELuvwJOmizUyI1h0zHw,989 +transformers/models/lfm2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/lfm2/__pycache__/configuration_lfm2.cpython-310.pyc,, +transformers/models/lfm2/__pycache__/modeling_lfm2.cpython-310.pyc,, +transformers/models/lfm2/__pycache__/modular_lfm2.cpython-310.pyc,, +transformers/models/lfm2/configuration_lfm2.py,sha256=6jJ2kBds8PYg6DyAHBFdpMIcJW7rBv-bkMrM1LLVgII,7831 +transformers/models/lfm2/modeling_lfm2.py,sha256=V0PtewNXQMeK8_g4EVU-vUoFa_MfPJGZx_PIpSSWF38,32170 +transformers/models/lfm2/modular_lfm2.py,sha256=uXYsOWH4VtTaKMWWjWvfj38VSAFoald_LQCJAf7vc_o,20934 +transformers/models/lightglue/__init__.py,sha256=xdtDUaVLHRf2vWjlv-l2JWlllw6Obz8ZRZeV297Ds1Y,1045 +transformers/models/lightglue/__pycache__/__init__.cpython-310.pyc,, +transformers/models/lightglue/__pycache__/configuration_lightglue.cpython-310.pyc,, +transformers/models/lightglue/__pycache__/image_processing_lightglue.cpython-310.pyc,, +transformers/models/lightglue/__pycache__/modeling_lightglue.cpython-310.pyc,, +transformers/models/lightglue/__pycache__/modular_lightglue.cpython-310.pyc,, 
+transformers/models/lightglue/configuration_lightglue.py,sha256=H0fIxvZuHCXnz1UUuwtmjOugvvMv0SzbNYq0XYBEfj8,8131 +transformers/models/lightglue/image_processing_lightglue.py,sha256=sJuqREeqCZ4IJg2p46DPdodfe60oGWVwhaUTyKSBkqk,24854 +transformers/models/lightglue/modeling_lightglue.py,sha256=UtTTlqRqTeeUvs5tdC0p1Bx5PbI1kHBYp9_ZXvzTMHw,43234 +transformers/models/lightglue/modular_lightglue.py,sha256=Gj5s5EWsPS4D_rcqtVFMIiLpbb60r5TFvQA6-pz2aHM,51000 +transformers/models/lilt/__init__.py,sha256=9XEq7kJwN0mKO469mR0mtlRUdljjq7V80gejpqb59K0,989 +transformers/models/lilt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/lilt/__pycache__/configuration_lilt.cpython-310.pyc,, +transformers/models/lilt/__pycache__/modeling_lilt.cpython-310.pyc,, +transformers/models/lilt/configuration_lilt.py,sha256=rPA7P9f9B2f29_Q74Dx93-WXyhK1JT5W67PoSrDOoQc,6737 +transformers/models/lilt/modeling_lilt.py,sha256=LXNnKgQdbsgM3XAH6KXv17y5024NyKW91D13tM6sg7Y,48265 +transformers/models/llama/__init__.py,sha256=k1HnOc4-BwvgSizE8E0IlrkCh_TVgv1XX8G-xozfgLo,1111 +transformers/models/llama/__pycache__/__init__.cpython-310.pyc,, +transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc,, +transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc,, +transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc,, +transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc,, +transformers/models/llama/__pycache__/tokenization_llama_fast.cpython-310.pyc,, +transformers/models/llama/configuration_llama.py,sha256=m-ywRVkzFiivG0ty9E1ooHJlvQyV44PRHUxdpe06QI4,12077 +transformers/models/llama/modeling_flax_llama.py,sha256=3_PSmX_OPr7ENnUScfAyuepDwejN-2qFRj5vmy8y-KM,30675 +transformers/models/llama/modeling_llama.py,sha256=s_mJLrePri8KlTVoPoYlBAy69EyPZeBC5g3F6JoD1SQ,20536 +transformers/models/llama/tokenization_llama.py,sha256=KQOtC9Jzm6vs9ugT--SyHjb_XLOUFZ4MwTNZuU6gUm8,18729 
+transformers/models/llama/tokenization_llama_fast.py,sha256=nYK5v4GRDhoLkxhtgEoLqq8koigofPTAuRv8-rSvk8U,10965 +transformers/models/llama4/__init__.py,sha256=YLpUGkKivYWky6rr715H2yMb9fCPr_3AV8OwWd2mrpA,1078 +transformers/models/llama4/__pycache__/__init__.cpython-310.pyc,, +transformers/models/llama4/__pycache__/configuration_llama4.cpython-310.pyc,, +transformers/models/llama4/__pycache__/image_processing_llama4_fast.cpython-310.pyc,, +transformers/models/llama4/__pycache__/modeling_llama4.cpython-310.pyc,, +transformers/models/llama4/__pycache__/processing_llama4.cpython-310.pyc,, +transformers/models/llama4/configuration_llama4.py,sha256=7pyr1pHYBuDNm90ex28dxLxEC8YGzSvOq4tcInxnTaI,23087 +transformers/models/llama4/image_processing_llama4_fast.py,sha256=pAOHts1dbg6pbr3HCEoirJ1hhIL-4XCglPFZP5LLGf8,18270 +transformers/models/llama4/modeling_llama4.py,sha256=BhRJfW0XxqNdAsxMxtCcmkjkVUv1tcMIzzb84B_pXAA,59765 +transformers/models/llama4/processing_llama4.py,sha256=J_2Qh1qmrXlAyYUJiITPu8E2Rr4eG-HJDHVPU7-4Y7o,16940 +transformers/models/llava/__init__.py,sha256=h7TDiwhtiqDQbay9v760sbmBGM6yWs3J1tmnIr3PCys,1074 +transformers/models/llava/__pycache__/__init__.cpython-310.pyc,, +transformers/models/llava/__pycache__/configuration_llava.cpython-310.pyc,, +transformers/models/llava/__pycache__/image_processing_llava.cpython-310.pyc,, +transformers/models/llava/__pycache__/image_processing_llava_fast.cpython-310.pyc,, +transformers/models/llava/__pycache__/modeling_llava.cpython-310.pyc,, +transformers/models/llava/__pycache__/processing_llava.cpython-310.pyc,, +transformers/models/llava/configuration_llava.py,sha256=MzKTXjeGMlr-KNFW7L8T4hg0bx-oZ9Yh2bhVtv3QUt4,5760 +transformers/models/llava/image_processing_llava.py,sha256=l8YkOxrks0QqXipgvP-DWTa_r4ONAYspK1BDSNt6lgs,21202 +transformers/models/llava/image_processing_llava_fast.py,sha256=j1fWNLr0tdm_P5W1B2kQ9TaU00dq3-Qi5HVB_8fmMa8,7262 
+transformers/models/llava/modeling_llava.py,sha256=VoXhPp5E19vo6L4tEPKfNIUO4QwchQ43CkmbmzTzVYU,22032 +transformers/models/llava/processing_llava.py,sha256=RsmpjG7hWYB5LsJDqgIzy9eD39AY8mXr-6cMDDeDp-Q,11199 +transformers/models/llava_next/__init__.py,sha256=gyT3qcEjuxecgCiFoQoz-tf10ShqzfOL8IzPOhpjfto,1141 +transformers/models/llava_next/__pycache__/__init__.cpython-310.pyc,, +transformers/models/llava_next/__pycache__/configuration_llava_next.cpython-310.pyc,, +transformers/models/llava_next/__pycache__/image_processing_llava_next.cpython-310.pyc,, +transformers/models/llava_next/__pycache__/image_processing_llava_next_fast.cpython-310.pyc,, +transformers/models/llava_next/__pycache__/modeling_llava_next.cpython-310.pyc,, +transformers/models/llava_next/__pycache__/processing_llava_next.cpython-310.pyc,, +transformers/models/llava_next/configuration_llava_next.py,sha256=8LvnjmDgKsNRvsrlqZRkN2NQsuHZXkPmEo-H-8PzkUI,6776 +transformers/models/llava_next/image_processing_llava_next.py,sha256=pWHGpGs6y8tYqmVA9-4vOwYl9twH_jst_2TFPQ6Xy38,35730 +transformers/models/llava_next/image_processing_llava_next_fast.py,sha256=EdEgk36IEoixCAVaPRiO1B_wU5utOgATfxomtBsrtlc,11241 +transformers/models/llava_next/modeling_llava_next.py,sha256=CPoHT2WK3drREj_G7MBjebZO5HOZcXK5nFgdaR_A7sc,36053 +transformers/models/llava_next/processing_llava_next.py,sha256=bb-arte9DslHp9WlwSpWkDDdWmmTYIdtEpcyd65AS3U,13955 +transformers/models/llava_next_video/__init__.py,sha256=OGiUL7X9x0bzmsnZi0KA6Sl2ycalLQHkTgOpISYu3q8,1113 +transformers/models/llava_next_video/__pycache__/__init__.cpython-310.pyc,, +transformers/models/llava_next_video/__pycache__/configuration_llava_next_video.cpython-310.pyc,, +transformers/models/llava_next_video/__pycache__/image_processing_llava_next_video.cpython-310.pyc,, +transformers/models/llava_next_video/__pycache__/modeling_llava_next_video.cpython-310.pyc,, +transformers/models/llava_next_video/__pycache__/modular_llava_next_video.cpython-310.pyc,, 
+transformers/models/llava_next_video/__pycache__/processing_llava_next_video.cpython-310.pyc,, +transformers/models/llava_next_video/__pycache__/video_processing_llava_next_video.cpython-310.pyc,, +transformers/models/llava_next_video/configuration_llava_next_video.py,sha256=XG-wMKQFmGwGhbQSE8bivrfVS2RPQgSqmW0PEbZRoCU,8266 +transformers/models/llava_next_video/image_processing_llava_next_video.py,sha256=BuWVG6V64kF5TeOcSlrpNdCdVIl8A0GYTb7ygGrAWNk,21243 +transformers/models/llava_next_video/modeling_llava_next_video.py,sha256=jZXTVcoSYdCiAR4ySOebPCeD4lKqkwrqexxKVWcOzoI,46475 +transformers/models/llava_next_video/modular_llava_next_video.py,sha256=i8ajeUUk2O8xN1Lfm3A4T7j-QOwyuWQAopdD6Lj3Tc0,34888 +transformers/models/llava_next_video/processing_llava_next_video.py,sha256=f10OyUVN_HNVC5-llXFE_LmqboyBRfeRA3nqxcB5JN4,16723 +transformers/models/llava_next_video/video_processing_llava_next_video.py,sha256=p9YDNQfyNl_fTPVpCtlauIDCde3zugQz8PBcN4zg4fQ,1904 +transformers/models/llava_onevision/__init__.py,sha256=Eeg8yGcfdjCxwjSCg_zoXG48JG6gSYH8_aXBcOxQvnA,1218 +transformers/models/llava_onevision/__pycache__/__init__.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/configuration_llava_onevision.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/image_processing_llava_onevision.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/image_processing_llava_onevision_fast.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/modeling_llava_onevision.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/modular_llava_onevision.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/processing_llava_onevision.cpython-310.pyc,, +transformers/models/llava_onevision/__pycache__/video_processing_llava_onevision.cpython-310.pyc,, +transformers/models/llava_onevision/configuration_llava_onevision.py,sha256=eGeC9jBTNHeouYrJ8KP_IUYVLSbjO8ySj0oFNvGIJFA,8061 
+transformers/models/llava_onevision/image_processing_llava_onevision.py,sha256=tz5t1ikZajyKE3j38p9Ehgt2PoYwcgyEsgqiL2CYarg,38136 +transformers/models/llava_onevision/image_processing_llava_onevision_fast.py,sha256=66t4V3CSWnwpc0N_HZsf5BW-l7a06a_T87M9SUAfmOU,15134 +transformers/models/llava_onevision/modeling_llava_onevision.py,sha256=2QdnH6Aeub_T6bnwV4S0oDq41ztP0qo0Ln6KdKypuG0,47515 +transformers/models/llava_onevision/modular_llava_onevision.py,sha256=z0hqQOfb0Ocik1qUE8YDMT4svgvHuzgcykguIlyqTwk,35503 +transformers/models/llava_onevision/processing_llava_onevision.py,sha256=3y5e4n1KHmOJXmf9iq7mjR8dZSbMhWKigCAHXExCduE,17923 +transformers/models/llava_onevision/video_processing_llava_onevision.py,sha256=Ii_ymw2HVSyTupmHhlJTNbcULTBH5KXKS_z9wdgRPB4,1914 +transformers/models/longformer/__init__.py,sha256=vg5ScmyEX2D-xPfnxNNBhdj6-Xj0t3HoPmt709PQjTE,1134 +transformers/models/longformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc,, +transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc,, +transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc,, +transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc,, +transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc,, +transformers/models/longformer/configuration_longformer.py,sha256=2AwcxPAnCJw5CbCdgTXndYBTlJTWi4QzDnilgG_63T0,8867 +transformers/models/longformer/modeling_longformer.py,sha256=4A_pGrgnQB2V91QPPwIQjye4vle2t60gg1l3a1QbzIY,108092 +transformers/models/longformer/modeling_tf_longformer.py,sha256=6qdQfcrz6rCbMVl6ncqVcGEk8eNfCOCAvEp4p9k1X4s,129568 +transformers/models/longformer/tokenization_longformer.py,sha256=cK8ke6cLfMRWXlXw_8FpjTIzLoaBi3iY7Mgf4brMRu8,16818 +transformers/models/longformer/tokenization_longformer_fast.py,sha256=Gg8zvjr0Mwqi4aHupJZLykCilqE7jeXREXbMo59ziqQ,11230 
+transformers/models/longt5/__init__.py,sha256=TzoI1JGkvJIf9NlHDQY8_EUuW-upkQZ23wh_8Urtet0,1033 +transformers/models/longt5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc,, +transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc,, +transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc,, +transformers/models/longt5/configuration_longt5.py,sha256=ktQfrCmI60usRiyk-hcZinOpcs7p94zfOV47l_jQnWc,8116 +transformers/models/longt5/modeling_flax_longt5.py,sha256=lNP-l4MlXq4zGNYlo0gyF3oXvcNCk-9qDLQYkBElGE8,105838 +transformers/models/longt5/modeling_longt5.py,sha256=P6eYAZRtE70Xqym227JWbv0uXsT_cMNzyusrxfdQWhE,103260 +transformers/models/luke/__init__.py,sha256=YQL403sV6tk5t8sjvi-4hgvx1rvyThx45l7S4T4xpEE,1026 +transformers/models/luke/__pycache__/__init__.cpython-310.pyc,, +transformers/models/luke/__pycache__/configuration_luke.cpython-310.pyc,, +transformers/models/luke/__pycache__/modeling_luke.cpython-310.pyc,, +transformers/models/luke/__pycache__/tokenization_luke.cpython-310.pyc,, +transformers/models/luke/configuration_luke.py,sha256=q_QLFRDrJfABob9_6-xvSy7ES4VMYKg9A3_gG8DsxAM,6628 +transformers/models/luke/modeling_luke.py,sha256=7ZfaxPaBKmsM0qWTDliSv2pMKhq0D1s9UytTAM7LaTg,98970 +transformers/models/luke/tokenization_luke.py,sha256=SHtnsAm-h1VwdHWjMPXQ0leHSCoqVFVdLA2KEJI2RpE,85643 +transformers/models/lxmert/__init__.py,sha256=iUyLmlBuiz_av7H5ghaQB4RNbpw275N7wwdmiiV0PAc,1114 +transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc,, +transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc,, +transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc,, +transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc,, +transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc,, 
+transformers/models/lxmert/configuration_lxmert.py,sha256=etr-nrYjobgiPW4H9-PTC9VuGgOdR13DRiqifXFkna4,8934 +transformers/models/lxmert/modeling_lxmert.py,sha256=aG5GYE6DL2xFFLOMe_g6t8YSl1ZUSW2kbW8sSArPQns,63580 +transformers/models/lxmert/modeling_tf_lxmert.py,sha256=aGYP4T1uTzZchJkhZZNZXMgACuFe5TFNkHm8jTIX4S4,72722 +transformers/models/lxmert/tokenization_lxmert.py,sha256=He3yKkZAcjAoy0l7rNzc-W3mk_93FAWRB4YX8awnCU4,20165 +transformers/models/lxmert/tokenization_lxmert_fast.py,sha256=5E0lKrkPi1dSrXcZ2BxipbDtMMBVjcwFStrOBVELRv8,6625 +transformers/models/m2m_100/__init__.py,sha256=0uPov299rgQmMwwSyM_m0yGFejP5djgaUY37GkNGnC8,1035 +transformers/models/m2m_100/__pycache__/__init__.cpython-310.pyc,, +transformers/models/m2m_100/__pycache__/configuration_m2m_100.cpython-310.pyc,, +transformers/models/m2m_100/__pycache__/modeling_m2m_100.cpython-310.pyc,, +transformers/models/m2m_100/__pycache__/tokenization_m2m_100.cpython-310.pyc,, +transformers/models/m2m_100/configuration_m2m_100.py,sha256=iwR_eDM_JlooTz08PGdw8zqeFmXNq3_3ttNe1ivQjj0,13454 +transformers/models/m2m_100/modeling_m2m_100.py,sha256=DS8Am5GjiUe6aV4laG9PtbL0-DXiTh3UiD9fF2K_Euc,67373 +transformers/models/m2m_100/tokenization_m2m_100.py,sha256=ZWAVadNEoJ9YCCDSe7lEzGIvp9tdBC9RfCELTGR1wHg,16416 +transformers/models/mamba/__init__.py,sha256=4oGJySQbwoALRGVWMEwXBm0A6fhKsr4Raly46a5g1G0,991 +transformers/models/mamba/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mamba/__pycache__/configuration_mamba.cpython-310.pyc,, +transformers/models/mamba/__pycache__/modeling_mamba.cpython-310.pyc,, +transformers/models/mamba/configuration_mamba.py,sha256=krht7Qj-1yfYxdMr3zB9WhBVqUKiINt2o5BvDC8v-XI,7433 +transformers/models/mamba/modeling_mamba.py,sha256=rSi18aRkpk0eq_lrQjlPQe8yLb5fQnboXU8nNothp5Y,38910 +transformers/models/mamba2/__init__.py,sha256=Ui4j-I2cnPEEszkzRTLSUW42SE4Qg1YTuW6hGeaOFZg,993 +transformers/models/mamba2/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/mamba2/__pycache__/configuration_mamba2.cpython-310.pyc,, +transformers/models/mamba2/__pycache__/modeling_mamba2.cpython-310.pyc,, +transformers/models/mamba2/configuration_mamba2.py,sha256=YWJ7Y_-cEiTLv45b5oChKdzHFh61VWFUMdDZhcjNygU,8214 +transformers/models/mamba2/modeling_mamba2.py,sha256=UvvZ0-n3zuk0ejZXfXYehBJ_J265tARNi4TRunuxqdw,47723 +transformers/models/marian/__init__.py,sha256=Yg8jbvM0Hf6WXua0__v_G-34dvG6zFib5R5e_qHtmYM,1110 +transformers/models/marian/__pycache__/__init__.cpython-310.pyc,, +transformers/models/marian/__pycache__/configuration_marian.cpython-310.pyc,, +transformers/models/marian/__pycache__/modeling_flax_marian.cpython-310.pyc,, +transformers/models/marian/__pycache__/modeling_marian.cpython-310.pyc,, +transformers/models/marian/__pycache__/modeling_tf_marian.cpython-310.pyc,, +transformers/models/marian/__pycache__/tokenization_marian.cpython-310.pyc,, +transformers/models/marian/configuration_marian.py,sha256=2Sv1CzVYYIXvLBnytaLG0T4k-ptoyphVLAJk-zRcsyw,18420 +transformers/models/marian/modeling_flax_marian.py,sha256=qkUUpKC4MPHmESuI0wjGQ95uu0Fs0jIjQMpKU_0-xMM,64429 +transformers/models/marian/modeling_marian.py,sha256=2oQk247w8wuCiWqjrUwdEEw-KumXJ3gtTtBqavYVrvU,79137 +transformers/models/marian/modeling_tf_marian.py,sha256=5bdZNB0AV82RWOvPMGe86hUelH97WrVQPtqIIxKK4rQ,72668 +transformers/models/marian/tokenization_marian.py,sha256=4Ox1R818g5c4x_pR41JgJfOhAZOaLXTRDgOQPQrOkRU,16868 +transformers/models/markuplm/__init__.py,sha256=PyhrxFsms-oD4SOBO5j3t2mIPLN3PHjKBjTGaUTITMY,1170 +transformers/models/markuplm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/markuplm/__pycache__/configuration_markuplm.cpython-310.pyc,, +transformers/models/markuplm/__pycache__/feature_extraction_markuplm.cpython-310.pyc,, +transformers/models/markuplm/__pycache__/modeling_markuplm.cpython-310.pyc,, +transformers/models/markuplm/__pycache__/processing_markuplm.cpython-310.pyc,, 
+transformers/models/markuplm/__pycache__/tokenization_markuplm.cpython-310.pyc,, +transformers/models/markuplm/__pycache__/tokenization_markuplm_fast.cpython-310.pyc,, +transformers/models/markuplm/configuration_markuplm.py,sha256=0TPvMhU795p-jmaPyBefVuwBESZGJoHOOIRNk6NgyXc,7747 +transformers/models/markuplm/feature_extraction_markuplm.py,sha256=WquMM3IybHzSpniaKvRUux-liOst9-l3Z1ZmfDNab80,6443 +transformers/models/markuplm/modeling_markuplm.py,sha256=ky6xhPmgHf7b2yZGaLn_sCfv9Ql84m4k9ps-ha5XSoo,43402 +transformers/models/markuplm/processing_markuplm.py,sha256=WuabRmuYMRBgWn3y4aLlwx4Dff8NEnXmu7GNU41DGko,6383 +transformers/models/markuplm/tokenization_markuplm.py,sha256=dRfXzjXAgSlHgTnXHMQKNUOF9id4l-NbmvQWdsmOlRg,70151 +transformers/models/markuplm/tokenization_markuplm_fast.py,sha256=G_2BWkPgi0FMRqxFlZhL0ecalVeqx2uQVcpthoLBjQE,43320 +transformers/models/mask2former/__init__.py,sha256=ceWZ-4gEaZxvgdhdvM-K18wZxuovtdS8vXkQIFNlfr4,1104 +transformers/models/mask2former/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mask2former/__pycache__/configuration_mask2former.cpython-310.pyc,, +transformers/models/mask2former/__pycache__/image_processing_mask2former.cpython-310.pyc,, +transformers/models/mask2former/__pycache__/image_processing_mask2former_fast.cpython-310.pyc,, +transformers/models/mask2former/__pycache__/modeling_mask2former.cpython-310.pyc,, +transformers/models/mask2former/__pycache__/modular_mask2former.cpython-310.pyc,, +transformers/models/mask2former/configuration_mask2former.py,sha256=6fJ8GbDInXB2Cly_QbrYx5xU4hNv-T8TV_9glVUiCtM,12588 +transformers/models/mask2former/image_processing_mask2former.py,sha256=gLJo5jiYScr98EXWlC4LFVFi2V6sLNB1PcgdtNbBpyA,59113 +transformers/models/mask2former/image_processing_mask2former_fast.py,sha256=ZJxIAlbawx2aXtZe7EQkImYn5mDxe80mmCJkB2zGqMA,34645 +transformers/models/mask2former/modeling_mask2former.py,sha256=RDkuez07i_3CBaT8W3wZ0T6rMhGjBDaeN0R16qtKw84,116603 
+transformers/models/mask2former/modular_mask2former.py,sha256=oYG1hqwYXm3mJg957jBDEjxf3d1Y0gTeIf4nRclzwuU,15779 +transformers/models/maskformer/__init__.py,sha256=DWCF0SA7DGofmzu3y-j05x1NLeuKZHADvyyx4XfJwkw,1242 +transformers/models/maskformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/configuration_maskformer.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/configuration_maskformer_swin.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/feature_extraction_maskformer.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/image_processing_maskformer.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/image_processing_maskformer_fast.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/modeling_maskformer.cpython-310.pyc,, +transformers/models/maskformer/__pycache__/modeling_maskformer_swin.cpython-310.pyc,, +transformers/models/maskformer/configuration_maskformer.py,sha256=KBU5bzrkpaNiYZM8f3JX_EfVybtiSls5CeC5iTiXAW0,10673 +transformers/models/maskformer/configuration_maskformer_swin.py,sha256=-72GuV7OyDeU0wCX7OltUAcMPSpKZqwlqJwdsLWlijE,7253 +transformers/models/maskformer/feature_extraction_maskformer.py,sha256=QYnLeWeCeAJDLO9bK1W8hVhZ8QdUslC9nJK-3qNdaUc,1332 +transformers/models/maskformer/image_processing_maskformer.py,sha256=hy0EZozPOehhC9KF7Xl2liAJwP4x8goDQIUhj7nCVaE,59962 +transformers/models/maskformer/image_processing_maskformer_fast.py,sha256=c6yqVKUMVlQOjWpwS3NM2-9YMPTM_xYcakXTQwDSZyw,35986 +transformers/models/maskformer/modeling_maskformer.py,sha256=evGzg53hMYfoQpxk5jnbzMcfW6vOuGnamKZXYT5Bpqs,84123 +transformers/models/maskformer/modeling_maskformer_swin.py,sha256=zkts_CCglUw6TKgfigt0s2JVBs_Zw8mD965D7XqqNi4,40577 +transformers/models/mbart/__init__.py,sha256=VefKwprf7OVOTgkXowKV2hT8X3mM369sRJXDY5a49ig,1148 +transformers/models/mbart/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc,, 
+transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc,, +transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc,, +transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc,, +transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc,, +transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc,, +transformers/models/mbart/configuration_mbart.py,sha256=aWNWjpmHjvRVH6hw6JccAsXbameD2-fXBxuvd7xTmR8,18252 +transformers/models/mbart/modeling_flax_mbart.py,sha256=zKLyi2BuhINSDYU-WfFy48LSzz9hJBBoR3EZmeBBFRI,75373 +transformers/models/mbart/modeling_mbart.py,sha256=FIG0JJlUxmMU_6anLkPv9kjzuOyHTd-Idg5Ot5Nnxww,88702 +transformers/models/mbart/modeling_tf_mbart.py,sha256=U_LB_z7NNp5ASoQf0HdfEPdPpL_KmeEP-AJ8C60SVHk,74106 +transformers/models/mbart/tokenization_mbart.py,sha256=5Qeg8LmCiYRQA_kgdnsHalP1Nux7VwBYJamfs-E6ERA,14200 +transformers/models/mbart/tokenization_mbart_fast.py,sha256=744tsO1V7FtO_MM_9E0OnGk98UViv9VKHB-g-KK0y-M,10880 +transformers/models/mbart50/__init__.py,sha256=9ukVFi1NqU3OoJcCJ-iKpJUZiu-K0t8yINuJHGltup0,1003 +transformers/models/mbart50/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mbart50/__pycache__/tokenization_mbart50.cpython-310.pyc,, +transformers/models/mbart50/__pycache__/tokenization_mbart50_fast.cpython-310.pyc,, +transformers/models/mbart50/tokenization_mbart50.py,sha256=bHfZAjdxNKAsVruUc-gQ6768ga1qPzgLJwugIrarNgU,16403 +transformers/models/mbart50/tokenization_mbart50_fast.py,sha256=cNhawmrk4KjiiA4LVgsh1oUUAWMJzkZgSePFdHMi1Gc,11479 +transformers/models/megatron_bert/__init__.py,sha256=u1UIYjQlrfHcy81i2FzehRDJpt6KNfNJ4AePQYKgwOU,1007 +transformers/models/megatron_bert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/megatron_bert/__pycache__/configuration_megatron_bert.cpython-310.pyc,, +transformers/models/megatron_bert/__pycache__/modeling_megatron_bert.cpython-310.pyc,, 
+transformers/models/megatron_bert/configuration_megatron_bert.py,sha256=Z8A_6hWPyBaC_64AHDlvxGB-08uqpGAyHlX12ty1k2s,6517 +transformers/models/megatron_bert/modeling_megatron_bert.py,sha256=xzC5R6C0q5NXG-9BIxV2AndKA-3qtTiGFqgFRXAHaPc,71818 +transformers/models/megatron_gpt2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc,, +transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py,sha256=3Oe0z75_0SQSM4OR-hRtH_w24LmhSV9AgsyzwKA2R9Y,37650 +transformers/models/mgp_str/__init__.py,sha256=Qb3mXPCrWbQ1ksMRYMeXorrva97OOFNr1zoy4YQg-9k,1073 +transformers/models/mgp_str/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mgp_str/__pycache__/configuration_mgp_str.cpython-310.pyc,, +transformers/models/mgp_str/__pycache__/modeling_mgp_str.cpython-310.pyc,, +transformers/models/mgp_str/__pycache__/processing_mgp_str.cpython-310.pyc,, +transformers/models/mgp_str/__pycache__/tokenization_mgp_str.cpython-310.pyc,, +transformers/models/mgp_str/configuration_mgp_str.py,sha256=Pvwj6oBIFPp219NkKV3b4kisp77UzkN2JCCy31z2RZQ,5810 +transformers/models/mgp_str/modeling_mgp_str.py,sha256=4nH1YjAgwNfajU3g_BqrMeUMfN0PQ4JcI_OFs0ArC-Y,18932 +transformers/models/mgp_str/processing_mgp_str.py,sha256=MYtqHJoIIi9fPAlLQXeoPsBNOs8qE11HcoaIHo7OacY,9433 +transformers/models/mgp_str/tokenization_mgp_str.py,sha256=VKVCt4TKcIK2yhqPTWgtPdaLz7v1-I2rn6LuoN4OvFw,3793 +transformers/models/mimi/__init__.py,sha256=VXRZ-D8-AyOYcmRGvSxhjwTYQcSNXcCXi5ubks6Qxhk,989 +transformers/models/mimi/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mimi/__pycache__/configuration_mimi.cpython-310.pyc,, +transformers/models/mimi/__pycache__/modeling_mimi.cpython-310.pyc,, +transformers/models/mimi/configuration_mimi.py,sha256=7t5b2t3PWpU7F3Vc4Ow6vfe1_8MxwpYsG3RUlyqfKUI,13508 
+transformers/models/mimi/modeling_mimi.py,sha256=Mu1tuZLH7cbjcFl25c8EB-Z6PO6XuZbOfrKStUA1NNM,80379 +transformers/models/minimax/__init__.py,sha256=3Ob5TqJX21OU-wQ5NF6aeyRbTRXRmoGzeaFFqtzkf7c,1028 +transformers/models/minimax/__pycache__/__init__.cpython-310.pyc,, +transformers/models/minimax/__pycache__/configuration_minimax.cpython-310.pyc,, +transformers/models/minimax/__pycache__/modeling_minimax.cpython-310.pyc,, +transformers/models/minimax/__pycache__/modular_minimax.cpython-310.pyc,, +transformers/models/minimax/configuration_minimax.py,sha256=w51CZTNcMLN0t6ANVXIb-P9t3J0uP_SkgxsOQlT8Z9g,11810 +transformers/models/minimax/modeling_minimax.py,sha256=AD8Mk_gYrnwN8pFYK1gS85rMKjLMb_Opor5XGp3ww50,41905 +transformers/models/minimax/modular_minimax.py,sha256=mn2tw0iOGxQYv7XeWbIqflSJ3cKQrBidZ6eYkMNK7T4,27457 +transformers/models/mistral/__init__.py,sha256=PDX9s8k0BrsBlmNShhdijHKAp6zC3QYBUwgl1Dx9EsM,1095 +transformers/models/mistral/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mistral/__pycache__/configuration_mistral.cpython-310.pyc,, +transformers/models/mistral/__pycache__/modeling_flax_mistral.cpython-310.pyc,, +transformers/models/mistral/__pycache__/modeling_mistral.cpython-310.pyc,, +transformers/models/mistral/__pycache__/modeling_tf_mistral.cpython-310.pyc,, +transformers/models/mistral/__pycache__/modular_mistral.cpython-310.pyc,, +transformers/models/mistral/configuration_mistral.py,sha256=e423bLQqzKYrnIr6tkVAfcMJVqmReWzlc_jVj4jBH2o,7757 +transformers/models/mistral/modeling_flax_mistral.py,sha256=S79pNF-3Ljh26bZC0FVx__SUxk_DmRVvk6mj_xpfYJc,31805 +transformers/models/mistral/modeling_mistral.py,sha256=ZX1W8nxtEjbO0xxZ26GC3CwRKcM3esnAXICzRXo23cA,20596 +transformers/models/mistral/modeling_tf_mistral.py,sha256=mj_ujpOFGaoluObMg8ls5rgBiTZtDKgqR8l7QZRfoYM,44477 +transformers/models/mistral/modular_mistral.py,sha256=Z_n0bveMbALsiNTQRRc-1nsnxmpQezxFcRX_WBJjm5g,7517 
+transformers/models/mistral3/__init__.py,sha256=ccR4AQqjFkPl8JVYyVmVvbVm618FlOw4cpwT7N-8ZD4,1036 +transformers/models/mistral3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mistral3/__pycache__/configuration_mistral3.cpython-310.pyc,, +transformers/models/mistral3/__pycache__/modeling_mistral3.cpython-310.pyc,, +transformers/models/mistral3/__pycache__/modular_mistral3.cpython-310.pyc,, +transformers/models/mistral3/configuration_mistral3.py,sha256=YAiUxcFcCzFCK0ny41_8ENqm5Ui8HssnKsjUEzdOvJE,5709 +transformers/models/mistral3/modeling_mistral3.py,sha256=1mLBniw4oS0OdzHgPIPMXe4zoSKSX6feI53wPX127w0,23561 +transformers/models/mistral3/modular_mistral3.py,sha256=92Y-e6WxRlT9euLfXXkJFucRlqCesoW7QZe1SuIOKc8,14415 +transformers/models/mixtral/__init__.py,sha256=_i66uHDx5A0-UBwgR2nwibxSf0ZePqpTa_Qsm0Cg_Bs,1015 +transformers/models/mixtral/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mixtral/__pycache__/configuration_mixtral.cpython-310.pyc,, +transformers/models/mixtral/__pycache__/modeling_mixtral.cpython-310.pyc,, +transformers/models/mixtral/__pycache__/modular_mixtral.cpython-310.pyc,, +transformers/models/mixtral/configuration_mixtral.py,sha256=7OMbg8pfaU0WfYnGIIqq5nDzx98x8NPKi-NOb3oSAdk,9073 +transformers/models/mixtral/modeling_mixtral.py,sha256=H28l90y0u1h1gMVCPs0BycOeK7WTSupsbvyTwti28J0,30146 +transformers/models/mixtral/modular_mixtral.py,sha256=Of7fU6edoj9UUvtI8DKD4lNYJ1rs8Qt6dU3AUCtIzaM,18849 +transformers/models/mlcd/__init__.py,sha256=hLiLB1E0jT7sI3s8TraLb_Z1WOpwS69zac5kyHNfx4E,989 +transformers/models/mlcd/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mlcd/__pycache__/configuration_mlcd.cpython-310.pyc,, +transformers/models/mlcd/__pycache__/modeling_mlcd.cpython-310.pyc,, +transformers/models/mlcd/__pycache__/modular_mlcd.cpython-310.pyc,, +transformers/models/mlcd/configuration_mlcd.py,sha256=8WPScG0ONO9SbjrfdhSZtxIGCR5NNOl21sleN6Q4hQI,5805 
+transformers/models/mlcd/modeling_mlcd.py,sha256=Fhw4V9WC4DdGYHjzSfNueC4xH8grl2T3H4rd7Y-w7Jc,27300 +transformers/models/mlcd/modular_mlcd.py,sha256=oasTUnxnhqD37aoliw5Xg5K9w06IJPndIhM1B3zFcZw,23393 +transformers/models/mllama/__init__.py,sha256=2lTGCiL6EZirXNcu4aKV7vSmv50iRsQnCV-c9sahNXg,1073 +transformers/models/mllama/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mllama/__pycache__/configuration_mllama.cpython-310.pyc,, +transformers/models/mllama/__pycache__/image_processing_mllama.cpython-310.pyc,, +transformers/models/mllama/__pycache__/modeling_mllama.cpython-310.pyc,, +transformers/models/mllama/__pycache__/processing_mllama.cpython-310.pyc,, +transformers/models/mllama/configuration_mllama.py,sha256=xNSwdFPE4V0MAKsiCljoK3UFekwLvRvMdSrNi_L5qZ0,18209 +transformers/models/mllama/image_processing_mllama.py,sha256=kytPhb0CNLMd8iEa7mDL25-1Wpy5IBGU_-1uqoI-CA8,38223 +transformers/models/mllama/modeling_mllama.py,sha256=6sGW1yb2K49I1D9XRcQFpf4YpVHlsT-sUcU_831oyqk,78614 +transformers/models/mllama/processing_mllama.py,sha256=p8rAbSZi8iHBgcVZ3tVqU9OgI18WElJGtTSUMju2bZs,18214 +transformers/models/mluke/__init__.py,sha256=e_3cNftWOmhNXk-zsA1-2DOBT9L56SHr-6qev0xI7Ws,956 +transformers/models/mluke/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mluke/__pycache__/tokenization_mluke.cpython-310.pyc,, +transformers/models/mluke/tokenization_mluke.py,sha256=PpWmJFXmwp40fIQvDN_m7Xo4LJNtWNAt6x5JH6Wm0us,82153 +transformers/models/mm_grounding_dino/__init__.py,sha256=mk2hUY_rZw6JSjfkqSL4hVYJKxh5ViM3TZfib-09kpc,1015 +transformers/models/mm_grounding_dino/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mm_grounding_dino/__pycache__/configuration_mm_grounding_dino.cpython-310.pyc,, +transformers/models/mm_grounding_dino/__pycache__/modeling_mm_grounding_dino.cpython-310.pyc,, +transformers/models/mm_grounding_dino/__pycache__/modular_mm_grounding_dino.cpython-310.pyc,, 
+transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py,sha256=XThz49Ur8-mjVXeY_OplYAL177I_qHXdLFKGe6aE6NQ,15313 +transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py,sha256=2ChOMwPfHOGow2LQUylZaTIEE74Uj2l1vtzazMDMobg,129535 +transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py,sha256=WgmStf1WlZef_SnYsSte2oFk5vkGbt_0bUyMlWuBSso,19408 +transformers/models/mobilebert/__init__.py,sha256=Jy7IZ2oQAjyE_KOoT-I7Z9bqPRVLfsOwx8XY3Y43RFc,1134 +transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc,, +transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc,, +transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc,, +transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc,, +transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc,, +transformers/models/mobilebert/configuration_mobilebert.py,sha256=kSjUZXRAtgvEjp4C2pxC8Po5MS6rM4i4v_xAzvqqHVk,8283 +transformers/models/mobilebert/modeling_mobilebert.py,sha256=icrVzODm-Ep83hkqQzFqygFEKKLpB5yxltVhi1BXc28,63164 +transformers/models/mobilebert/modeling_tf_mobilebert.py,sha256=kTa_oE6_AnYfGoWefuzHT1PXzB7GLMwVmkwocK3kDNw,83932 +transformers/models/mobilebert/tokenization_mobilebert.py,sha256=u3_ex8yv14oHz90QsfBjkELZaDwQDNicgNFgXDgtkvU,20149 +transformers/models/mobilebert/tokenization_mobilebert_fast.py,sha256=_cLnh_Vn7JHMg973Q4kjdkO35VIpNOW9UQt9OIGAvok,6703 +transformers/models/mobilenet_v1/__init__.py,sha256=kS0kf8Q0rDhNcqIJM6iI6iVufztYwEl-TsOzVQZwn-Y,1159 +transformers/models/mobilenet_v1/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mobilenet_v1/__pycache__/configuration_mobilenet_v1.cpython-310.pyc,, +transformers/models/mobilenet_v1/__pycache__/feature_extraction_mobilenet_v1.cpython-310.pyc,, 
+transformers/models/mobilenet_v1/__pycache__/image_processing_mobilenet_v1.cpython-310.pyc,, +transformers/models/mobilenet_v1/__pycache__/image_processing_mobilenet_v1_fast.cpython-310.pyc,, +transformers/models/mobilenet_v1/__pycache__/modeling_mobilenet_v1.cpython-310.pyc,, +transformers/models/mobilenet_v1/configuration_mobilenet_v1.py,sha256=aR3QacEyWEphbgo_mcEvHS7NOVOwxQUhDvoWziA-q54,4939 +transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py,sha256=Yydhc-fHuAzWeUOk7qrLrrs-HzMeaLE_IWeploaoOQc,1341 +transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py,sha256=qgVHPsmZfRylHFkY-eTLSV-SgZyiQubTJixR8P0Ysb8,15383 +transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py,sha256=RQI4Ujpx8yV9YFFvXzZEVZxqiZrQ97JSHnP_640V6Bs,1498 +transformers/models/mobilenet_v1/modeling_mobilenet_v1.py,sha256=8k2_ViEsSc0tMeyFwkry-kCV4u4-5aYsqqAc1KsZNSA,16343 +transformers/models/mobilenet_v2/__init__.py,sha256=PGEnF5QRb3bZg_Iux0DuD7VPysu8nA0WxJEd6YOXtmw,1159 +transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc,, +transformers/models/mobilenet_v2/__pycache__/feature_extraction_mobilenet_v2.cpython-310.pyc,, +transformers/models/mobilenet_v2/__pycache__/image_processing_mobilenet_v2.cpython-310.pyc,, +transformers/models/mobilenet_v2/__pycache__/image_processing_mobilenet_v2_fast.cpython-310.pyc,, +transformers/models/mobilenet_v2/__pycache__/modeling_mobilenet_v2.cpython-310.pyc,, +transformers/models/mobilenet_v2/configuration_mobilenet_v2.py,sha256=kEuy8U-ShxevXr2lGluqA4BRAq_3-UsWN4YutMm1yoc,6835 +transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py,sha256=aO5lNZnnoRPfhoBEiDiLwuctZkGlFsX1Io-u167A7QU,1341 +transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py,sha256=FQZfha3hB02w2JJRFfAj5XTzB1jQa6Hj_g2cQvKzwVg,24747 
+transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py,sha256=NNM4YlpbRW8eI-aufadfL1i1uGGnO7ZwRUC7-D4jgEk,10049 +transformers/models/mobilenet_v2/modeling_mobilenet_v2.py,sha256=KNRJrJi13pbH8lKXpeSPiGhZK5VwvIU0mUxaXxaKCHQ,32034 +transformers/models/mobilevit/__init__.py,sha256=-pqcwwjQsaYPNEPlRiS9B06Rl9kZf4b8yWGWtW3d4K0,1185 +transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc,, +transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc,, +transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc,, +transformers/models/mobilevit/__pycache__/image_processing_mobilevit_fast.cpython-310.pyc,, +transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc,, +transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc,, +transformers/models/mobilevit/configuration_mobilevit.py,sha256=tyQANkpBRv8MHvXm8nYGlMI_5gQJQekS25pQTQcbfPw,7596 +transformers/models/mobilevit/feature_extraction_mobilevit.py,sha256=rS3UvVaXJwUDc7ZsVoi33DAvQewGdnC4SOgqdxISEwk,1324 +transformers/models/mobilevit/image_processing_mobilevit.py,sha256=DWFDxSuSJ-rhdLwqOfKGRsxrqTRPdzBP_wcNTz9AdBo,23509 +transformers/models/mobilevit/image_processing_mobilevit_fast.py,sha256=lz0jTGmciDKG8Dk3K7Mt_sv37SchKlDnwIPu2R0hV7A,10206 +transformers/models/mobilevit/modeling_mobilevit.py,sha256=c92G2aQol47HjAXDNrWgLl0oh8rPfljeRS1wgvhxP6c,37819 +transformers/models/mobilevit/modeling_tf_mobilevit.py,sha256=sNH44uDhci-1MnfZ2HqXm3mrkQn4KfOwHpbjKbRVwB8,54757 +transformers/models/mobilevitv2/__init__.py,sha256=pAGk_9X22yOYvlcwbqTc4nm6fL4rPhAhDpdBguna5Q0,1003 +transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mobilevitv2/__pycache__/configuration_mobilevitv2.cpython-310.pyc,, +transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc,, 
+transformers/models/mobilevitv2/configuration_mobilevitv2.py,sha256=2mQCHZ8tq2bfrswTfb1fnottcJfY3p_g_vLoWXjkmBE,7159 +transformers/models/mobilevitv2/modeling_mobilevitv2.py,sha256=MXpWQeXenuvWUDQi1zMrvEhWiiB2IgoCHywBtF5xrhs,35568 +transformers/models/modernbert/__init__.py,sha256=BEQFRFfcKvUlphA1ibW3s34Vkbm-MUuyqzaLbrIFiAA,1006 +transformers/models/modernbert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/modernbert/__pycache__/configuration_modernbert.cpython-310.pyc,, +transformers/models/modernbert/__pycache__/modeling_modernbert.cpython-310.pyc,, +transformers/models/modernbert/__pycache__/modular_modernbert.cpython-310.pyc,, +transformers/models/modernbert/configuration_modernbert.py,sha256=HQK0ucX7it6g0ilY3vHTUyRupzHOz50KpLNyNWsP92g,11493 +transformers/models/modernbert/modeling_modernbert.py,sha256=9-qAhL04gx7Rs9p9IZPOgVBcmbbOCkI0hu6keoz_gjc,65348 +transformers/models/modernbert/modular_modernbert.py,sha256=kUSrcHuLASXyd4mu-XfT9dBM41qjC4zeqIhIXHLBgfE,71128 +transformers/models/modernbert_decoder/__init__.py,sha256=RjLebVKPcGgNEORY5xypTPd8oYWcnFmbGm3hBqQX-HE,1022 +transformers/models/modernbert_decoder/__pycache__/__init__.cpython-310.pyc,, +transformers/models/modernbert_decoder/__pycache__/configuration_modernbert_decoder.cpython-310.pyc,, +transformers/models/modernbert_decoder/__pycache__/modeling_modernbert_decoder.cpython-310.pyc,, +transformers/models/modernbert_decoder/__pycache__/modular_modernbert_decoder.cpython-310.pyc,, +transformers/models/modernbert_decoder/configuration_modernbert_decoder.py,sha256=08paqhKhXbw-sTasdy5vNsujOFc5wGVCyxWJmtAqkHk,10629 +transformers/models/modernbert_decoder/modeling_modernbert_decoder.py,sha256=q5-dYiUwIhn-0nWL9eZGRXO78IDaMmX_sVUZDEYV_6Y,25929 +transformers/models/modernbert_decoder/modular_modernbert_decoder.py,sha256=v_xlWZYwHj0EkBx7aHERSORsgqS7Gogk7Lw39PFevg4,34120 +transformers/models/moonshine/__init__.py,sha256=eBgvc9LtoDnB6HnNvrObDWL3h_L4Sgn5-D-hepNfAmI,999 
+transformers/models/moonshine/__pycache__/__init__.cpython-310.pyc,, +transformers/models/moonshine/__pycache__/configuration_moonshine.cpython-310.pyc,, +transformers/models/moonshine/__pycache__/modeling_moonshine.cpython-310.pyc,, +transformers/models/moonshine/__pycache__/modular_moonshine.cpython-310.pyc,, +transformers/models/moonshine/configuration_moonshine.py,sha256=FCiqs9BayGl3oUGFh4FIAFz-3eZKlpqQOGusZvhFBZ8,13524 +transformers/models/moonshine/modeling_moonshine.py,sha256=vDyJ9BE4fZ77EkzbeRu0Umw2uGv71EcVZfo6Qq_LuI0,48805 +transformers/models/moonshine/modular_moonshine.py,sha256=F7OgtTxQFAwbemH2udbxfnZnA88OgcjiXrvF2ubltY4,43523 +transformers/models/moshi/__init__.py,sha256=uW4oqTKZdbmURZaC_xwwHXnYEMyLJrMEJAlfbUzSWO8,991 +transformers/models/moshi/__pycache__/__init__.cpython-310.pyc,, +transformers/models/moshi/__pycache__/configuration_moshi.cpython-310.pyc,, +transformers/models/moshi/__pycache__/modeling_moshi.cpython-310.pyc,, +transformers/models/moshi/configuration_moshi.py,sha256=zVqARdo5wQHERpun-Z1f1mw1_ddLTn0fQsvs2SjE5J8,16104 +transformers/models/moshi/modeling_moshi.py,sha256=ATZXNGnvXp7y_OVj6D9TpcmhCZ2tZJTufqxL9woO4mE,125077 +transformers/models/mpnet/__init__.py,sha256=agt4uraqHTtlIphsDB17XVAPzCKHaPBKlVaQkKHxRyM,1109 +transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc,, +transformers/models/mpnet/__pycache__/modeling_mpnet.cpython-310.pyc,, +transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc,, +transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc,, +transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc,, +transformers/models/mpnet/configuration_mpnet.py,sha256=DsCgTVE6hDGcaVxd2yqEPj7Ph-JLE2nPyt1AJlVZkx4,5327 +transformers/models/mpnet/modeling_mpnet.py,sha256=3SI7QLERArtBqN0rRbsTMaskn_GynI-8V7DGSnob9xA,37898 
+transformers/models/mpnet/modeling_tf_mpnet.py,sha256=tIjgRjIel676orRDAYK6oQ1YnPsPMY7rlCD0cMBdc_Y,55535 +transformers/models/mpnet/tokenization_mpnet.py,sha256=NNYv8Zwj-6RFWQ7Rynjpe1oIvEZSOBl209DlaBju-Ro,22442 +transformers/models/mpnet/tokenization_mpnet_fast.py,sha256=2j7lcdtgYzsQy52RV-dTbGUwgczxY5qO6siJauxsnuY,9180 +transformers/models/mpt/__init__.py,sha256=DAIIAY0kPL-bXMkPUvxmP97HCXPi-SoM3NLnlJJYarg,987 +transformers/models/mpt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mpt/__pycache__/configuration_mpt.cpython-310.pyc,, +transformers/models/mpt/__pycache__/modeling_mpt.cpython-310.pyc,, +transformers/models/mpt/configuration_mpt.py,sha256=S-Ah1uArFpNIGv97PC0mNcW_JBvM5I83TRjM61KjNZ0,10499 +transformers/models/mpt/modeling_mpt.py,sha256=xknvcplWTSV4gk8HU5xW8ssg4yqDIRXxXPeh86dnqYw,35469 +transformers/models/mra/__init__.py,sha256=51mnm4DFq6aWxOsmaaVZDL28QozNauXyTtbEihDxUQU,987 +transformers/models/mra/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mra/__pycache__/configuration_mra.cpython-310.pyc,, +transformers/models/mra/__pycache__/modeling_mra.cpython-310.pyc,, +transformers/models/mra/configuration_mra.py,sha256=oNhRz6PdvUK_ugoiAhHDuNkGgBNyDguATgQdKeTJBnY,6536 +transformers/models/mra/modeling_mra.py,sha256=PUTNTUScRSfb1bcMzT0gL9v-hMNA7DJEvJNWEoUDpvI,57319 +transformers/models/mt5/__init__.py,sha256=UK8vGX9r6fPdzPaJKCbGJ7RCqKOdIo-7H9V-Qp8rwEg,1095 +transformers/models/mt5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc,, +transformers/models/mt5/__pycache__/modeling_flax_mt5.cpython-310.pyc,, +transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc,, +transformers/models/mt5/__pycache__/modeling_tf_mt5.cpython-310.pyc,, +transformers/models/mt5/__pycache__/tokenization_mt5.cpython-310.pyc,, +transformers/models/mt5/__pycache__/tokenization_mt5_fast.cpython-310.pyc,, 
+transformers/models/mt5/configuration_mt5.py,sha256=oXTmtVxXx5i-S70WEcmaZo_kI1CUKCaxA7aeos5iX7k,8011 +transformers/models/mt5/modeling_flax_mt5.py,sha256=9WjlLB_EV9WDiy-rBxzVUPocsHrv02cEa4OB8lVR6EA,4329 +transformers/models/mt5/modeling_mt5.py,sha256=i_JUen8UIojb3EHxGZrP8TrBFeEwFVg6oaHV8EpVuK4,113558 +transformers/models/mt5/modeling_tf_mt5.py,sha256=EIUkWvuApAbiaX6qhveT1KC43s_NDmQazLrbYT45aao,3406 +transformers/models/mt5/tokenization_mt5.py,sha256=AckaXSw5OojOGLezMhrsv2a9BMZXwzhy5IsT3hvp_Q8,746 +transformers/models/mt5/tokenization_mt5_fast.py,sha256=1npEFH_c4nDQxOFNoqcGNW30KCWe04BpLrrv7aDcDQ8,762 +transformers/models/musicgen/__init__.py,sha256=iwtW9pg6iDe5D2dWVC4IRU8QbNmRK5kMqPCM8fsUSgo,1036 +transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc,, +transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc,, +transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc,, +transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc,, +transformers/models/musicgen/configuration_musicgen.py,sha256=YpsrhRaEqRAOYYlkx5uyelz5iq2dwvU8_mkknRIN9AM,10959 +transformers/models/musicgen/modeling_musicgen.py,sha256=-G_9iqc335TOLZlh2k07L8MqX7UNJ9rGaZr6H1kb_rE,118303 +transformers/models/musicgen/processing_musicgen.py,sha256=UIeK_BGwttTAYk_7IDy3s7-MLTieUfgvwtjAg-ncGtc,5695 +transformers/models/musicgen_melody/__init__.py,sha256=WVEsVs7g0XlpO_yd1X0X4QnMjhG0h_n6T41FpdJcnS8,1011 +transformers/models/musicgen_melody/__pycache__/__init__.cpython-310.pyc,, +transformers/models/musicgen_melody/__pycache__/configuration_musicgen_melody.cpython-310.pyc,, +transformers/models/musicgen_melody/__pycache__/feature_extraction_musicgen_melody.cpython-310.pyc,, +transformers/models/musicgen_melody/__pycache__/modeling_musicgen_melody.cpython-310.pyc,, +transformers/models/musicgen_melody/__pycache__/processing_musicgen_melody.cpython-310.pyc,, 
+transformers/models/musicgen_melody/configuration_musicgen_melody.py,sha256=wlnZBqJQ0XElTRvI2zPc1PGLarzMsWBQaHKYmkensmk,12016 +transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py,sha256=onUVyD4VSztogKli3SlXeuhd6cNn5EnH6PSjx6Lj36Y,15359 +transformers/models/musicgen_melody/modeling_musicgen_melody.py,sha256=FLxYKe-gxKp--6oCCR0n1-W_XXTv2Ubh-cccA01uxVI,111496 +transformers/models/musicgen_melody/processing_musicgen_melody.py,sha256=52Py-03j7QHlpzBfORpFb_PE6xUfj6IIbcyTvlasRf0,8747 +transformers/models/mvp/__init__.py,sha256=0e0-wP4EkfzPiO_BlHlmyVUEq-1kb9RHY2Ikbk66W7s,1064 +transformers/models/mvp/__pycache__/__init__.cpython-310.pyc,, +transformers/models/mvp/__pycache__/configuration_mvp.cpython-310.pyc,, +transformers/models/mvp/__pycache__/modeling_mvp.cpython-310.pyc,, +transformers/models/mvp/__pycache__/tokenization_mvp.cpython-310.pyc,, +transformers/models/mvp/__pycache__/tokenization_mvp_fast.cpython-310.pyc,, +transformers/models/mvp/configuration_mvp.py,sha256=Ah_EG0nItOD3_y_WDad6cnCcgGY2TXWuJzxuLJQ6fq4,8451 +transformers/models/mvp/modeling_mvp.py,sha256=EwuClPsNoN3bHYfOCeEDffWjZdH2XRZpcmWMeg0LTw4,82086 +transformers/models/mvp/tokenization_mvp.py,sha256=7Q1V8hHo1hd9RCKu-A1lv2WNAcTbxhBidKNPHOLjwyc,16206 +transformers/models/mvp/tokenization_mvp_fast.py,sha256=xQrPiI91_CG7II-gQEJs7rtTSMRbKNARc185MOa5JQs,11819 +transformers/models/myt5/__init__.py,sha256=MFQX-RuvZujGb_twBWBQpTt4NZq6FxreEysWmF2fFGI,955 +transformers/models/myt5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/myt5/__pycache__/tokenization_myt5.cpython-310.pyc,, +transformers/models/myt5/tokenization_myt5.py,sha256=GdQXAMMCaCIbfJI-Hry-9myIFXO2TLpoO6D8VC1HpAs,15535 +transformers/models/nemotron/__init__.py,sha256=ZwaMH1AQ0VIuFnouYe0Sx0HcCGA7PaCp3-_yw3xjeQA,997 +transformers/models/nemotron/__pycache__/__init__.cpython-310.pyc,, +transformers/models/nemotron/__pycache__/configuration_nemotron.cpython-310.pyc,, 
+transformers/models/nemotron/__pycache__/modeling_nemotron.cpython-310.pyc,, +transformers/models/nemotron/configuration_nemotron.py,sha256=QMH_Mw48ZgCvovfE3MtUM6W_34DVUb7unpr27JaVTIg,7399 +transformers/models/nemotron/modeling_nemotron.py,sha256=ULmclrPSYUlZOrciihjQNy6N9LX6MHQJ-V9vSLih20k,43692 +transformers/models/nllb/__init__.py,sha256=MLFrxhOJ3xvOAcRulvCEMoKsajLuudllZLMrYDYQOas,997 +transformers/models/nllb/__pycache__/__init__.cpython-310.pyc,, +transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc,, +transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc,, +transformers/models/nllb/tokenization_nllb.py,sha256=nHIf5mrI6gDEugszKVOwpB3mWPE5a1KcBFK2qRTrg-E,19158 +transformers/models/nllb/tokenization_nllb_fast.py,sha256=uT2QAtg_upIEJ9W6lKkDi7EDHOqrB08oUH6OTGNagm4,15822 +transformers/models/nllb_moe/__init__.py,sha256=sAfoAnhHK_reU1a2WUoF1rFtPBckeGGrzJCD8gUv54A,997 +transformers/models/nllb_moe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/nllb_moe/__pycache__/configuration_nllb_moe.cpython-310.pyc,, +transformers/models/nllb_moe/__pycache__/modeling_nllb_moe.cpython-310.pyc,, +transformers/models/nllb_moe/configuration_nllb_moe.py,sha256=jHKoRpGbrlNEcBiOk3b2fPOo1m6OD7Tx11F9r8SSd1Y,11222 +transformers/models/nllb_moe/modeling_nllb_moe.py,sha256=7e1mRTC3BuqRV9D1bUjILNkynQxlUq4dHJcXr09hDsk,82135 +transformers/models/nougat/__init__.py,sha256=sFYK9O1tIETKks9tQ5d6X3gGWAFfTXNge06ZdnnKV9s,1090 +transformers/models/nougat/__pycache__/__init__.cpython-310.pyc,, +transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc,, +transformers/models/nougat/__pycache__/image_processing_nougat_fast.cpython-310.pyc,, +transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc,, +transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc,, +transformers/models/nougat/image_processing_nougat.py,sha256=u0IvhghXvVllApUTfDcBHL_4mXpjTqDNM2Ue_autoho,24342 
+transformers/models/nougat/image_processing_nougat_fast.py,sha256=-vz4-EYom3g97WnEysl1IYEQSvNlceJFVftVol98-LU,11727 +transformers/models/nougat/processing_nougat.py,sha256=CF3GnxGFAXFRVS-JuHYxAy9bFzodpNJ9-lWV25RRqAM,6841 +transformers/models/nougat/tokenization_nougat_fast.py,sha256=cECSaLYYmZArdgLFFVFykYiOS37UHUMtfBsbJ6EsVyg,24466 +transformers/models/nystromformer/__init__.py,sha256=CwEg6m4nJW_AfNDws_MIv1O1x5IO3xPp-FYqirlFXwk,1007 +transformers/models/nystromformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/nystromformer/__pycache__/configuration_nystromformer.cpython-310.pyc,, +transformers/models/nystromformer/__pycache__/modeling_nystromformer.cpython-310.pyc,, +transformers/models/nystromformer/configuration_nystromformer.py,sha256=UyLmPF2li3_ADTz9tS1h5t4CDY5d5GzsfeC9hG42RzI,6402 +transformers/models/nystromformer/modeling_nystromformer.py,sha256=SAn8GLKjQudRNXxBdtgGOEb4TGfoENs81wFqXjvWvLk,43501 +transformers/models/olmo/__init__.py,sha256=x9u_5vqI52-uBuj89-6aYucGDlvBUEPSOhPLLB1asok,1009 +transformers/models/olmo/__pycache__/__init__.cpython-310.pyc,, +transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc,, +transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc,, +transformers/models/olmo/__pycache__/modular_olmo.cpython-310.pyc,, +transformers/models/olmo/configuration_olmo.py,sha256=s_YotovUtp-VmB7q9RV4B1TKgjhp3gE6Ucqg72I3GJc,9423 +transformers/models/olmo/modeling_olmo.py,sha256=FNafupNOuYguXIHZzGfaGTZyTMZX2Pz5mlumrDLM3H4,19739 +transformers/models/olmo/modular_olmo.py,sha256=MkYiUN7PqtaI4JcIqKJocwBEv0VLZhyPkV03BeN-CRk,7028 +transformers/models/olmo2/__init__.py,sha256=Frt9nEMsfPszod1lkFTAJUobU50IjOFlqI6uJkuQVcY,1011 +transformers/models/olmo2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/olmo2/__pycache__/configuration_olmo2.cpython-310.pyc,, +transformers/models/olmo2/__pycache__/modeling_olmo2.cpython-310.pyc,, +transformers/models/olmo2/__pycache__/modular_olmo2.cpython-310.pyc,, 
+transformers/models/olmo2/configuration_olmo2.py,sha256=kqBjnAs0k-pZ-Y8zNMRWQ-6gav-YpOpg6oy9LZ-AhpE,9439 +transformers/models/olmo2/modeling_olmo2.py,sha256=FdkBi9k5K7smpXjZaXefk8eQ-eG2ao8mYFjKxHiZShY,20228 +transformers/models/olmo2/modular_olmo2.py,sha256=3PBmMPwuLY4bxsbouAGeI2RhBRNkZ8bKAJuRQ_9MVLk,14091 +transformers/models/olmoe/__init__.py,sha256=eQ6mx9aBIcA4RiK3p7dbqORokkuMfQNRss06E8uWNrk,991 +transformers/models/olmoe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/olmoe/__pycache__/configuration_olmoe.cpython-310.pyc,, +transformers/models/olmoe/__pycache__/modeling_olmoe.cpython-310.pyc,, +transformers/models/olmoe/configuration_olmoe.py,sha256=_NEJ3nvuHqEOJND_jWjpqfV-mR7-CB9lUnVPoDmhCp4,9069 +transformers/models/olmoe/modeling_olmoe.py,sha256=6OeANVoDpInv9ckRMkFXItYYL0zL8q7GUCWpH3g3pHk,51375 +transformers/models/omdet_turbo/__init__.py,sha256=XIckpuo9tkT7NB5uTs9wLdpxr9GDedQPVJL2P8XU-7Q,1045 +transformers/models/omdet_turbo/__pycache__/__init__.cpython-310.pyc,, +transformers/models/omdet_turbo/__pycache__/configuration_omdet_turbo.cpython-310.pyc,, +transformers/models/omdet_turbo/__pycache__/modeling_omdet_turbo.cpython-310.pyc,, +transformers/models/omdet_turbo/__pycache__/processing_omdet_turbo.cpython-310.pyc,, +transformers/models/omdet_turbo/configuration_omdet_turbo.py,sha256=2XMjtVGwInCV4GpQW-FHqsvXCoce1rsiBMT9j2BXevo,14933 +transformers/models/omdet_turbo/modeling_omdet_turbo.py,sha256=w8u1x7UgfKgh6PvxWvJWsBAMSTF4LXCplQq_94bkKCo,74036 +transformers/models/omdet_turbo/processing_omdet_turbo.py,sha256=u6uYpltClr-PPg4C1N6jlhz54L7nbtGVToiYKtFQdT4,17322 +transformers/models/oneformer/__init__.py,sha256=MrdVp7ZBJOVbWMpwojOPnEzDDW2HSqs5oSZG81jdCQI,1136 +transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc,, +transformers/models/oneformer/__pycache__/image_processing_oneformer.cpython-310.pyc,, 
+transformers/models/oneformer/__pycache__/image_processing_oneformer_fast.cpython-310.pyc,, +transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc,, +transformers/models/oneformer/__pycache__/processing_oneformer.cpython-310.pyc,, +transformers/models/oneformer/configuration_oneformer.py,sha256=GeVa40j8P2XaKbU58JWE9C2EYXNhgUJjY13kUzXliMU,13678 +transformers/models/oneformer/image_processing_oneformer.py,sha256=YkR_m8cWn2NOCtMRBodqf_wFkV2AkWLT7J8cLQn8bAE,60616 +transformers/models/oneformer/image_processing_oneformer_fast.py,sha256=Mj8MVBMbU698SWvpdoZ25n7ReOcDLrHqCKEgWXUw8uo,41884 +transformers/models/oneformer/modeling_oneformer.py,sha256=z7n26KuG8Oe-RUDID-YnvEtxVGFbrX8JGesMjaYfGak,139690 +transformers/models/oneformer/processing_oneformer.py,sha256=qOKqFy8VD-IwWzWJL9Z7SUwjwUkk5cCkNSP5ZqAqv2w,9387 +transformers/models/openai/__init__.py,sha256=q0fAl8ajoJyknHe5A3ZHuHH3zww8xdupt_j49lIaObY,1114 +transformers/models/openai/__pycache__/__init__.cpython-310.pyc,, +transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc,, +transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc,, +transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc,, +transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc,, +transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc,, +transformers/models/openai/configuration_openai.py,sha256=ERFfcrsaGEuG-8WnuBDfYyHR7uc5ihEr9JfItBMGZm0,7109 +transformers/models/openai/modeling_openai.py,sha256=z_DEm4TvLy5l7M3oOccw524Y8Sc2jvPkY9wUfFGBspk,37487 +transformers/models/openai/modeling_tf_openai.py,sha256=mZWr6qc8fCXTR6y6GXLc_E_FgHn5-CB6dzMOhGXbfVM,40857 +transformers/models/openai/tokenization_openai.py,sha256=MhxS6G-hQLHLdmarGxOPBpvMEbNLHuFRZehOjzg-o90,15159 +transformers/models/openai/tokenization_openai_fast.py,sha256=qBalVcRbqq9AZAnzkFvYTbokp4eU-BvgO3QIWYoqndo,2553 
+transformers/models/opt/__init__.py,sha256=Xk3Z-OdrOC4Y5J0KOEIB74Pp4PsfAllBI503NT7yFk8,1059 +transformers/models/opt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/opt/__pycache__/configuration_opt.cpython-310.pyc,, +transformers/models/opt/__pycache__/modeling_flax_opt.cpython-310.pyc,, +transformers/models/opt/__pycache__/modeling_opt.cpython-310.pyc,, +transformers/models/opt/__pycache__/modeling_tf_opt.cpython-310.pyc,, +transformers/models/opt/configuration_opt.py,sha256=nEHN7nBCghjCfcU_vueoTL5TfCMc6JUE6cUH6knhnxM,6694 +transformers/models/opt/modeling_flax_opt.py,sha256=a1OCINHVTj-osjuJUxfYZgTS-1j7r6EPT-TgAD9lP74,31631 +transformers/models/opt/modeling_opt.py,sha256=Te8Q2rAxLIaqLAOcJwH33fJtlBB1UGbndxFbBM1K9uU,48854 +transformers/models/opt/modeling_tf_opt.py,sha256=gZ8xHAIaZDYmsvHbxNI66ITB0QIaYoYdbLF7uNVuTEg,49360 +transformers/models/owlv2/__init__.py,sha256=vHnQwYJ0hEc12ofOV3b1k9fHscBgfQEXcLLgo4-H3GU,1116 +transformers/models/owlv2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/owlv2/__pycache__/configuration_owlv2.cpython-310.pyc,, +transformers/models/owlv2/__pycache__/image_processing_owlv2.cpython-310.pyc,, +transformers/models/owlv2/__pycache__/image_processing_owlv2_fast.cpython-310.pyc,, +transformers/models/owlv2/__pycache__/modeling_owlv2.cpython-310.pyc,, +transformers/models/owlv2/__pycache__/modular_owlv2.cpython-310.pyc,, +transformers/models/owlv2/__pycache__/processing_owlv2.cpython-310.pyc,, +transformers/models/owlv2/configuration_owlv2.py,sha256=18Krr0RoZ25eU0eCSPegHXXKAoA64zw-bknsCeBkPTs,13134 +transformers/models/owlv2/image_processing_owlv2.py,sha256=W0HZzjkhSk9himUBqfIKXK_gJ1pJ9nAn9XCt20nQXkw,28039 +transformers/models/owlv2/image_processing_owlv2_fast.py,sha256=hel-tIO6jPRqE_Rl2rksI6zo_GwRSJ8UpfFAQH5H4wk,18488 +transformers/models/owlv2/modeling_owlv2.py,sha256=OGEKTGLaTja5J75toXHtFdRs5fLUylW91chjXPXfC8M,78935 +transformers/models/owlv2/modular_owlv2.py,sha256=ddhRraRu4gJf73UwuelNoMljqegLFLM9O8RxHF3y1Fk,9067 
+transformers/models/owlv2/processing_owlv2.py,sha256=xT4Jo_4w6NBIAKSj8Na8pRcYIa82kKtkfhhyjPKh5b0,15323 +transformers/models/owlvit/__init__.py,sha256=Nhrrja_j2RZtj-rQS6TDJ8upQqnMptnFukq49QAkito,1166 +transformers/models/owlvit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/owlvit/__pycache__/configuration_owlvit.cpython-310.pyc,, +transformers/models/owlvit/__pycache__/feature_extraction_owlvit.cpython-310.pyc,, +transformers/models/owlvit/__pycache__/image_processing_owlvit.cpython-310.pyc,, +transformers/models/owlvit/__pycache__/image_processing_owlvit_fast.cpython-310.pyc,, +transformers/models/owlvit/__pycache__/modeling_owlvit.cpython-310.pyc,, +transformers/models/owlvit/__pycache__/processing_owlvit.cpython-310.pyc,, +transformers/models/owlvit/configuration_owlvit.py,sha256=KxKdzp6xBZOAEBDSGu_LRGYBKhjEg7tU8dfqLQXv6wo,14435 +transformers/models/owlvit/feature_extraction_owlvit.py,sha256=49Ic56gmQQtE_WEmzzyE9bVBdS5RMkG3vOK1cBcjc5g,1300 +transformers/models/owlvit/image_processing_owlvit.py,sha256=k0YJn8x5Rdi9Jb1oaXw7_Zf3f_1J16VVPkmiTdESE0w,29461 +transformers/models/owlvit/image_processing_owlvit_fast.py,sha256=7LZLoAn_n89oYr7s2oWltV7K9PzrgaFOQZLY1IqFZeI,10574 +transformers/models/owlvit/modeling_owlvit.py,sha256=n2rzrSHHmXVzjjdMdnox0JR_HNFXl_YqszW1eMhgLyY,74369 +transformers/models/owlvit/processing_owlvit.py,sha256=_Cx3nBjrYuSmbTs0JbhEqTfQJ9SyBrkWFY4x-0rqhk4,16098 +transformers/models/paligemma/__init__.py,sha256=nKnTTLC8XYlI7uYfS8h-D4vz3gFhknkNeDlZIwZlZ9w,1039 +transformers/models/paligemma/__pycache__/__init__.cpython-310.pyc,, +transformers/models/paligemma/__pycache__/configuration_paligemma.cpython-310.pyc,, +transformers/models/paligemma/__pycache__/modeling_paligemma.cpython-310.pyc,, +transformers/models/paligemma/__pycache__/processing_paligemma.cpython-310.pyc,, +transformers/models/paligemma/configuration_paligemma.py,sha256=BTK5bsUZwUovhsGqWHIzYidzy-XyBEtDEROPh3IZJ9w,5362 
+transformers/models/paligemma/modeling_paligemma.py,sha256=rolE38LifzlW5Oe2EJZS68ruDK1Wh0SM-5sBc9vEnRQ,27873 +transformers/models/paligemma/processing_paligemma.py,sha256=jmQZZLtljxjRuXCoeJ2nOS22vuLsLWk2y01gC6nl15s,16655 +transformers/models/patchtsmixer/__init__.py,sha256=deFjF_Tu67XcAcNHaq1PXO77N4kVW9wG80SnXBaeagE,1005 +transformers/models/patchtsmixer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/patchtsmixer/__pycache__/configuration_patchtsmixer.cpython-310.pyc,, +transformers/models/patchtsmixer/__pycache__/modeling_patchtsmixer.cpython-310.pyc,, +transformers/models/patchtsmixer/configuration_patchtsmixer.py,sha256=h1w-YRD_Q9AgQUKBRvzxi2JBEW35NbDap8xkdui-c3U,12580 +transformers/models/patchtsmixer/modeling_patchtsmixer.py,sha256=7LmwN4zwDH3UZYr4rvAy_nkbUVif29diRH5J3j9PFxE,85136 +transformers/models/patchtst/__init__.py,sha256=lrpuBvP25Yq6HZOCyS4yWVYZ47qWzK--rqC0AOIGGPE,997 +transformers/models/patchtst/__pycache__/__init__.cpython-310.pyc,, +transformers/models/patchtst/__pycache__/configuration_patchtst.cpython-310.pyc,, +transformers/models/patchtst/__pycache__/modeling_patchtst.cpython-310.pyc,, +transformers/models/patchtst/configuration_patchtst.py,sha256=FdiHfYFiHvo7kIuOV_zSGPHZ2Q-QYbPEB1ZkqLOc5qE,12309 +transformers/models/patchtst/modeling_patchtst.py,sha256=agtoNKf1FwixRzE18_JKZZgtXP6IDoZOESlmVNJvCqk,84644 +transformers/models/pegasus/__init__.py,sha256=4b7vCYJfIWUPuKrbcBGTG7LtobUdZ5ZjeQhloScTrXs,1160 +transformers/models/pegasus/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pegasus/__pycache__/configuration_pegasus.cpython-310.pyc,, +transformers/models/pegasus/__pycache__/modeling_flax_pegasus.cpython-310.pyc,, +transformers/models/pegasus/__pycache__/modeling_pegasus.cpython-310.pyc,, +transformers/models/pegasus/__pycache__/modeling_tf_pegasus.cpython-310.pyc,, +transformers/models/pegasus/__pycache__/tokenization_pegasus.cpython-310.pyc,, 
+transformers/models/pegasus/__pycache__/tokenization_pegasus_fast.cpython-310.pyc,, +transformers/models/pegasus/configuration_pegasus.py,sha256=78-WMVFtUhigUXXJ4PabYJA8S3VpfQW9-2NcM5t8Hlo,7517 +transformers/models/pegasus/modeling_flax_pegasus.py,sha256=Rkc0964DKuqgYPMmzKNcZx4_g3hOqV8kD5udpqy-wRE,66161 +transformers/models/pegasus/modeling_pegasus.py,sha256=V-T_8XBrn5lxevbxL5eOI3_xGxySsPLmFhTMc8wQXj0,77791 +transformers/models/pegasus/modeling_tf_pegasus.py,sha256=_WR-vfHxUBGzBUJVEsw7pajamDJQAeZZYsMvEU_HV_4,74149 +transformers/models/pegasus/tokenization_pegasus.py,sha256=Mlf8ZdllYQGJMktD0ci2aD46NczeKcuw-NZNgt9bkgw,13231 +transformers/models/pegasus/tokenization_pegasus_fast.py,sha256=c6xFEwXqtpScjYMasosqrAlwJMsdJCC_Sjp_BYniK7s,9833 +transformers/models/pegasus_x/__init__.py,sha256=qSLaqKRA1upZOobapHW5MjSZvIEzf-ij-ZmY1VGzqaE,999 +transformers/models/pegasus_x/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pegasus_x/__pycache__/configuration_pegasus_x.cpython-310.pyc,, +transformers/models/pegasus_x/__pycache__/modeling_pegasus_x.cpython-310.pyc,, +transformers/models/pegasus_x/configuration_pegasus_x.py,sha256=RasKHKP1N0ZEvsl81J2Y3jhNAJo0zplnAKI2ZqYJdv4,8132 +transformers/models/pegasus_x/modeling_pegasus_x.py,sha256=mcd2OiI39BvxTxqv0X-l4R83fa3_DKyAoyUrDb1pxfs,79133 +transformers/models/perceiver/__init__.py,sha256=LKUlUJfZGRC1jU6TNkG-4kNy8aIeHIqvAnwLI_33AVY,1186 +transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc,, +transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc,, +transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc,, +transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc,, +transformers/models/perceiver/__pycache__/image_processing_perceiver_fast.cpython-310.pyc,, +transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc,, +transformers/models/perceiver/__pycache__/tokenization_perceiver.cpython-310.pyc,, 
+transformers/models/perceiver/configuration_perceiver.py,sha256=0h5NCC6iJiA_cOv9gvcpxNgiFc0r25Rvv7PIHh1jp6Q,12236 +transformers/models/perceiver/feature_extraction_perceiver.py,sha256=JK3Y4won5macefR13tx-zdUF_TaHE4RrJllJyYzIhWU,1324 +transformers/models/perceiver/image_processing_perceiver.py,sha256=GmhCl-yDAAds6vhTgOMp-ZPo6JKEiWplFvylFSi7by0,17571 +transformers/models/perceiver/image_processing_perceiver_fast.py,sha256=zyymGbSd8od2ZgNP95LJbxsfa-J-tgY-DCK7KZF7J5E,5282 +transformers/models/perceiver/modeling_perceiver.py,sha256=kk3I_R7ctA2vuaaljOCxMzJrNmJJdPY_ko8gu2zv2UE,140575 +transformers/models/perceiver/tokenization_perceiver.py,sha256=9cCQTtfKJUzwWoUQ3YEBdsh9RrJgsL4N2kxA8fPQuqc,8034 +transformers/models/perception_lm/__init__.py,sha256=RVEjQWlzsHJm3D-3JXLThzBjJLSCMpvFslQMIkvRKiA,1106 +transformers/models/perception_lm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/perception_lm/__pycache__/configuration_perception_lm.cpython-310.pyc,, +transformers/models/perception_lm/__pycache__/image_processing_perception_lm_fast.cpython-310.pyc,, +transformers/models/perception_lm/__pycache__/modeling_perception_lm.cpython-310.pyc,, +transformers/models/perception_lm/__pycache__/modular_perception_lm.cpython-310.pyc,, +transformers/models/perception_lm/__pycache__/processing_perception_lm.cpython-310.pyc,, +transformers/models/perception_lm/__pycache__/video_processing_perception_lm.cpython-310.pyc,, +transformers/models/perception_lm/configuration_perception_lm.py,sha256=O8m8_bm2W3bvHjIpj_F5nKoz4kLsnB69KwO0mcLQUOw,3913 +transformers/models/perception_lm/image_processing_perception_lm_fast.py,sha256=a4zUQStRNFEHKZipQV253VCJSRlyFCwGJ4tVP3a2LAA,13960 +transformers/models/perception_lm/modeling_perception_lm.py,sha256=7Agne2NELeSRo0ZsY5NrVbYC8JxNA4BT0Ob6f_zN42g,20901 +transformers/models/perception_lm/modular_perception_lm.py,sha256=fpbNUIWb9Yc6UIIWmpR3tCkbu3mFopd4oQyqHzyUSFg,17020 
+transformers/models/perception_lm/processing_perception_lm.py,sha256=8E-OFrBDyyPBkoy3uoqljX_c99RzYqeZ0DdSYMeS5ZI,12156 +transformers/models/perception_lm/video_processing_perception_lm.py,sha256=ZcjOvRNlN9Bw1OQbnbokSiExl1Fqgb88G5ZuhUMKrjk,1776 +transformers/models/persimmon/__init__.py,sha256=T1WqyE78N2TO74u9a9QdRIGaMowYqP6vWv8KhPojkLg,999 +transformers/models/persimmon/__pycache__/__init__.cpython-310.pyc,, +transformers/models/persimmon/__pycache__/configuration_persimmon.cpython-310.pyc,, +transformers/models/persimmon/__pycache__/modeling_persimmon.cpython-310.pyc,, +transformers/models/persimmon/configuration_persimmon.py,sha256=6KV-r-B5sqC9d2Ybzind_YDxqnM0lITfbH5zPqGg6K4,9149 +transformers/models/persimmon/modeling_persimmon.py,sha256=iKIb74zJkvNr3NGVf1huU4ag-yKi_34auP6jyQOflQw,33855 +transformers/models/phi/__init__.py,sha256=4DUgmUqGKcGXxzTrxUVGcacZ43uv3SzXsOV_Ke6oeGg,1006 +transformers/models/phi/__pycache__/__init__.cpython-310.pyc,, +transformers/models/phi/__pycache__/configuration_phi.cpython-310.pyc,, +transformers/models/phi/__pycache__/modeling_phi.cpython-310.pyc,, +transformers/models/phi/__pycache__/modular_phi.cpython-310.pyc,, +transformers/models/phi/configuration_phi.py,sha256=xYk2xva2KXG5k_Dk8ND3JObDjcfPuklUdSkNIT0DYJ8,11172 +transformers/models/phi/modeling_phi.py,sha256=7Ft9QOISOyUOf51LVK_pCWbBlSTyu25PqdWYUYjiCqM,21991 +transformers/models/phi/modular_phi.py,sha256=BDYbqjm0GJsSrrIPhU04HoAJFMJBf-M0388Cr-XOd-I,11423 +transformers/models/phi3/__init__.py,sha256=dxyO-jIh0yB6t2Dzs173aRrEnTceVMIYIkg6JxIeyWs,989 +transformers/models/phi3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/phi3/__pycache__/configuration_phi3.cpython-310.pyc,, +transformers/models/phi3/__pycache__/modeling_phi3.cpython-310.pyc,, +transformers/models/phi3/__pycache__/modular_phi3.cpython-310.pyc,, +transformers/models/phi3/configuration_phi3.py,sha256=lzlUJCKk4nJl2v8R5AIK36b2bgr0stkGm9VYPZkurQU,11579 
+transformers/models/phi3/modeling_phi3.py,sha256=FDfpsOU67goO9ywNScy10ZEynpzjsSG_cKuq2t7_xGs,23093 +transformers/models/phi3/modular_phi3.py,sha256=wkJaCGqqcLxMlZNOtdwAUqMtmIvlRi7n0gkjrtmcYMQ,10908 +transformers/models/phi4_multimodal/__init__.py,sha256=EqoKUvkh9f14qg07g-4MLclztlyiyLfN2qqEp3RGp2w,1170 +transformers/models/phi4_multimodal/__pycache__/__init__.cpython-310.pyc,, +transformers/models/phi4_multimodal/__pycache__/configuration_phi4_multimodal.cpython-310.pyc,, +transformers/models/phi4_multimodal/__pycache__/feature_extraction_phi4_multimodal.cpython-310.pyc,, +transformers/models/phi4_multimodal/__pycache__/image_processing_phi4_multimodal_fast.cpython-310.pyc,, +transformers/models/phi4_multimodal/__pycache__/modeling_phi4_multimodal.cpython-310.pyc,, +transformers/models/phi4_multimodal/__pycache__/modular_phi4_multimodal.cpython-310.pyc,, +transformers/models/phi4_multimodal/__pycache__/processing_phi4_multimodal.cpython-310.pyc,, +transformers/models/phi4_multimodal/configuration_phi4_multimodal.py,sha256=RIdYbnvcs_8VkFcJY96M06PUEI1kt70WFU_YDE8-9vI,24371 +transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py,sha256=T1R76IxQCkMh-_jeP8zrBecywvbDnG5hNt_VJAKs2nk,13413 +transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py,sha256=k1bjVnH8awgbBeWMPNAVquxEOP4M5cATOQ1mtJswkvo,10788 +transformers/models/phi4_multimodal/modeling_phi4_multimodal.py,sha256=We0-oKgRk6sn5nuHL8FfMy2kxAExS7RjXyVnImnyUEI,82643 +transformers/models/phi4_multimodal/modular_phi4_multimodal.py,sha256=zN76oebknlbnvmMlrxKmPNugz-XGsfEfwY7Rzfiy8EU,77469 +transformers/models/phi4_multimodal/processing_phi4_multimodal.py,sha256=PwmFTGEWC203wMR9_nEmRm2SbL8zndl82bOPeRcx3KA,9094 +transformers/models/phimoe/__init__.py,sha256=wGasPysu0EH_q0QGaZmXqQL57GxfZn8NTsvB2I6U2ro,1013 +transformers/models/phimoe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/phimoe/__pycache__/configuration_phimoe.cpython-310.pyc,, 
+transformers/models/phimoe/__pycache__/modeling_phimoe.cpython-310.pyc,, +transformers/models/phimoe/configuration_phimoe.py,sha256=bmX2NBq89oFKQ5PnFmkcknw5dT6A9w2D9psS8jubltw,10238 +transformers/models/phimoe/modeling_phimoe.py,sha256=_Ft4kIkCISVOvRTjtKfIDjtjGxzQrZgY9EK6omUm_ww,60162 +transformers/models/phobert/__init__.py,sha256=mau-2HIOzSk8qGIhxivVBPPYTx3hhdgoKPtnptDF38M,958 +transformers/models/phobert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc,, +transformers/models/phobert/tokenization_phobert.py,sha256=0ItqQt-YiRb44Wqyp6e59UQOc_wSmErRFAziorm_w6o,13111 +transformers/models/pix2struct/__init__.py,sha256=ivncogrVjZZ6ag6FYHJ0XqyCMJYbsCYlh5boqxe09Yo,1089 +transformers/models/pix2struct/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc,, +transformers/models/pix2struct/__pycache__/image_processing_pix2struct.cpython-310.pyc,, +transformers/models/pix2struct/__pycache__/modeling_pix2struct.cpython-310.pyc,, +transformers/models/pix2struct/__pycache__/processing_pix2struct.cpython-310.pyc,, +transformers/models/pix2struct/configuration_pix2struct.py,sha256=eHg19KzSW5bh2dVTgMH7vhZPZu2yg6iEpy8DNO6bk8U,15370 +transformers/models/pix2struct/image_processing_pix2struct.py,sha256=gsCp_tcNsI6nuv_NSS7G0KkRL_jXjJWHjdDYt-cEF5k,19781 +transformers/models/pix2struct/modeling_pix2struct.py,sha256=UfgH0ZYM1vxrnVH1tb_fEV8SbHtonNW2rRzCo-nOTJQ,71754 +transformers/models/pix2struct/processing_pix2struct.py,sha256=7ynhvShIjU5tAuE6q3knIMT_cTe-sfFFE6bBMOyqwaQ,6325 +transformers/models/pixtral/__init__.py,sha256=WKCxuWpCeTYsYSaTH1XnUcGkIHEx5BIIXwwwqG_E83s,1126 +transformers/models/pixtral/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pixtral/__pycache__/configuration_pixtral.cpython-310.pyc,, +transformers/models/pixtral/__pycache__/image_processing_pixtral.cpython-310.pyc,, 
+transformers/models/pixtral/__pycache__/image_processing_pixtral_fast.cpython-310.pyc,, +transformers/models/pixtral/__pycache__/modeling_pixtral.cpython-310.pyc,, +transformers/models/pixtral/__pycache__/processing_pixtral.cpython-310.pyc,, +transformers/models/pixtral/configuration_pixtral.py,sha256=86cY74VW7J8XqU1JbvpxLqOXnnzoPh7I_9zja8j3Wng,4237 +transformers/models/pixtral/image_processing_pixtral.py,sha256=9lBhUiTv2Hq5rNGKkFzBGgfxdAV2d7CkgUXgLbA4_Lg,22007 +transformers/models/pixtral/image_processing_pixtral_fast.py,sha256=dAV5WDfZIcqT33s_3Oe1TSw3aIHfLlDliMNbZtqoe3w,8102 +transformers/models/pixtral/modeling_pixtral.py,sha256=SIIDzSJYp1EdnTmPBF6ids4xDDQI4ejfhAlBYwNWvec,21378 +transformers/models/pixtral/processing_pixtral.py,sha256=wPd__HHCXh4X7m0ZPAZY5H5jC35W70P_VqAgUmNSQao,13536 +transformers/models/plbart/__init__.py,sha256=jmP857QTG7jGfr9n0qK3TB_1-hdVDD1ajtJvP6C7FIw,1032 +transformers/models/plbart/__pycache__/__init__.cpython-310.pyc,, +transformers/models/plbart/__pycache__/configuration_plbart.cpython-310.pyc,, +transformers/models/plbart/__pycache__/modeling_plbart.cpython-310.pyc,, +transformers/models/plbart/__pycache__/modular_plbart.cpython-310.pyc,, +transformers/models/plbart/__pycache__/tokenization_plbart.cpython-310.pyc,, +transformers/models/plbart/configuration_plbart.py,sha256=N3T4lbCCGP3nYNIHYah1yMvDy1FiJqgpdHIAKJFfcTQ,8621 +transformers/models/plbart/modeling_plbart.py,sha256=Pv2SWaqVv982vteId1MvzLgYxXkq7nBgUQKTv5Wil1g,80417 +transformers/models/plbart/modular_plbart.py,sha256=l-C3COh_JeABbxHTiGZQoNJuXkulPRH64btPNtnzQnU,31305 +transformers/models/plbart/tokenization_plbart.py,sha256=ytGwNtPfgHXoKdK85Qm1fz68tBU-qbrYwdMLd96d2Xs,18910 +transformers/models/poolformer/__init__.py,sha256=FgSXHIGeF8uz-Ye67HSRQefjounknzaqm0ZCOiMj4zo,1149 +transformers/models/poolformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/poolformer/__pycache__/configuration_poolformer.cpython-310.pyc,, 
+transformers/models/poolformer/__pycache__/feature_extraction_poolformer.cpython-310.pyc,, +transformers/models/poolformer/__pycache__/image_processing_poolformer.cpython-310.pyc,, +transformers/models/poolformer/__pycache__/image_processing_poolformer_fast.cpython-310.pyc,, +transformers/models/poolformer/__pycache__/modeling_poolformer.cpython-310.pyc,, +transformers/models/poolformer/configuration_poolformer.py,sha256=mU4fQSyfdSwP-vB3UIAkNuYI6wyqhxu2R3SOupiY2pc,5641 +transformers/models/poolformer/feature_extraction_poolformer.py,sha256=8NBHTCScDnuQAjwNVL1Mxs4xllp9FnJCSonL_ceF_lg,1332 +transformers/models/poolformer/image_processing_poolformer.py,sha256=nSno0MMnMsS8I4Hvw2majioTOt8pze37RH3rNsw05dk,17922 +transformers/models/poolformer/image_processing_poolformer_fast.py,sha256=G-ZZt8KLGKRRkCH5f7XcFVoDccI8p2O70pvSIB3osxU,10569 +transformers/models/poolformer/modeling_poolformer.py,sha256=H0hY5vk3xaARWeFeAfjeiO5XI4mT7crpTSp2drirAX0,15954 +transformers/models/pop2piano/__init__.py,sha256=I2PPcFi-p0X5py7dLqobymv3E9g-mUv1QRn0luyPlIk,999 +transformers/models/pop2piano/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pop2piano/__pycache__/configuration_pop2piano.cpython-310.pyc,, +transformers/models/pop2piano/__pycache__/feature_extraction_pop2piano.cpython-310.pyc,, +transformers/models/pop2piano/__pycache__/modeling_pop2piano.cpython-310.pyc,, +transformers/models/pop2piano/__pycache__/processing_pop2piano.cpython-310.pyc,, +transformers/models/pop2piano/__pycache__/tokenization_pop2piano.cpython-310.pyc,, +transformers/models/pop2piano/configuration_pop2piano.py,sha256=aAnTDZdBrl19Kg6eOuPs13cz1_9ITlN7IgxysOqDGT4,5959 +transformers/models/pop2piano/feature_extraction_pop2piano.py,sha256=DQb7Y7mxkvYfwDbZQ-CQYzdOQ9mX17D-lhcsMMW4538,19974 +transformers/models/pop2piano/modeling_pop2piano.py,sha256=7cuWqiawM6LQm4uQlBB4vTAPLQKc_rXWupGFWkyIkPM,62701 +transformers/models/pop2piano/processing_pop2piano.py,sha256=swbOlXoxu939BHeRr1MRo3WUCvQgxSjNBBh0uTWe8fk,5683 
+transformers/models/pop2piano/tokenization_pop2piano.py,sha256=XGoKjV1wPf14Taef2N-fXQRIi0kU3aZBUBdDl_ve7dw,32802 +transformers/models/prompt_depth_anything/__init__.py,sha256=7hl1iucaCG_JLQIF-336EbE7TmCzeO_BGvNZmN3w5RU,1234 +transformers/models/prompt_depth_anything/__pycache__/__init__.cpython-310.pyc,, +transformers/models/prompt_depth_anything/__pycache__/configuration_prompt_depth_anything.cpython-310.pyc,, +transformers/models/prompt_depth_anything/__pycache__/image_processing_prompt_depth_anything.cpython-310.pyc,, +transformers/models/prompt_depth_anything/__pycache__/modeling_prompt_depth_anything.cpython-310.pyc,, +transformers/models/prompt_depth_anything/__pycache__/modular_prompt_depth_anything.cpython-310.pyc,, +transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py,sha256=vL-HwgUR7moy5eBRTH0t1qPDduSQ40J9F6i9rpNZG-Y,9062 +transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py,sha256=20q7lfiocfHNYRVFXbC-WBdffWrlXWSxbxgAaD1FDno,24850 +transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py,sha256=NOLykApW2CBZ6XWBCNoT7_GxkEV0uJCQyDKGb4iVK7U,20380 +transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py,sha256=wP11VynfmVInYHvH4GeJDd1ELl7C7nj8evmivgntkDQ,13755 +transformers/models/prophetnet/__init__.py,sha256=TYI21JDlj449kTgKAOtUBpuxVv5L_I70CDjofSZ627M,1044 +transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc,, +transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc,, +transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc,, +transformers/models/prophetnet/configuration_prophetnet.py,sha256=mp5AYM4dewIqBG9e9x0_t6lwPEguvoLJIctBzj_TmZM,8919 +transformers/models/prophetnet/modeling_prophetnet.py,sha256=cHeA3VcnjbKo4a7eaVxqqDEskYLASbcWfGl4VpYwTy8,96717 
+transformers/models/prophetnet/tokenization_prophetnet.py,sha256=JsP4EwckyDeORtcAsVlrNlfLXo_YriRbiZwLcKEzFDI,20154 +transformers/models/pvt/__init__.py,sha256=-4ajQRrz2cTp2czAd6D23yxShatfUpHzZrHyyLRsku0,1072 +transformers/models/pvt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc,, +transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc,, +transformers/models/pvt/__pycache__/image_processing_pvt_fast.cpython-310.pyc,, +transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc,, +transformers/models/pvt/configuration_pvt.py,sha256=NivJRKXgMQ-F4SOqf7Z3nFNWxJKdsV6iqJ2YdVvrtj0,6983 +transformers/models/pvt/image_processing_pvt.py,sha256=ALCRVpWFt18qSX5fxZuBWBqHEPY8kPmI708bUvnzWYg,13862 +transformers/models/pvt/image_processing_pvt_fast.py,sha256=C2hhtB4aUqbLdVawDaeubuef9fSkC-oNT0c4WJK8Ja0,1341 +transformers/models/pvt/modeling_pvt.py,sha256=BhG1tRpznXNSfEPkJ0cgWCLlq4OpAhMbhLuWk7o9iZE,25801 +transformers/models/pvt_v2/__init__.py,sha256=LkmqeLd7cZGKTFX_2d9_jU0sj_bDlML042kr_vMJTLw,993 +transformers/models/pvt_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/pvt_v2/__pycache__/configuration_pvt_v2.cpython-310.pyc,, +transformers/models/pvt_v2/__pycache__/modeling_pvt_v2.cpython-310.pyc,, +transformers/models/pvt_v2/configuration_pvt_v2.py,sha256=7LMMRQUgb_xYvWFkGtLxA6k_a10cBFUtSx8vejJup38,7978 +transformers/models/pvt_v2/modeling_pvt_v2.py,sha256=kVdiGbm_R-7zEKGfNXLwgz2NXcgLN0JrZFKUEK_Lyzs,26480 +transformers/models/qwen2/__init__.py,sha256=e49oEzErXujE0UVl_q_agf5XHzHES4vV2kLwmqdk2kg,1095 +transformers/models/qwen2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen2/__pycache__/configuration_qwen2.cpython-310.pyc,, +transformers/models/qwen2/__pycache__/modeling_qwen2.cpython-310.pyc,, +transformers/models/qwen2/__pycache__/modular_qwen2.cpython-310.pyc,, +transformers/models/qwen2/__pycache__/tokenization_qwen2.cpython-310.pyc,, 
+transformers/models/qwen2/__pycache__/tokenization_qwen2_fast.cpython-310.pyc,, +transformers/models/qwen2/configuration_qwen2.py,sha256=pn06fpnCFoGGJj7DVY1wZRIGOrfnn7wtdAJoJ179b9g,11376 +transformers/models/qwen2/modeling_qwen2.py,sha256=6xu0Y4si4BxfeJj09hwxwGVbDs6ziEYjReoUAW4WZEM,21407 +transformers/models/qwen2/modular_qwen2.py,sha256=sHNBzEdJxPUhgQfukTiguseL4A96BJGhg9_g14Q11dY,8064 +transformers/models/qwen2/tokenization_qwen2.py,sha256=I_BWl_yvJv5eMoq69STwEFEKK59LouLtydygpAFaCaI,13935 +transformers/models/qwen2/tokenization_qwen2_fast.py,sha256=ECWjuGUmKDvYakR_D-LZkdCXdbMtP9zCM8nkR7BhEEk,5210 +transformers/models/qwen2_5_omni/__init__.py,sha256=YEDAlOoWmhkZ4L6lxmlVqVhe5A0P6aVSJNSziEFSN4E,1071 +transformers/models/qwen2_5_omni/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen2_5_omni/__pycache__/configuration_qwen2_5_omni.cpython-310.pyc,, +transformers/models/qwen2_5_omni/__pycache__/modeling_qwen2_5_omni.cpython-310.pyc,, +transformers/models/qwen2_5_omni/__pycache__/modular_qwen2_5_omni.cpython-310.pyc,, +transformers/models/qwen2_5_omni/__pycache__/processing_qwen2_5_omni.cpython-310.pyc,, +transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py,sha256=qYeM_49riVzDLVSR_5jQDO9uXVt3jRGaMk91UMl7qzs,52773 +transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py,sha256=717mqSmd0nBHJXFQc6TL63lGtKFN5uO-D0KlnFHwE-M,177442 +transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py,sha256=v3HV8T7HW9WPotZDOGJ47BL4iJqTT01Q4SHwQB082e4,191269 +transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py,sha256=t4yWnsBcPKZi5AIZ-mDW7tuEqxM7Q0wuW5pof8tYPg8,17618 +transformers/models/qwen2_5_vl/__init__.py,sha256=8-dsgLIeeE3n90n6F0XOu-tBZ-80Wotz89pjZi5GqjQ,1065 +transformers/models/qwen2_5_vl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen2_5_vl/__pycache__/configuration_qwen2_5_vl.cpython-310.pyc,, +transformers/models/qwen2_5_vl/__pycache__/modeling_qwen2_5_vl.cpython-310.pyc,, 
+transformers/models/qwen2_5_vl/__pycache__/modular_qwen2_5_vl.cpython-310.pyc,, +transformers/models/qwen2_5_vl/__pycache__/processing_qwen2_5_vl.cpython-310.pyc,, +transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py,sha256=dLuPdCvod3_PeSHP0UWa3BOkyYm-uStjyDnSLhzRT3U,17154 +transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py,sha256=0Lij4D_kG06NUzsVLmKZTPgA5xE0o8EcVzeHR_bLq7E,83353 +transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py,sha256=biQTYkmYAshnHtSd9Aa-XKKbu_-yY3qrfQCdClQ7l7c,49278 +transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py,sha256=i2sxJ24Y-s4G_lJbAFfTiM0eIdEmA_BFhlxNi1gTbrE,15499 +transformers/models/qwen2_audio/__init__.py,sha256=KaUmP3FK3GdeWvbunzyp1QjBki0USS4E80NlvhaJ3D8,1045 +transformers/models/qwen2_audio/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen2_audio/__pycache__/configuration_qwen2_audio.cpython-310.pyc,, +transformers/models/qwen2_audio/__pycache__/modeling_qwen2_audio.cpython-310.pyc,, +transformers/models/qwen2_audio/__pycache__/processing_qwen2_audio.cpython-310.pyc,, +transformers/models/qwen2_audio/configuration_qwen2_audio.py,sha256=U37sA7O8GvbrlFOwXgBSvqu28Kz2uKJORqjthqH8UnE,8673 +transformers/models/qwen2_audio/modeling_qwen2_audio.py,sha256=-gMWHKbcAT5rILYmNVypl9kkWEiKOVYKRY6waW6poUQ,41729 +transformers/models/qwen2_audio/processing_qwen2_audio.py,sha256=H_nHzECaovVjOso57kFB1rxaA_mJveGMvxbMTuwqzoc,11924 +transformers/models/qwen2_moe/__init__.py,sha256=TZM20WtUr1UyV-hDDgq5B-qFT4aUulMpjWwSUNdUs2w,999 +transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc,, +transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc,, +transformers/models/qwen2_moe/configuration_qwen2_moe.py,sha256=3BBfpz3Pu3DuaCuTtWaFdY_JK5QrLVWrW-Ri-Ky205I,13228 +transformers/models/qwen2_moe/modeling_qwen2_moe.py,sha256=2bOS-C3IudN9IKST9UTBWGU8-3v3SSr2V60Q2DvkPFQ,54906 
+transformers/models/qwen2_vl/__init__.py,sha256=MtNDD6sEQws-WTLwPxUL5UNd-UyDPrDh8yWzIAsRp-U,1131 +transformers/models/qwen2_vl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen2_vl/__pycache__/configuration_qwen2_vl.cpython-310.pyc,, +transformers/models/qwen2_vl/__pycache__/image_processing_qwen2_vl.cpython-310.pyc,, +transformers/models/qwen2_vl/__pycache__/image_processing_qwen2_vl_fast.cpython-310.pyc,, +transformers/models/qwen2_vl/__pycache__/modeling_qwen2_vl.cpython-310.pyc,, +transformers/models/qwen2_vl/__pycache__/processing_qwen2_vl.cpython-310.pyc,, +transformers/models/qwen2_vl/__pycache__/video_processing_qwen2_vl.cpython-310.pyc,, +transformers/models/qwen2_vl/configuration_qwen2_vl.py,sha256=S6qKbyCE2CQNczuj7XM5NutobLWJe7bPlLvF10U_YyI,15808 +transformers/models/qwen2_vl/image_processing_qwen2_vl.py,sha256=dJrdm_LUohFwJx84T11EssmOJQvIXgxUy0MTtaUd6SE,26324 +transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py,sha256=cNRQJ7cbAHm4-v_OKnun8T1YUOcK2uMrTvpKYU-_8jM,12986 +transformers/models/qwen2_vl/modeling_qwen2_vl.py,sha256=tmDxaG9pnimsxfvZe8o5G4I1KsXCtizqTkCk7ObKnGc,75451 +transformers/models/qwen2_vl/processing_qwen2_vl.py,sha256=fjsmWwxkeJI-CdTZkSNHUg6dNl1eWkvZp7BD-KBdgKQ,13671 +transformers/models/qwen2_vl/video_processing_qwen2_vl.py,sha256=_wBOk3XZb0dfCtTXunqnz8HpEjc5C6PaXz6kwsZe4Zg,15183 +transformers/models/qwen3/__init__.py,sha256=5JU8uO9x0AmJ-YjY36MxtbMKT_B38dLJkrnAwLyjcTY,1014 +transformers/models/qwen3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen3/__pycache__/configuration_qwen3.cpython-310.pyc,, +transformers/models/qwen3/__pycache__/modeling_qwen3.cpython-310.pyc,, +transformers/models/qwen3/__pycache__/modular_qwen3.cpython-310.pyc,, +transformers/models/qwen3/configuration_qwen3.py,sha256=Er_c6Ruk-DaiVHVe8-h2Z49BgYtejJuSMsNuu1NFYyA,11786 +transformers/models/qwen3/modeling_qwen3.py,sha256=LURB42tAeZsFq1FNh3SxLhX9VBJpnURFCbCmEM4jybg,22873 
+transformers/models/qwen3/modular_qwen3.py,sha256=dpeS1Grd05K-bvIJRTr0vMBIOes961i0Pls-dGxs0Ac,6095 +transformers/models/qwen3_moe/__init__.py,sha256=q5WfIniJecmOju3Lhy277H3Puu7viwc9vUhUWen3UZY,999 +transformers/models/qwen3_moe/__pycache__/__init__.cpython-310.pyc,, +transformers/models/qwen3_moe/__pycache__/configuration_qwen3_moe.cpython-310.pyc,, +transformers/models/qwen3_moe/__pycache__/modeling_qwen3_moe.cpython-310.pyc,, +transformers/models/qwen3_moe/__pycache__/modular_qwen3_moe.cpython-310.pyc,, +transformers/models/qwen3_moe/configuration_qwen3_moe.py,sha256=DQzCSiwo1SfPrEUr_wJV14N8qM7iVAIuhGDIt7BZPHw,12862 +transformers/models/qwen3_moe/modeling_qwen3_moe.py,sha256=_brLA7iMQ07-dcnP0pSsLRXaMAl-bpzJjvlbZysuRv4,32028 +transformers/models/qwen3_moe/modular_qwen3_moe.py,sha256=RLFNW6hZPXyZyNjTi-nQaAVTYV8oWEGB5LwgoU8CEYQ,12015 +transformers/models/rag/__init__.py,sha256=89sLlT4QJ96h0U-X6FmTdfSNJ8NjDjTpqyI1yK0L1Cw,1091 +transformers/models/rag/__pycache__/__init__.cpython-310.pyc,, +transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc,, +transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc,, +transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc,, +transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc,, +transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc,, +transformers/models/rag/configuration_rag.py,sha256=dFbQO0qhT-mKYoTEmZAXlpwHSHaE4CVWNGcY7D7_yGo,8523 +transformers/models/rag/modeling_rag.py,sha256=MUx6u4ssokzsIzDN69LdG6SvWWVfVcu_OPBEk0Icihc,89290 +transformers/models/rag/modeling_tf_rag.py,sha256=wIb8IlLjKC0tMxFOzRjoZ9sqfFPWEZYLlGyx9Ev4m4Y,88810 +transformers/models/rag/retrieval_rag.py,sha256=fadEhEpbmWHBchZmvmpyNK1GEYqAqbq0fbXw_TuR06E,30164 +transformers/models/rag/tokenization_rag.py,sha256=5UVTej-039v54SV8nC9StpNMSFMIxPCqo0srnrVsnKA,4610 +transformers/models/recurrent_gemma/__init__.py,sha256=i86Cydx-eAdwsVMjNc0yG9hGxe_amyfAdvF5Eg-UCGM,1011 
+transformers/models/recurrent_gemma/__pycache__/__init__.cpython-310.pyc,, +transformers/models/recurrent_gemma/__pycache__/configuration_recurrent_gemma.cpython-310.pyc,, +transformers/models/recurrent_gemma/__pycache__/modeling_recurrent_gemma.cpython-310.pyc,, +transformers/models/recurrent_gemma/configuration_recurrent_gemma.py,sha256=w0mD1rnokEkBuvDNCW0mMJlO0DsF0TuG2JyJSmdqGmI,7750 +transformers/models/recurrent_gemma/modeling_recurrent_gemma.py,sha256=8YIzUqwWT_g5jHGf1-CSb4FnxBwzHiiAccOQ5NXHyH4,35977 +transformers/models/reformer/__init__.py,sha256=zjiMjHIRPssQ8pVa4fQ0zMCCn0ee_mtJt6wc9J23QYQ,1084 +transformers/models/reformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/reformer/__pycache__/configuration_reformer.cpython-310.pyc,, +transformers/models/reformer/__pycache__/modeling_reformer.cpython-310.pyc,, +transformers/models/reformer/__pycache__/tokenization_reformer.cpython-310.pyc,, +transformers/models/reformer/__pycache__/tokenization_reformer_fast.cpython-310.pyc,, +transformers/models/reformer/configuration_reformer.py,sha256=MnTplogKNnkWOIGQHLRx5qmrZOBqIqXfeyLzZPy89IA,13196 +transformers/models/reformer/modeling_reformer.py,sha256=4DgHnn_9dJmIsB6fIzBUOUIlMi36_9BOzrlkAdtNPos,117939 +transformers/models/reformer/tokenization_reformer.py,sha256=B5EhgmnvgvW8NiLWDq198Mh7IqUmnDYVUKoh0ECgbD4,6823 +transformers/models/reformer/tokenization_reformer_fast.py,sha256=Ow1TJe2MIatlbk0fYAfAZySEfPfWUpaAahzJvDrnAMQ,4137 +transformers/models/regnet/__init__.py,sha256=X_FU3wnZJ5KkCmRi4EyHk6ZUm_f0--YyyTS8lrknS9Y,1071 +transformers/models/regnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/regnet/__pycache__/configuration_regnet.cpython-310.pyc,, +transformers/models/regnet/__pycache__/modeling_flax_regnet.cpython-310.pyc,, +transformers/models/regnet/__pycache__/modeling_regnet.cpython-310.pyc,, +transformers/models/regnet/__pycache__/modeling_tf_regnet.cpython-310.pyc,, 
+transformers/models/regnet/configuration_regnet.py,sha256=TgYggQiYssFjcXjzLIe5ZDVrMxP4qQl1ZpmvZhLi2Ig,3974 +transformers/models/regnet/modeling_flax_regnet.py,sha256=ov6SXyXtkFXwNSC9quWte1vMl6AZkeJ49hlqp0l171k,28519 +transformers/models/regnet/modeling_regnet.py,sha256=DPNqyRFpYOaHykOKATThRS91g12G9jxoW_xxO2C02EU,15280 +transformers/models/regnet/modeling_tf_regnet.py,sha256=wcPOoY6_xunAZIg2wc3pBFKq5Y31sDJzC0N4MB4GgCw,24396 +transformers/models/rembert/__init__.py,sha256=Gif9TX1kvmD5iVWqsViSjxKYIDhR3FiBfp_QfA7U7i4,1119 +transformers/models/rembert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/rembert/__pycache__/configuration_rembert.cpython-310.pyc,, +transformers/models/rembert/__pycache__/modeling_rembert.cpython-310.pyc,, +transformers/models/rembert/__pycache__/modeling_tf_rembert.cpython-310.pyc,, +transformers/models/rembert/__pycache__/tokenization_rembert.cpython-310.pyc,, +transformers/models/rembert/__pycache__/tokenization_rembert_fast.cpython-310.pyc,, +transformers/models/rembert/configuration_rembert.py,sha256=wSBV6VLEvcKMvP2PZw6KgaqelYzyIWhK_NZDV8kzX_8,7300 +transformers/models/rembert/modeling_rembert.py,sha256=hsyZNT9cR41WTgx3ZNkd9K0HgqdQE9R8E5cNv_2qp2k,58285 +transformers/models/rembert/modeling_tf_rembert.py,sha256=ChyRVbTldGrTuWayyNLjm91mS4lOK25Q4SAuNTF5Fzo,77771 +transformers/models/rembert/tokenization_rembert.py,sha256=K_x4GpkaWMCipug1ojjPMKyiPMk-JwJSXBfXiLYIWm0,9566 +transformers/models/rembert/tokenization_rembert_fast.py,sha256=7T60RZ34azXx0zfaxE8Qh-4IJOYx7M-j59QDQO5BDNE,8747 +transformers/models/resnet/__init__.py,sha256=NCgMoczDbEI_XDWkWNWKIKGPYeohOC95f0o2X-Vh2vA,1071 +transformers/models/resnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/resnet/__pycache__/configuration_resnet.cpython-310.pyc,, +transformers/models/resnet/__pycache__/modeling_flax_resnet.cpython-310.pyc,, +transformers/models/resnet/__pycache__/modeling_resnet.cpython-310.pyc,, 
+transformers/models/resnet/__pycache__/modeling_tf_resnet.cpython-310.pyc,, +transformers/models/resnet/configuration_resnet.py,sha256=RUWWvz_KwilBJVOlaY1cK0CN078VPXPwzlOs2jZmd6I,6076 +transformers/models/resnet/modeling_flax_resnet.py,sha256=2r5isBIaKQIcGeDKWFtwbsgatGZDTXu0rpEKVbeT5xE,24708 +transformers/models/resnet/modeling_resnet.py,sha256=zEaFUeej9lwZbF5BmmETCIiJFo_dsghtAGf2OrScrLY,17206 +transformers/models/resnet/modeling_tf_resnet.py,sha256=FS0VNZqUMq7sXABV-GE3stcUpYd2bVUGVNh2xWLJdro,23774 +transformers/models/roberta/__init__.py,sha256=p1qYu_9qpmxsxMfXuoxK-VrmRQMEshwiM8Ekoij2J1M,1160 +transformers/models/roberta/__pycache__/__init__.cpython-310.pyc,, +transformers/models/roberta/__pycache__/configuration_roberta.cpython-310.pyc,, +transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc,, +transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc,, +transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc,, +transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc,, +transformers/models/roberta/__pycache__/tokenization_roberta_fast.cpython-310.pyc,, +transformers/models/roberta/configuration_roberta.py,sha256=BYTjf1lo0Mk9xYqZjNHsZSlxPG7zJPcFl8ttr6LK8Ew,7336 +transformers/models/roberta/modeling_flax_roberta.py,sha256=hW7TxugQammDOpLCTZW3X3TjcJuy81AIdDztOcxzs-A,57284 +transformers/models/roberta/modeling_roberta.py,sha256=te3VVQxnRHy-kd-nWudcRgzaljThGYHfar73fhLSwD4,71149 +transformers/models/roberta/modeling_tf_roberta.py,sha256=5wdoYO0aaDfd5u6iwEYjSLuhp3Mti8jODh9OdXWRQt0,79951 +transformers/models/roberta/tokenization_roberta.py,sha256=FtKax5F5Cg4uJR7aWs62l9Tp0uDcVLW2dZKfrYfarrg,16469 +transformers/models/roberta/tokenization_roberta_fast.py,sha256=JYe2lmZugU3J7PEKn_SegaFURSAPLUejM6ckH_SqWmY,10978 +transformers/models/roberta_prelayernorm/__init__.py,sha256=QsVJJaoujnLHyCgwSsz53MV88vI183tTGJNXHDCHCAc,1127 
+transformers/models/roberta_prelayernorm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/roberta_prelayernorm/__pycache__/configuration_roberta_prelayernorm.cpython-310.pyc,, +transformers/models/roberta_prelayernorm/__pycache__/modeling_flax_roberta_prelayernorm.cpython-310.pyc,, +transformers/models/roberta_prelayernorm/__pycache__/modeling_roberta_prelayernorm.cpython-310.pyc,, +transformers/models/roberta_prelayernorm/__pycache__/modeling_tf_roberta_prelayernorm.cpython-310.pyc,, +transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py,sha256=YAxqjJmTFys2-TCyo4-B8Y1mm-kF2ExOqmxiSw5i4C4,7908 +transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py,sha256=fQzWW2x-4ffASPs5V3shK4ctvdvmBlKrqmBeP6mUsEM,60941 +transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py,sha256=8cCF31F8fJRC3vsiP0nPh12ikg33drdHiw10IDCLXks,66495 +transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py,sha256=Cv6jETaFoVnYC4rbko5lk3KL11Q7CuWMqaKJlCufNqs,83224 +transformers/models/roc_bert/__init__.py,sha256=4CveMGU-dY3nV4E6x-Xpb1jicRniwrPuSOrY8-SHIUI,1038 +transformers/models/roc_bert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc,, +transformers/models/roc_bert/__pycache__/modeling_roc_bert.cpython-310.pyc,, +transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc,, +transformers/models/roc_bert/configuration_roc_bert.py,sha256=3w3t43X0ZkziGeftmlFg8yozWj57Pn5kynhbxXUkNMk,8544 +transformers/models/roc_bert/modeling_roc_bert.py,sha256=tzCqJtK9NkWY3VuvabsGF4ygo19cmB3ptiyoI41VM5A,88665 +transformers/models/roc_bert/tokenization_roc_bert.py,sha256=Zo8YLFlTCOtUMFXU-et41Ktw_S61n9XRHn7DulIJ4tQ,49487 +transformers/models/roformer/__init__.py,sha256=v1CIjowYMq6aN-V9gyl-RWlMi_uQQxopuvEv76geFqk,1166 +transformers/models/roformer/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/roformer/__pycache__/configuration_roformer.cpython-310.pyc,, +transformers/models/roformer/__pycache__/modeling_flax_roformer.cpython-310.pyc,, +transformers/models/roformer/__pycache__/modeling_roformer.cpython-310.pyc,, +transformers/models/roformer/__pycache__/modeling_tf_roformer.cpython-310.pyc,, +transformers/models/roformer/__pycache__/tokenization_roformer.cpython-310.pyc,, +transformers/models/roformer/__pycache__/tokenization_roformer_fast.cpython-310.pyc,, +transformers/models/roformer/__pycache__/tokenization_utils.cpython-310.pyc,, +transformers/models/roformer/configuration_roformer.py,sha256=22jMpd4-nBlP7kJAlZADxFmr76zor7BRBd8orUW23go,6865 +transformers/models/roformer/modeling_flax_roformer.py,sha256=RwabkHxAdW4jApBUbJwjXESKCg-xcez9uNBAzya_xP4,39383 +transformers/models/roformer/modeling_roformer.py,sha256=m6gsHipkUMQE0MFDkVn0cdHkIK66EPC2vKTW3yvxfFI,64756 +transformers/models/roformer/modeling_tf_roformer.py,sha256=wevLVlaGrv6P6sP8zY3Mpg9fUsZRqb-PLRdOAT_OpWo,66049 +transformers/models/roformer/tokenization_roformer.py,sha256=Dlj00LiDLK0PSbGRlTOVpTXhYFj9yBvMqFqaEiNQSwk,20858 +transformers/models/roformer/tokenization_roformer_fast.py,sha256=BGhYYclZeX8qFSf0XOvATd9gH8rqlIqW4QCQq-umMXY,5584 +transformers/models/roformer/tokenization_utils.py,sha256=v_Qvq0uBuHpE43oIM64g9kTZcy8BD9oHhOR_ketIyIg,2625 +transformers/models/rt_detr/__init__.py,sha256=c9Y3NeKQwBP46tyFF99kjqTngoIWhLMq7XvzEJOfLaY,1181 +transformers/models/rt_detr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/rt_detr/__pycache__/configuration_rt_detr.cpython-310.pyc,, +transformers/models/rt_detr/__pycache__/configuration_rt_detr_resnet.cpython-310.pyc,, +transformers/models/rt_detr/__pycache__/image_processing_rt_detr.cpython-310.pyc,, +transformers/models/rt_detr/__pycache__/image_processing_rt_detr_fast.cpython-310.pyc,, +transformers/models/rt_detr/__pycache__/modeling_rt_detr.cpython-310.pyc,, 
+transformers/models/rt_detr/__pycache__/modeling_rt_detr_resnet.cpython-310.pyc,, +transformers/models/rt_detr/__pycache__/modular_rt_detr.cpython-310.pyc,, +transformers/models/rt_detr/configuration_rt_detr.py,sha256=Khno06NPBiP_2Nq7_uy569OoiR6sb7vdxJ1jMVOtkCA,18295 +transformers/models/rt_detr/configuration_rt_detr_resnet.py,sha256=kBbmglFZkq0cqLsz1VZwTXVLHQnjnLjtFbkfMMbVOmM,5557 +transformers/models/rt_detr/image_processing_rt_detr.py,sha256=eqVRFSxM8sBKw9BWZVs8Uk5aMw1U5gQgdnIg4ah2mP4,51686 +transformers/models/rt_detr/image_processing_rt_detr_fast.py,sha256=vROKjVHLTKJHAe4iizfwOaJWr6-2-mbVHIDfb6Wc2G4,25994 +transformers/models/rt_detr/modeling_rt_detr.py,sha256=ZP01SZGMd4xxm6Xk7mYyRAQlm-MVZhbCwPVK8suMPMA,95201 +transformers/models/rt_detr/modeling_rt_detr_resnet.py,sha256=kcg9CKqgbAWwJQ8e2HvhSDcHmq8yb1Bjk8dKsmATrNk,14612 +transformers/models/rt_detr/modular_rt_detr.py,sha256=anZIG2NBft3D5Jgl_Aqn1RwuK8BAc-heCO7R0E0PFxs,15124 +transformers/models/rt_detr_v2/__init__.py,sha256=7RL5U-hsGt3HQZ5SuWn8iZY_L166EYswBvaQXFRkzRc,1003 +transformers/models/rt_detr_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/rt_detr_v2/__pycache__/configuration_rt_detr_v2.cpython-310.pyc,, +transformers/models/rt_detr_v2/__pycache__/modeling_rt_detr_v2.cpython-310.pyc,, +transformers/models/rt_detr_v2/__pycache__/modular_rt_detr_v2.cpython-310.pyc,, +transformers/models/rt_detr_v2/configuration_rt_detr_v2.py,sha256=IjGQgzggWMu4uGoPEn8DHMc00lGJCdTcu2fvZ6NamQs,19837 +transformers/models/rt_detr_v2/modeling_rt_detr_v2.py,sha256=ii7UpPiZ_cXtTLgztKigSXtpfm-b9hhOaW1zypqAqZg,96244 +transformers/models/rt_detr_v2/modular_rt_detr_v2.py,sha256=AB-Ebfpiw6bwGmGHQi0R9_yxqtGaH2JZloMo4ws9PNg,29739 +transformers/models/rwkv/__init__.py,sha256=HAiwEvW1j_xuHj_PbmN25srY9RtA1gLmN_0RWvAyG78,989 +transformers/models/rwkv/__pycache__/__init__.cpython-310.pyc,, +transformers/models/rwkv/__pycache__/configuration_rwkv.cpython-310.pyc,, 
+transformers/models/rwkv/__pycache__/modeling_rwkv.cpython-310.pyc,, +transformers/models/rwkv/configuration_rwkv.py,sha256=0hwiEhaLNCekxOiYD_D-e95ftq7_aazx9ImRtf0ydWc,5204 +transformers/models/rwkv/modeling_rwkv.py,sha256=G4ssmavN9yaF8dhGtmjztZ_IOXibdndY-uDXLocOqxg,33436 +transformers/models/sam/__init__.py,sha256=ilUO6W284DgX2BijkzdGXaw-OZsSrEo-qjZqiidfOEY,1141 +transformers/models/sam/__pycache__/__init__.cpython-310.pyc,, +transformers/models/sam/__pycache__/configuration_sam.cpython-310.pyc,, +transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc,, +transformers/models/sam/__pycache__/image_processing_sam_fast.cpython-310.pyc,, +transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc,, +transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc,, +transformers/models/sam/__pycache__/processing_sam.cpython-310.pyc,, +transformers/models/sam/configuration_sam.py,sha256=q6C10OyYdHicWRfuaudnl_5K9I3LH8nPQj_1wXPGegw,14716 +transformers/models/sam/image_processing_sam.py,sha256=f9pyhLro6KoxT0c9iLRvyrLhVdf5vzy4HeB_okVm-lI,68043 +transformers/models/sam/image_processing_sam_fast.py,sha256=8P4F716yqZkDS86240P1MtVUy0k3pp9QrsCtnyJRvB4,34233 +transformers/models/sam/modeling_sam.py,sha256=gXjFWlLuF0OymfEAWCiKSseU-0YP8mm9GpRp7ySz72M,61921 +transformers/models/sam/modeling_tf_sam.py,sha256=LHf2rhftqbnJ7F3CSzi0ndRtHmckhuXIh1Xhgw106Cs,77732 +transformers/models/sam/processing_sam.py,sha256=XdNowFY5wi_HLWF6VvMzbO8xplBFKDC3apb9cYxjYCc,12090 +transformers/models/sam_hq/__init__.py,sha256=DtfMcRDroMaiZ9FKrgymx4rCyGuP5r1dxr-wzjS0T0Q,1029 +transformers/models/sam_hq/__pycache__/__init__.cpython-310.pyc,, +transformers/models/sam_hq/__pycache__/configuration_sam_hq.cpython-310.pyc,, +transformers/models/sam_hq/__pycache__/modeling_sam_hq.cpython-310.pyc,, +transformers/models/sam_hq/__pycache__/modular_sam_hq.cpython-310.pyc,, +transformers/models/sam_hq/__pycache__/processing_samhq.cpython-310.pyc,, 
+transformers/models/sam_hq/configuration_sam_hq.py,sha256=U3nPcw9lh2M6MdBMe7rK_xniKVPVoRKzgqvPgU-7kzo,14840 +transformers/models/sam_hq/modeling_sam_hq.py,sha256=Ln2AQHl3hO21IVZUYs0b_B_yFKHYjhDNJWJv-ewDQz0,69561 +transformers/models/sam_hq/modular_sam_hq.py,sha256=TjZtgBHN5IHnLjHdCv5GeoafFGSEwnG8-6ZMTM5EKO8,31579 +transformers/models/sam_hq/processing_samhq.py,sha256=PZMhsc4iuQXz7yiHbpkP2cSZyAxakwJxvuSXdpS4z-8,11990 +transformers/models/seamless_m4t/__init__.py,sha256=Y5c_W1E83fh8ToTMqF4NcReXzKZiTDv3A4ePoNUxXDg,1194 +transformers/models/seamless_m4t/__pycache__/__init__.cpython-310.pyc,, +transformers/models/seamless_m4t/__pycache__/configuration_seamless_m4t.cpython-310.pyc,, +transformers/models/seamless_m4t/__pycache__/feature_extraction_seamless_m4t.cpython-310.pyc,, +transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc,, +transformers/models/seamless_m4t/__pycache__/processing_seamless_m4t.cpython-310.pyc,, +transformers/models/seamless_m4t/__pycache__/tokenization_seamless_m4t.cpython-310.pyc,, +transformers/models/seamless_m4t/__pycache__/tokenization_seamless_m4t_fast.cpython-310.pyc,, +transformers/models/seamless_m4t/configuration_seamless_m4t.py,sha256=zvCh7r-KOILwOlujAaJLtv1-Z7B8XgZEfxqYuBMjGK0,23521 +transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py,sha256=VeMODyxbmdHC7O2gb_VOzXO8_cz7gktCWEYIVPmscWQ,13628 +transformers/models/seamless_m4t/modeling_seamless_m4t.py,sha256=ywtIqVhwTiSas_KZt9wsPELbcmbMR_8nlwlRhQW4FZU,186962 +transformers/models/seamless_m4t/processing_seamless_m4t.py,sha256=aAEIo-7LS_R-Vlwz7ysld4h3y9GAmkICT7oj9qjKM8Y,5930 +transformers/models/seamless_m4t/tokenization_seamless_m4t.py,sha256=vRb1fQgr6HzLT5rnhE7dDqAuC5_yT5DCIftiBSZIIak,26076 +transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py,sha256=NC5GbGQnDiLf9NB6-XQ4DqJjLFQWJ7_0yJXoDthY-z0,19774 +transformers/models/seamless_m4t_v2/__init__.py,sha256=mMY04PBMrOwTIQLq01RHqZjssvrSYl3UDhP5Y5vFifs,1011 
+transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc,, +transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc,, +transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py,sha256=ihF9DjqhoOojebAUTijxbQuLNs0nEqJBi9umyR-gHgA,24388 +transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py,sha256=3dH-9g4wV_VV_4dWUV_2LMZbiKocZb0BFrQa2erygZk,206113 +transformers/models/segformer/__init__.py,sha256=KusPvx7i3IW86Z3RjiVjo3oNu-SsqwmkifgegRkSzKs,1185 +transformers/models/segformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/segformer/__pycache__/configuration_segformer.cpython-310.pyc,, +transformers/models/segformer/__pycache__/feature_extraction_segformer.cpython-310.pyc,, +transformers/models/segformer/__pycache__/image_processing_segformer.cpython-310.pyc,, +transformers/models/segformer/__pycache__/image_processing_segformer_fast.cpython-310.pyc,, +transformers/models/segformer/__pycache__/modeling_segformer.cpython-310.pyc,, +transformers/models/segformer/__pycache__/modeling_tf_segformer.cpython-310.pyc,, +transformers/models/segformer/__pycache__/modular_segformer.cpython-310.pyc,, +transformers/models/segformer/configuration_segformer.py,sha256=CyvERUX7QyWRo0mG7jwst-3jIqzOyD6YQ2ZdCkoTY_g,7429 +transformers/models/segformer/feature_extraction_segformer.py,sha256=7JHygTUT0pw36WCxl14kgaPvxqNBPFf9AOUuxQKHhlg,1324 +transformers/models/segformer/image_processing_segformer.py,sha256=JnpRUI0bYh5_L9xqJiSrPoqMFhdpYSNAVmdVX_J45uI,22207 +transformers/models/segformer/image_processing_segformer_fast.py,sha256=U9QuNUq8rqhXAy1aFYFVayc1NjZXUbS0rUafwdi4uK8,10394 +transformers/models/segformer/modeling_segformer.py,sha256=iIYyaC429ZfLaFzpSaupmnhP086pIz-kE-tR5t-9rvQ,32655 +transformers/models/segformer/modeling_tf_segformer.py,sha256=SWfmDa_g2XHzZ1GTIAhW9QBd8hQW13AyMvxLhx53NzE,43689 
+transformers/models/segformer/modular_segformer.py,sha256=KtWpIS6l50GAEox8ZC7lPTKUJQUgwAEi9vDn8cU_yzE,6134 +transformers/models/seggpt/__init__.py,sha256=RzV8DKCX1lOWGqXv2BlE1R7T4QuEcdYAVy_csccLvEw,1036 +transformers/models/seggpt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/seggpt/__pycache__/configuration_seggpt.cpython-310.pyc,, +transformers/models/seggpt/__pycache__/image_processing_seggpt.cpython-310.pyc,, +transformers/models/seggpt/__pycache__/modeling_seggpt.cpython-310.pyc,, +transformers/models/seggpt/configuration_seggpt.py,sha256=VeHoocblt-EIRF-FO8JkrVGUcBPKbXPWAJMlXRllz44,6492 +transformers/models/seggpt/image_processing_seggpt.py,sha256=9Qa_-JsAuBdo3Jc_0Z_jXKFUn8GOK9v11ossULABnSQ,31475 +transformers/models/seggpt/modeling_seggpt.py,sha256=3IPuHnp-kfUo-8J4IwPLTNB87iArTs3b7PEUngDArx4,44933 +transformers/models/sew/__init__.py,sha256=POCF36ZRa_dr7oQhkDU2X17bsZuLoWI5V8DSihqr_vU,987 +transformers/models/sew/__pycache__/__init__.cpython-310.pyc,, +transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc,, +transformers/models/sew/__pycache__/feature_extraction_sew.cpython-310.pyc,, +transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc,, +transformers/models/sew/__pycache__/modular_sew.cpython-310.pyc,, +transformers/models/sew/configuration_sew.py,sha256=lJIjv2Ktr3PexNd9E8Q2cb3ewzh4W10eCk5M4d-JOyc,14231 +transformers/models/sew/feature_extraction_sew.py,sha256=szQVJwMsEe-9xZxeHiUFb5E-JFHc6gfTQNqwPp_kLiU,1883 +transformers/models/sew/modeling_sew.py,sha256=kYPlJjQM-hfUr9UCgsz1Rk8xlIuItUKvulvo93CA5es,47441 +transformers/models/sew/modular_sew.py,sha256=I_ZREVE3XNh8BMK4LLnu9sGrZPrUzbnSoZzLhOBsQr8,18913 +transformers/models/sew_d/__init__.py,sha256=zE9sw10e_a1d-8-Jsb75z5frCjkFGD0dZMHAXiNgGwk,991 +transformers/models/sew_d/__pycache__/__init__.cpython-310.pyc,, +transformers/models/sew_d/__pycache__/configuration_sew_d.cpython-310.pyc,, +transformers/models/sew_d/__pycache__/modeling_sew_d.cpython-310.pyc,, 
+transformers/models/sew_d/configuration_sew_d.py,sha256=UXLYoDAIix9cWhSQVuavz4LgbQ2lvCxO5vE_bGsZei0,16191 +transformers/models/sew_d/modeling_sew_d.py,sha256=-LO1nWLrzyhDAQN1zq7nLfCGukkB2B1sm1GJSnA8FsI,69212 +transformers/models/shieldgemma2/__init__.py,sha256=B7eqFJSWi0p49QNvKqUGR8NPyFjQuMdBANevIjTsSxw,1048 +transformers/models/shieldgemma2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/shieldgemma2/__pycache__/configuration_shieldgemma2.cpython-310.pyc,, +transformers/models/shieldgemma2/__pycache__/modeling_shieldgemma2.cpython-310.pyc,, +transformers/models/shieldgemma2/__pycache__/processing_shieldgemma2.cpython-310.pyc,, +transformers/models/shieldgemma2/configuration_shieldgemma2.py,sha256=Cj4MM47fvnQVGqzEXeidaqrkSmz4-i11mYvz6yj4dvU,4805 +transformers/models/shieldgemma2/modeling_shieldgemma2.py,sha256=UrnUYcDjd2nu6G4P8EAwsJKv0dovfYoE-vLhZU3vP2k,6101 +transformers/models/shieldgemma2/processing_shieldgemma2.py,sha256=X7k95BmxwMasinqM9e3hUW7zd5QoK5wHnGzlznEwe3I,8585 +transformers/models/siglip/__init__.py,sha256=CnNqbSQ25tKLz0MGJVmhSXjVyASRDu7v5yjTHWYZ6M4,1160 +transformers/models/siglip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/siglip/__pycache__/configuration_siglip.cpython-310.pyc,, +transformers/models/siglip/__pycache__/image_processing_siglip.cpython-310.pyc,, +transformers/models/siglip/__pycache__/image_processing_siglip_fast.cpython-310.pyc,, +transformers/models/siglip/__pycache__/modeling_siglip.cpython-310.pyc,, +transformers/models/siglip/__pycache__/processing_siglip.cpython-310.pyc,, +transformers/models/siglip/__pycache__/tokenization_siglip.cpython-310.pyc,, +transformers/models/siglip/configuration_siglip.py,sha256=3GVhImzaW3WCFyQ3IfC-ZM3ZA2qmvA_BEjmZyNOQyfk,11697 +transformers/models/siglip/image_processing_siglip.py,sha256=GnygcypTs6iYpcESnv0WCPLlCbk9Jm8dxPsftAe_i0E,12031 +transformers/models/siglip/image_processing_siglip_fast.py,sha256=3dwic9zjpzgxbCnQC84lUvcDOSnWIlEZrCUJItmi474,1257 
+transformers/models/siglip/modeling_siglip.py,sha256=5WkCdkAOcJrXQDfKck98W0aOTCb1YDw_nOZhDOCvm7Y,49332 +transformers/models/siglip/processing_siglip.py,sha256=fuXgwOdFQrEPdRUa0xgRhCaH1avjOA0vxJwz9gSVpuM,7343 +transformers/models/siglip/tokenization_siglip.py,sha256=QXJ1RdlwC6ESO0z3thY-tzXY5JL0VEsMBsWi-OgA4Vg,16047 +transformers/models/siglip2/__init__.py,sha256=dvfEVdLNJzWjugwTPGNp1gfxA6x4ytFLgGtV4Zfhoh4,1126 +transformers/models/siglip2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/siglip2/__pycache__/configuration_siglip2.cpython-310.pyc,, +transformers/models/siglip2/__pycache__/image_processing_siglip2.cpython-310.pyc,, +transformers/models/siglip2/__pycache__/image_processing_siglip2_fast.cpython-310.pyc,, +transformers/models/siglip2/__pycache__/modeling_siglip2.cpython-310.pyc,, +transformers/models/siglip2/__pycache__/modular_siglip2.cpython-310.pyc,, +transformers/models/siglip2/__pycache__/processing_siglip2.cpython-310.pyc,, +transformers/models/siglip2/configuration_siglip2.py,sha256=zs0zqZ9Tog9iwLK6yEN6peJErYCHN5N0iBB11Lp0FpA,12819 +transformers/models/siglip2/image_processing_siglip2.py,sha256=u_JMPN_0LRIdfyUK6OhwP3deP_1pfZ7vcV7JjwefCzM,16075 +transformers/models/siglip2/image_processing_siglip2_fast.py,sha256=ZXmKSbmUlhmJUw22JzTKHI7xpZJiRWrBwRN50ez1rGw,6511 +transformers/models/siglip2/modeling_siglip2.py,sha256=xxSwPS2o11E-T2HTMhwUd9Mtf_OzkhzHQfGohDikI3A,54004 +transformers/models/siglip2/modular_siglip2.py,sha256=tjy-a-HbaSQtongAib4SkmksQLdtKTHyBjo3trCKSqw,26857 +transformers/models/siglip2/processing_siglip2.py,sha256=SE5YKHPxpHpurgug081mweiiznxlR_e4niW8nxJGDY0,8078 +transformers/models/smollm3/__init__.py,sha256=BZA2MiDpGmv2swg1yO14tkgi_SZ0yVg8ndr-PJwY-fI,1000 +transformers/models/smollm3/__pycache__/__init__.cpython-310.pyc,, +transformers/models/smollm3/__pycache__/configuration_smollm3.cpython-310.pyc,, +transformers/models/smollm3/__pycache__/modeling_smollm3.cpython-310.pyc,, 
+transformers/models/smollm3/__pycache__/modular_smollm3.cpython-310.pyc,, +transformers/models/smollm3/configuration_smollm3.py,sha256=9RUIE0AkwCrHfvkdpNuMhhp-sR7vJtX_PxGpaEoEG7c,13346 +transformers/models/smollm3/modeling_smollm3.py,sha256=-fhFOHgVXKFUombaQtVGYxl4lN5MEOIrCRA827Fwzlc,22344 +transformers/models/smollm3/modular_smollm3.py,sha256=kurghqLFiarOQdlLuX1_V0NAoSZMQwl6IicsbnNRTEk,16286 +transformers/models/smolvlm/__init__.py,sha256=fE-znTLrbxNe5qkHVgI4xmwFz-paFymZuxTAd2GKkOo,1126 +transformers/models/smolvlm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/configuration_smolvlm.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/image_processing_smolvlm.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/image_processing_smolvlm_fast.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/modeling_smolvlm.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/modular_smolvlm.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/processing_smolvlm.cpython-310.pyc,, +transformers/models/smolvlm/__pycache__/video_processing_smolvlm.cpython-310.pyc,, +transformers/models/smolvlm/configuration_smolvlm.py,sha256=4XHkP6aHHeBOGOoLNn87AuAp5csfYpWo8QKW8TrauYI,9359 +transformers/models/smolvlm/image_processing_smolvlm.py,sha256=LvUX1KFJ0OsZJeg91YGvoWn5wqlNGMHAHDiaqlp_Qyo,44035 +transformers/models/smolvlm/image_processing_smolvlm_fast.py,sha256=zT8jdeCc-Hwvw9QsJfKMLDwf6oCxkuP_WKQLlx6cFiQ,24026 +transformers/models/smolvlm/modeling_smolvlm.py,sha256=8VXk3EMh_POd5T-_Vd93A7z1Mwarwe5eu94Mbwt5rGY,46034 +transformers/models/smolvlm/modular_smolvlm.py,sha256=2iLYg35U4k4ue_mYetqn_yT1zbw0JGXu2EGG4ul6VNE,18819 +transformers/models/smolvlm/processing_smolvlm.py,sha256=dIw9Z74Y3S-uqhYTv8KOPlbM2xpAS3ONTiAAmNwky_I,20183 +transformers/models/smolvlm/video_processing_smolvlm.py,sha256=pWIleNYor2oMgHs0ny2sSVeAOZi1U-JIhA1TkKLjzug,17198 
+transformers/models/speech_encoder_decoder/__init__.py,sha256=0MwevN904dCSAb0dvznhDH--q-m3-MzdCtx0B-T5hpk,1081 +transformers/models/speech_encoder_decoder/__pycache__/__init__.cpython-310.pyc,, +transformers/models/speech_encoder_decoder/__pycache__/configuration_speech_encoder_decoder.cpython-310.pyc,, +transformers/models/speech_encoder_decoder/__pycache__/modeling_flax_speech_encoder_decoder.cpython-310.pyc,, +transformers/models/speech_encoder_decoder/__pycache__/modeling_speech_encoder_decoder.cpython-310.pyc,, +transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py,sha256=2FBAuqwi4KNkbb7chAliDYZ46vYJiIjEwqtSh1oFSKY,4693 +transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py,sha256=g3CBztbdQ2M9nsk0W39Jw1cWzTVHdUuCTJEswzhEZ8w,44860 +transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py,sha256=yNmAivvaN3Pdrvm8B8WY5lryjW6rroJLfSo_DIKOAsw,25624 +transformers/models/speech_to_text/__init__.py,sha256=qZzt5u1rbSsOjPVmX40R4b4pkL1mxOQZ66q8GPDKao8,1200 +transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc,, +transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc,, +transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc,, +transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc,, +transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc,, +transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc,, +transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc,, +transformers/models/speech_to_text/configuration_speech_to_text.py,sha256=YazgmnHYt_lbYaRQNt2dEYAqgpAfYln36hQl89WPLF4,9825 +transformers/models/speech_to_text/feature_extraction_speech_to_text.py,sha256=r6vxaMiZM7KBxghUnd_WnDJ4brms95HimGR0bkzvglw,13955 
+transformers/models/speech_to_text/modeling_speech_to_text.py,sha256=jhWHcszAYQ3Rc7PV1ATgEHsn12SFWtRLSlE4qJ1ZRC0,61722 +transformers/models/speech_to_text/modeling_tf_speech_to_text.py,sha256=Qv61VrbNAAbS89ZmGX1xsIdxzKvtTmSYQdDa21NgnH0,74288 +transformers/models/speech_to_text/processing_speech_to_text.py,sha256=v_scWR5ExsRmschQLTzps3NoJpUhcrch0xtuGZoyo80,4856 +transformers/models/speech_to_text/tokenization_speech_to_text.py,sha256=4VRmhzHQZvrplhvYdQFMN8v9xAWmz0UFW4TY9pI7ygw,11501 +transformers/models/speecht5/__init__.py,sha256=DploRLnZX4ZO40Z7BstCZ7aNWGuZE06tIeMo0GTyR60,1124 +transformers/models/speecht5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/speecht5/__pycache__/configuration_speecht5.cpython-310.pyc,, +transformers/models/speecht5/__pycache__/feature_extraction_speecht5.cpython-310.pyc,, +transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc,, +transformers/models/speecht5/__pycache__/number_normalizer.cpython-310.pyc,, +transformers/models/speecht5/__pycache__/processing_speecht5.cpython-310.pyc,, +transformers/models/speecht5/__pycache__/tokenization_speecht5.cpython-310.pyc,, +transformers/models/speecht5/configuration_speecht5.py,sha256=q6TwW_M__spcgD9NOObNlcOZt8_xvL6V5Qm1yQZ1T1I,23466 +transformers/models/speecht5/feature_extraction_speecht5.py,sha256=MqnjfL1S_-VCnJhEEWgfv9kAYp2_nlO9MZ9OtsESJX0,17853 +transformers/models/speecht5/modeling_speecht5.py,sha256=DFe2wlkEmuNE8gXmZc-59sc8tRRI0rbedMfOkTqkcgI,147885 +transformers/models/speecht5/number_normalizer.py,sha256=cxnEUdHSISW5eAo15cLuVkZa65zMFuMFaJ8zAOQCsAA,7019 +transformers/models/speecht5/processing_speecht5.py,sha256=lp8lCue0tNo3xQVqlHpzruReD0iGUZeNz4KRsXP12rg,7596 +transformers/models/speecht5/tokenization_speecht5.py,sha256=UfqBVrMUHCRRY_-kVvcU3RJnEfdAlq40ooE0UqV40ps,9009 +transformers/models/splinter/__init__.py,sha256=N3tdgJIqZRPK0g3pfLE3p3-HkGJMRf-GQ189anQ51to,1084 +transformers/models/splinter/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/splinter/__pycache__/configuration_splinter.cpython-310.pyc,, +transformers/models/splinter/__pycache__/modeling_splinter.cpython-310.pyc,, +transformers/models/splinter/__pycache__/tokenization_splinter.cpython-310.pyc,, +transformers/models/splinter/__pycache__/tokenization_splinter_fast.cpython-310.pyc,, +transformers/models/splinter/configuration_splinter.py,sha256=ZajZPX6f9K7gBqp2PbOtmJg-_fAU8h72tKdTNjyQV0M,5625 +transformers/models/splinter/modeling_splinter.py,sha256=dMJYhHNbuFmTWze5K_QTO2Nc8rM8PfYYDkgLH3Ivk5A,38011 +transformers/models/splinter/tokenization_splinter.py,sha256=ba400uZlqtB6kjdMI3oMgBWs_p-px81zAQItZQx-l6c,20948 +transformers/models/splinter/tokenization_splinter_fast.py,sha256=AG6k691a2HJqtIAiEyn07WuSc4JgqU1HTkfEGC8Tt2c,8590 +transformers/models/squeezebert/__init__.py,sha256=_kzQtfoJetCK99e_FICGZl5DN8S2VVcOUFioGyN0sLI,1096 +transformers/models/squeezebert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc,, +transformers/models/squeezebert/__pycache__/modeling_squeezebert.cpython-310.pyc,, +transformers/models/squeezebert/__pycache__/tokenization_squeezebert.cpython-310.pyc,, +transformers/models/squeezebert/__pycache__/tokenization_squeezebert_fast.cpython-310.pyc,, +transformers/models/squeezebert/configuration_squeezebert.py,sha256=9I4mUuqEwKAIrezRjjto3HBfJ-aiWBTkQcIZWuJFFGM,7312 +transformers/models/squeezebert/modeling_squeezebert.py,sha256=XNcULy9rowxidymjHb8D3OWWVFaTsC9rHLPmJQsd5rQ,38738 +transformers/models/squeezebert/tokenization_squeezebert.py,sha256=4ZbPoFCjrt-d2zdbZ9s23IHOsN5iw9VpG11W4sJWCJM,20092 +transformers/models/squeezebert/tokenization_squeezebert_fast.py,sha256=CtqOSIMAYhS66yYjbWxTDpbbxNvAllTkP_lJroEyvfQ,6724 +transformers/models/stablelm/__init__.py,sha256=aVgWTcwBuuiGJDp8H_ZU6BvhYqjmNEqCukU7jEfwd_I,997 +transformers/models/stablelm/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/stablelm/__pycache__/configuration_stablelm.cpython-310.pyc,, +transformers/models/stablelm/__pycache__/modeling_stablelm.cpython-310.pyc,, +transformers/models/stablelm/configuration_stablelm.py,sha256=GqKL53dgijlmwQMgLsuw4jB6gm1stU4SaCETjqumKVs,10843 +transformers/models/stablelm/modeling_stablelm.py,sha256=SHNetzeAohpXhVYhdKi8H9xcZSBUSstj3QmzXrS9qlM,45295 +transformers/models/starcoder2/__init__.py,sha256=fZ8HHZCGjxRfVgROe7zuoi9ADIAa4SeqxGHkvKUQiQM,1001 +transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc,, +transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc,, +transformers/models/starcoder2/__pycache__/modular_starcoder2.cpython-310.pyc,, +transformers/models/starcoder2/configuration_starcoder2.py,sha256=wgtxWNUy_mjoly8Hyg4eVBJKFtcy9x2rdK55uzErIXg,10898 +transformers/models/starcoder2/modeling_starcoder2.py,sha256=ePN1WAPYMlCT1PgRYKM2UH_5ztsgzPFkMREl4mkxFC4,21359 +transformers/models/starcoder2/modular_starcoder2.py,sha256=HlS9Rg6ifP-GSqiWNJd7dxf9AUd7rWlvavfZJJtrHWA,9704 +transformers/models/superglue/__init__.py,sha256=Sg_nfSbBltkVhp40pVc04SthUCnXMX3kWHH_qC_YL4Y,1045 +transformers/models/superglue/__pycache__/__init__.cpython-310.pyc,, +transformers/models/superglue/__pycache__/configuration_superglue.cpython-310.pyc,, +transformers/models/superglue/__pycache__/image_processing_superglue.cpython-310.pyc,, +transformers/models/superglue/__pycache__/modeling_superglue.cpython-310.pyc,, +transformers/models/superglue/configuration_superglue.py,sha256=WD-orzD73GN0s_3On6qcv0jRS7PwC1yjgNBPNnXpOzc,5473 +transformers/models/superglue/image_processing_superglue.py,sha256=TBf3TVLl1LJlJt-cKFxnUh9ToifooCfWjgwiwdtCoXE,21944 +transformers/models/superglue/modeling_superglue.py,sha256=P1OlrN-5umPgITWCb4pNKSruteJVJjS6O0Z4HEaYZ80,36032 
+transformers/models/superpoint/__init__.py,sha256=6fQQ-p4220IUaIQCZseKItiHWVV7KOiA5mXoTdJSJmI,1100 +transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc,, +transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc,, +transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc,, +transformers/models/superpoint/__pycache__/image_processing_superpoint_fast.cpython-310.pyc,, +transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc,, +transformers/models/superpoint/configuration_superpoint.py,sha256=wWW7CLDM2VW-f41M_hLvq4N3j1gt_4QmsaNHifKLd_I,4048 +transformers/models/superpoint/image_processing_superpoint.py,sha256=R_7RqgeGVrMepchnGIH7smiVqbl__inUcCDye0vxZqQ,16394 +transformers/models/superpoint/image_processing_superpoint_fast.py,sha256=cfHNKd8IzIFZp6NaUqt83aLJaX4M7fjeSJz1GmOQW0s,7074 +transformers/models/superpoint/modeling_superpoint.py,sha256=iqN6g4a2Yt8I-tantOd66ajRukiCzl1PcLFUIMDv71I,20071 +transformers/models/swiftformer/__init__.py,sha256=cW3-9efPxdjZV1KziM8j1S8e8wH3wJQhWqMXlULhG6c,1046 +transformers/models/swiftformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/swiftformer/__pycache__/configuration_swiftformer.cpython-310.pyc,, +transformers/models/swiftformer/__pycache__/modeling_swiftformer.cpython-310.pyc,, +transformers/models/swiftformer/__pycache__/modeling_tf_swiftformer.cpython-310.pyc,, +transformers/models/swiftformer/configuration_swiftformer.py,sha256=zc6uduEgdxwc_ApNrsADFqGSKC1qlH-kceHSuUWHQCI,5867 +transformers/models/swiftformer/modeling_swiftformer.py,sha256=1AxEVT8-oh2Fh0CuWAdQm_oVECi1YktoD33Xd7cENmQ,20641 +transformers/models/swiftformer/modeling_tf_swiftformer.py,sha256=BtStZyRloUcoWu3km_TLihPzMPiXA5QT88WQthuMS8Q,34959 +transformers/models/swin/__init__.py,sha256=7pcdahUG9WcEkEDRoUcMVxdonKglhOpXaQLo8xI6KTg,1025 +transformers/models/swin/__pycache__/__init__.cpython-310.pyc,, 
+transformers/models/swin/__pycache__/configuration_swin.cpython-310.pyc,, +transformers/models/swin/__pycache__/modeling_swin.cpython-310.pyc,, +transformers/models/swin/__pycache__/modeling_tf_swin.cpython-310.pyc,, +transformers/models/swin/configuration_swin.py,sha256=hcksE44MGT9_rWYNXvhyl94jqU00rQY3XXTDPzTlvmo,7958 +transformers/models/swin/modeling_swin.py,sha256=Cnq5AQe1wZaU5tAx6IQ71LNNSwa9l85PWMnZdD9W3zU,54813 +transformers/models/swin/modeling_tf_swin.py,sha256=R-Fyh0ZnlXex4JK1JE22tgeE62O_IYskMl54kK3HOe4,70827 +transformers/models/swin2sr/__init__.py,sha256=PLCBXwTQF37hLur2ROcYXUiNropQ6u5Ig_HgK29MOu8,1088 +transformers/models/swin2sr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/swin2sr/__pycache__/configuration_swin2sr.cpython-310.pyc,, +transformers/models/swin2sr/__pycache__/image_processing_swin2sr.cpython-310.pyc,, +transformers/models/swin2sr/__pycache__/image_processing_swin2sr_fast.cpython-310.pyc,, +transformers/models/swin2sr/__pycache__/modeling_swin2sr.cpython-310.pyc,, +transformers/models/swin2sr/configuration_swin2sr.py,sha256=6ZRVIyo6z1oQvPm13QvkrWcKpf1qjMf0QqwmdHMdvto,6841 +transformers/models/swin2sr/image_processing_swin2sr.py,sha256=T5JpOohG19DOgjlUHgtw06vOv1Q5FHg-oK6ImXPL2zQ,9247 +transformers/models/swin2sr/image_processing_swin2sr_fast.py,sha256=obhPGADB2acMjySvBMjeh96rQ615Uzf7jnarn6JYQgw,4171 +transformers/models/swin2sr/modeling_swin2sr.py,sha256=rprQ8EHF4aASVsu1IGuGeD_p1wMn_nOSDJj5DhgXDuc,46916 +transformers/models/swinv2/__init__.py,sha256=njM902tlEQ82mYRN9ZTMOiXpJn1NHnxKbm_LCvn2I-M,993 +transformers/models/swinv2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/swinv2/__pycache__/configuration_swinv2.cpython-310.pyc,, +transformers/models/swinv2/__pycache__/modeling_swinv2.cpython-310.pyc,, +transformers/models/swinv2/configuration_swinv2.py,sha256=m2yjnprZXcqtF4dlw2JAv8MWjBBm6khztGzCUAp4rHw,7547 +transformers/models/swinv2/modeling_swinv2.py,sha256=1dz9n91ZRavMS5xTU0cCyCvWFfFHrhGEUfXk3-kEuNU,59027 
+transformers/models/switch_transformers/__init__.py,sha256=Iw38A9kfIT5mJ0G00YE-TVN-M_b1DBHYQqb0pEyTZMY,1019 +transformers/models/switch_transformers/__pycache__/__init__.cpython-310.pyc,, +transformers/models/switch_transformers/__pycache__/configuration_switch_transformers.cpython-310.pyc,, +transformers/models/switch_transformers/__pycache__/modeling_switch_transformers.cpython-310.pyc,, +transformers/models/switch_transformers/configuration_switch_transformers.py,sha256=hqXjdBHj-oqNPBPzwH-e5-dKYDPw2lfWgc3oDegHHVE,9054 +transformers/models/switch_transformers/modeling_switch_transformers.py,sha256=J_w85_1dAtR2RgIMkCYklqvItsUG-4pgC3kI7lP7b5I,84298 +transformers/models/t5/__init__.py,sha256=hCQO8nkKAJqFgMOwC7nxhyDYOUA9fcDT0pDb7DAHt5Y,1130 +transformers/models/t5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/t5/__pycache__/configuration_t5.cpython-310.pyc,, +transformers/models/t5/__pycache__/modeling_flax_t5.cpython-310.pyc,, +transformers/models/t5/__pycache__/modeling_t5.cpython-310.pyc,, +transformers/models/t5/__pycache__/modeling_tf_t5.cpython-310.pyc,, +transformers/models/t5/__pycache__/tokenization_t5.cpython-310.pyc,, +transformers/models/t5/__pycache__/tokenization_t5_fast.cpython-310.pyc,, +transformers/models/t5/configuration_t5.py,sha256=1fXcdM1_SwAjVwAo2pyiWoxXuoZ6meJaqI6btUPH-bU,7381 +transformers/models/t5/modeling_flax_t5.py,sha256=l-uYt9Ze0OemY-eF2BVoCmYomkQeJieRMuozgr7pqk8,74353 +transformers/models/t5/modeling_t5.py,sha256=aAR06W2l7_kZqmPVboXjAq1Ljljdt3Hqqdyfl813k6U,109566 +transformers/models/t5/modeling_tf_t5.py,sha256=OpUvMhrYdYEomdc6hfq-CVUbycBAyfe-PTTffhxFCU0,77068 +transformers/models/t5/tokenization_t5.py,sha256=hadW7akP02nYXtfy3u-p5hlOqF254umauT2xpVcOo0A,20025 +transformers/models/t5/tokenization_t5_fast.py,sha256=E1E_dH9RXXh7ei1c1u9Q_518o-n5x0mrhMJlHvlqLb8,10048 +transformers/models/t5gemma/__init__.py,sha256=2QjFw4aK-Ui2JuImfxWN8oeMkMwokEzurG8wPvKv98Y,1005 
+transformers/models/t5gemma/__pycache__/__init__.cpython-310.pyc,, +transformers/models/t5gemma/__pycache__/configuration_t5gemma.cpython-310.pyc,, +transformers/models/t5gemma/__pycache__/modeling_t5gemma.cpython-310.pyc,, +transformers/models/t5gemma/__pycache__/modular_t5gemma.cpython-310.pyc,, +transformers/models/t5gemma/configuration_t5gemma.py,sha256=_HSUvwfaKSR_ejgmh-Y11uEM7goAE9fZajn4EhvStoQ,16368 +transformers/models/t5gemma/modeling_t5gemma.py,sha256=8tLzcc8ptLh59jKQg8lyT12ym9jG4Rr8a-YiZ16jrNI,60565 +transformers/models/t5gemma/modular_t5gemma.py,sha256=KlkeaDYiaAWxejj-mJMbgL1HY-u54NUXdcfd5QmbfJw,52660 +transformers/models/table_transformer/__init__.py,sha256=VT-KM0_6LZ6fdOAglbfA8zEhCQuYa6He10Div7WEcD8,1015 +transformers/models/table_transformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/table_transformer/__pycache__/configuration_table_transformer.cpython-310.pyc,, +transformers/models/table_transformer/__pycache__/modeling_table_transformer.cpython-310.pyc,, +transformers/models/table_transformer/configuration_table_transformer.py,sha256=sXV9fFcqNqz9K7zYPCFZyDXfjr42GhotJ7th5UCdH54,13613 +transformers/models/table_transformer/modeling_table_transformer.py,sha256=LHuFU2P2LSHaURjA-NpyknbADo054RGIQGLutzoT72g,61172 +transformers/models/tapas/__init__.py,sha256=DQTmog2nYukVsXxARy8v35SitI0Iv4ZLCGl7zUlLDuI,1066 +transformers/models/tapas/__pycache__/__init__.cpython-310.pyc,, +transformers/models/tapas/__pycache__/configuration_tapas.cpython-310.pyc,, +transformers/models/tapas/__pycache__/modeling_tapas.cpython-310.pyc,, +transformers/models/tapas/__pycache__/modeling_tf_tapas.cpython-310.pyc,, +transformers/models/tapas/__pycache__/tokenization_tapas.cpython-310.pyc,, +transformers/models/tapas/configuration_tapas.py,sha256=i1_a_AArLjS8SkWu0Du8TC7JZBfvMUSku2QteSdfnC4,12293 +transformers/models/tapas/modeling_tapas.py,sha256=KR7pE60rX4xrQU6aOLrNDW1uLUObN4s0bNZaEHJN1Is,108458 
+transformers/models/tapas/modeling_tf_tapas.py,sha256=HpG3CLwp21ZKZ39Kew6FakiaWuCuv0ZFXmFdMs2WFlw,112267 +transformers/models/tapas/tokenization_tapas.py,sha256=tN5PcvOE_GT9NOYggB4GYI0kRl3mj_gyqUHDQ0bjGXE,118445 +transformers/models/textnet/__init__.py,sha256=WCPdGs5LWKGDk5UvZm4wA0G76bIXMOhBr1M3x-WmE3s,1039 +transformers/models/textnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/textnet/__pycache__/configuration_textnet.cpython-310.pyc,, +transformers/models/textnet/__pycache__/image_processing_textnet.cpython-310.pyc,, +transformers/models/textnet/__pycache__/modeling_textnet.cpython-310.pyc,, +transformers/models/textnet/configuration_textnet.py,sha256=ZGtG42UxM2RbWr7pSN1IUIBo74aK8Vq79Gg2-vfFWp4,6212 +transformers/models/textnet/image_processing_textnet.py,sha256=fR9wH7_0gal82_n-4MwsLImm4_3i9qPOR-fvtKMupn0,17711 +transformers/models/textnet/modeling_textnet.py,sha256=wn1guGtGvirJx_CaOunmvU5isjFs-ZmeW7A5lNZvu68,16573 +transformers/models/time_series_transformer/__init__.py,sha256=3A_3Wog-6NDwCoBIMtkzJv9slc_wXpzDzsOo-xBQ8hE,1027 +transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc,, +transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc,, +transformers/models/time_series_transformer/configuration_time_series_transformer.py,sha256=jNs-oZ17yVDy4g-shNyOLoA9pupIt9ZlBbX5BXRLyYo,11695 +transformers/models/time_series_transformer/modeling_time_series_transformer.py,sha256=R3gwO47YuffNJMDUSkKdoeCmtOl-w5rRT6Pv45JUE8Y,95489 +transformers/models/timesfm/__init__.py,sha256=gcfLgRAbwZThFP98fst9wsoTMB0fkR28tzWYoQIs5qU,995 +transformers/models/timesfm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/timesfm/__pycache__/configuration_timesfm.cpython-310.pyc,, +transformers/models/timesfm/__pycache__/modeling_timesfm.cpython-310.pyc,, 
+transformers/models/timesfm/__pycache__/modular_timesfm.cpython-310.pyc,, +transformers/models/timesfm/configuration_timesfm.py,sha256=OBKxwrNeeak-YgKtKL4zTgs705E9to4FCbD7wX0J1Gs,5715 +transformers/models/timesfm/modeling_timesfm.py,sha256=oW_6yao7FOzfvlXxGVAsdK2AZlfX7H1f0R77yUDR-oQ,34502 +transformers/models/timesfm/modular_timesfm.py,sha256=RE1ypm8pygCjjZQ2lUfEfwqiY3v8wYdWHJrEsFIkdeo,32164 +transformers/models/timesformer/__init__.py,sha256=4ODuyNRrYkbgpSbMHJX8XmpJdekHlu__zWey-plUSgI,1003 +transformers/models/timesformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/timesformer/__pycache__/configuration_timesformer.cpython-310.pyc,, +transformers/models/timesformer/__pycache__/modeling_timesformer.cpython-310.pyc,, +transformers/models/timesformer/configuration_timesformer.py,sha256=GilCKil_40B_hqjh0-02CWrBupbwEfHhOZ3b5bUpTPI,5568 +transformers/models/timesformer/modeling_timesformer.py,sha256=aUB9d6gCpKSyey0CY_wHlk9_xQsCBz4HT5YWw9exm6o,32838 +transformers/models/timm_backbone/__init__.py,sha256=s0GlTaJ43Yt9ZdzG9-qjJNlp0Ol4vjN-14S6N7gXLsA,1007 +transformers/models/timm_backbone/__pycache__/__init__.cpython-310.pyc,, +transformers/models/timm_backbone/__pycache__/configuration_timm_backbone.cpython-310.pyc,, +transformers/models/timm_backbone/__pycache__/modeling_timm_backbone.cpython-310.pyc,, +transformers/models/timm_backbone/configuration_timm_backbone.py,sha256=CMRsZX3ZQiI1bzBrza3Eqgjy8XEid8dPfJZVuhtLTn8,3186 +transformers/models/timm_backbone/modeling_timm_backbone.py,sha256=8k2tlfAtgX2FguLvxtXc_15jCjbnq4lXKq_BalsgxT8,6635 +transformers/models/timm_wrapper/__init__.py,sha256=nO3xlv8KQmYCoxKqDteADLkli16cLqdLkfTY_G73O6k,1048 +transformers/models/timm_wrapper/__pycache__/__init__.cpython-310.pyc,, +transformers/models/timm_wrapper/__pycache__/configuration_timm_wrapper.cpython-310.pyc,, +transformers/models/timm_wrapper/__pycache__/image_processing_timm_wrapper.cpython-310.pyc,, 
+transformers/models/timm_wrapper/__pycache__/modeling_timm_wrapper.cpython-310.pyc,, +transformers/models/timm_wrapper/configuration_timm_wrapper.py,sha256=tHCQEH67F-TXb_ES5751YHxKINpZJQbYtPjiMc0f4gY,5297 +transformers/models/timm_wrapper/image_processing_timm_wrapper.py,sha256=b9aeo2BbC0kWkKsAy9wMCXRgU-P6y6WxgVXiPaMtYq8,5338 +transformers/models/timm_wrapper/modeling_timm_wrapper.py,sha256=ERZgzCnsmZVawGWxbrGpRLxYUOwROZ655VBwPFC3uyQ,16611 +transformers/models/trocr/__init__.py,sha256=Hllbq_42XbGRZyXsGOzYHcb33MOA5_yfijMRKEXJ4n4,1027 +transformers/models/trocr/__pycache__/__init__.cpython-310.pyc,, +transformers/models/trocr/__pycache__/configuration_trocr.cpython-310.pyc,, +transformers/models/trocr/__pycache__/modeling_trocr.cpython-310.pyc,, +transformers/models/trocr/__pycache__/processing_trocr.cpython-310.pyc,, +transformers/models/trocr/configuration_trocr.py,sha256=mm8gO1FagOM7OpQ9S7TZ9UrNc7or081ymZz-q3uss3s,6558 +transformers/models/trocr/modeling_trocr.py,sha256=bNsVKjXdVqtaDQmT23TWhJQdgIVeVPAhxBT31spZB7Q,39098 +transformers/models/trocr/processing_trocr.py,sha256=lcleRsXV-rBWWR304Y0PVNORv14GZ3-8hE0BMbhLbY4,6348 +transformers/models/tvp/__init__.py,sha256=CMKadZ9nKrh8p6u4Z-k6014a9LqDJY7KpyL009s3kpo,1061 +transformers/models/tvp/__pycache__/__init__.cpython-310.pyc,, +transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc,, +transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc,, +transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc,, +transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc,, +transformers/models/tvp/configuration_tvp.py,sha256=DpgrPqYGfjA6OSNkh5UsdkLw8RWFF6qh90Zrz6U9pn4,10147 +transformers/models/tvp/image_processing_tvp.py,sha256=ky4Owy0s0wmEVkhPIVGfcL8fTJviPYLUM8uvOJh1L2c,22841 +transformers/models/tvp/modeling_tvp.py,sha256=dpW7PUlmyRVDk8adwMl6DIw-RgQcOMLn_85Kmby4mdc,40062 +transformers/models/tvp/processing_tvp.py,sha256=PKQkniKgJaUGAlBX4pHYpXcq4HdsLpc5yFxiAGHQU-4,7008 
+transformers/models/udop/__init__.py,sha256=CqFpHruzC__VtxEcVz31QxxMpBI1mjO77-Lj0RqW4Eo,1103 +transformers/models/udop/__pycache__/__init__.cpython-310.pyc,, +transformers/models/udop/__pycache__/configuration_udop.cpython-310.pyc,, +transformers/models/udop/__pycache__/modeling_udop.cpython-310.pyc,, +transformers/models/udop/__pycache__/processing_udop.cpython-310.pyc,, +transformers/models/udop/__pycache__/tokenization_udop.cpython-310.pyc,, +transformers/models/udop/__pycache__/tokenization_udop_fast.cpython-310.pyc,, +transformers/models/udop/configuration_udop.py,sha256=xzaHEk_1LtY4AqHr10qL2Vt7Yi-CRgCO98Ni_OvRPgg,7675 +transformers/models/udop/modeling_udop.py,sha256=DveMPNK88kYgd7n8PAtX4lkuiULlDbpafeZn_9fpqDY,91948 +transformers/models/udop/processing_udop.py,sha256=FleCrAhhJSE66MrW2XdVKROcWUcJ3KqxRhxj5f8mT78,9302 +transformers/models/udop/tokenization_udop.py,sha256=8wBBqyD99Y_tcP8q_LHZiIITj26kKdMRtLAeaIH91EU,71827 +transformers/models/udop/tokenization_udop_fast.py,sha256=cvfqL2DiAyd2_d5BL14U6XQScBvM8JoGizTM3tLSik0,49663 +transformers/models/umt5/__init__.py,sha256=FKt6Ap3AvOCIKoeOM-5qY84lNEML9IujaDaYROINJMs,989 +transformers/models/umt5/__pycache__/__init__.cpython-310.pyc,, +transformers/models/umt5/__pycache__/configuration_umt5.cpython-310.pyc,, +transformers/models/umt5/__pycache__/modeling_umt5.cpython-310.pyc,, +transformers/models/umt5/configuration_umt5.py,sha256=W60fZhT2upRLbNTauRSs1-K0HSB5aCV4m79TFSXO1VI,7749 +transformers/models/umt5/modeling_umt5.py,sha256=XGic29t6ON6iTWR5ztPLYyX0jwScuby6DX7JGC4aFLQ,90785 +transformers/models/unispeech/__init__.py,sha256=AXJMExDoYYI71OKNXhAt7lyqcFIvcLHEQ1Fsm171m5w,999 +transformers/models/unispeech/__pycache__/__init__.cpython-310.pyc,, +transformers/models/unispeech/__pycache__/configuration_unispeech.cpython-310.pyc,, +transformers/models/unispeech/__pycache__/modeling_unispeech.cpython-310.pyc,, +transformers/models/unispeech/__pycache__/modular_unispeech.cpython-310.pyc,, 
+transformers/models/unispeech/configuration_unispeech.py,sha256=THcTvZNOVsDyoVkiDZ5kKGu5hmNu0qM2lal-P-WPDos,17510 +transformers/models/unispeech/modeling_unispeech.py,sha256=y5TUhqtKcYf2NAsNStA7b7Xku74efU3xIprQQmrDOU4,64393 +transformers/models/unispeech/modular_unispeech.py,sha256=8Y20toq5hig6AbiivT3BJzBB6JbvC2_ntAMk2QzkLtA,18221 +transformers/models/unispeech_sat/__init__.py,sha256=P9lCzMg01s4Gj_Pb8t1l36MRAeoOcxUa4d7dbQSe9N4,1007 +transformers/models/unispeech_sat/__pycache__/__init__.cpython-310.pyc,, +transformers/models/unispeech_sat/__pycache__/configuration_unispeech_sat.cpython-310.pyc,, +transformers/models/unispeech_sat/__pycache__/modeling_unispeech_sat.cpython-310.pyc,, +transformers/models/unispeech_sat/__pycache__/modular_unispeech_sat.cpython-310.pyc,, +transformers/models/unispeech_sat/configuration_unispeech_sat.py,sha256=3CduPVTYRqVQWaZbD_oA3wsCl_7v95Fn4RuWpPi6VhQ,18855 +transformers/models/unispeech_sat/modeling_unispeech_sat.py,sha256=sCbv0wUE4_p3Pm0EZtTHpTAC4RCs1e2FZlTg1iaAuro,78466 +transformers/models/unispeech_sat/modular_unispeech_sat.py,sha256=9xX9MGAhCxGJ51X9PdhH8B_oAUWjJMqv0JnTHA4fFP0,18614 +transformers/models/univnet/__init__.py,sha256=hfHyxyKGEfd58p1fUSA3IxK2q6JkVatkGceVaoKuODk,1041 +transformers/models/univnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/univnet/__pycache__/configuration_univnet.cpython-310.pyc,, +transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc,, +transformers/models/univnet/__pycache__/modeling_univnet.cpython-310.pyc,, +transformers/models/univnet/configuration_univnet.py,sha256=dwE48PdXxA4_3tbux06b7HAsdUK9c5-capcOdDeAr9c,6758 +transformers/models/univnet/feature_extraction_univnet.py,sha256=9iNfhNCBNRWaY7odFlzXzMLzauhSzgFGdr20sQ4xPWw,22880 +transformers/models/univnet/modeling_univnet.py,sha256=LTX4W5D4B8B9zK29ba8M5z5XGtqHBbj2n8n8OJP6fis,25779 +transformers/models/upernet/__init__.py,sha256=Wq3u7yXJul5PLmjalxKgx451sa_WuSXbEM45bZsRv3E,995 
+transformers/models/upernet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/upernet/__pycache__/configuration_upernet.cpython-310.pyc,, +transformers/models/upernet/__pycache__/modeling_upernet.cpython-310.pyc,, +transformers/models/upernet/configuration_upernet.py,sha256=ih_8sDO-OzHJOCqUDhtiBOtRanCrLfhx3c6OBZryILI,6859 +transformers/models/upernet/modeling_upernet.py,sha256=vWF4TKPs6t8AxHBVOGsw2BaHRyXnithQ3RFs-iNRHUM,14577 +transformers/models/video_llava/__init__.py,sha256=bsLGp1WBBO_AvNVRxzOn5k7OYQIbX9SqFhESd24FImc,1093 +transformers/models/video_llava/__pycache__/__init__.cpython-310.pyc,, +transformers/models/video_llava/__pycache__/configuration_video_llava.cpython-310.pyc,, +transformers/models/video_llava/__pycache__/image_processing_video_llava.cpython-310.pyc,, +transformers/models/video_llava/__pycache__/modeling_video_llava.cpython-310.pyc,, +transformers/models/video_llava/__pycache__/processing_video_llava.cpython-310.pyc,, +transformers/models/video_llava/__pycache__/video_processing_video_llava.cpython-310.pyc,, +transformers/models/video_llava/configuration_video_llava.py,sha256=zetmD2cvfHzFEiU8PJn1qWWVPwMgT08YMR3q-DTPukk,6448 +transformers/models/video_llava/image_processing_video_llava.py,sha256=bCd_ql-CCvQDJ941KfJSISwfHigc6xm-YPvzDBjEQew,19073 +transformers/models/video_llava/modeling_video_llava.py,sha256=2hWQFmPcp16lrIWQOLwmsFMBQqGI_Q86-etGSlMVoI8,33607 +transformers/models/video_llava/processing_video_llava.py,sha256=vilw0o5p5ngAbLyQj3IgNGo0Up_80ND2HZVA-CWyP28,11947 +transformers/models/video_llava/video_processing_video_llava.py,sha256=UQK5S3qUDL1BIIYhqN41Y-iIs7dQL2DNMqZZ_gLBVro,1879 +transformers/models/videomae/__init__.py,sha256=IYw3qXj1-PDmBAp---YaZyqdBsIjdMZQI37xT_-9SgY,1089 +transformers/models/videomae/__pycache__/__init__.cpython-310.pyc,, +transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc,, +transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc,, 
+transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc,, +transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc,, +transformers/models/videomae/configuration_videomae.py,sha256=O0BwqYZnc9Q5Kpemmel6rOxeDBSj7KKCxgpHfMVCVGE,6600 +transformers/models/videomae/feature_extraction_videomae.py,sha256=YfjgYL2im5-5OtnL_U9Z72Fxm58jNAIQWkUlszLJEtY,1316 +transformers/models/videomae/image_processing_videomae.py,sha256=UhuQHRAS0I_Y4j5pzcAvehONMuDHpEwGPCUjXGhIESA,16771 +transformers/models/videomae/modeling_videomae.py,sha256=NnQdvZzmaOU3iGWSh-GLjjbcc7Qj81yJWEv4hgSaGlA,43989 +transformers/models/vilt/__init__.py,sha256=efaZSTGsk3QhZmBrc6F29q55LkC_1Vb8fNC0MY4881Q,1154 +transformers/models/vilt/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc,, +transformers/models/vilt/__pycache__/feature_extraction_vilt.cpython-310.pyc,, +transformers/models/vilt/__pycache__/image_processing_vilt.cpython-310.pyc,, +transformers/models/vilt/__pycache__/image_processing_vilt_fast.cpython-310.pyc,, +transformers/models/vilt/__pycache__/modeling_vilt.cpython-310.pyc,, +transformers/models/vilt/__pycache__/processing_vilt.cpython-310.pyc,, +transformers/models/vilt/configuration_vilt.py,sha256=B7lnWQV7QC5CeliGPQF5TP5Ci-s35bv7_LX4UvOVNUs,6817 +transformers/models/vilt/feature_extraction_vilt.py,sha256=OYz67RYXTxX9oQpJ9b-lSzCduexmgugUpkiPHSfcs9s,1284 +transformers/models/vilt/image_processing_vilt.py,sha256=dwTZIYmMY8rt1q6ey1rdkvwJjMj-YqzIBzxSDknIDYE,23284 +transformers/models/vilt/image_processing_vilt_fast.py,sha256=-U7Sua6PmfA56qq3L39lOyCTQUYTdvklAfOcvGco9Ow,10004 +transformers/models/vilt/modeling_vilt.py,sha256=7vBaSrDs6454Gvvpw51VDOI3IT_s9jeGZA6zIpOKRLM,57618 +transformers/models/vilt/processing_vilt.py,sha256=3PESD2fBTBB34_Lx9BJbSyv9UASsLXG1MfuHEHoe8fU,6103 +transformers/models/vipllava/__init__.py,sha256=HJ5mZUNdt_bmaC9l-GycD7mVT2r1oN15prmnlBtz6oA,997 
+transformers/models/vipllava/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vipllava/__pycache__/configuration_vipllava.cpython-310.pyc,, +transformers/models/vipllava/__pycache__/modeling_vipllava.cpython-310.pyc,, +transformers/models/vipllava/__pycache__/modular_vipllava.cpython-310.pyc,, +transformers/models/vipllava/configuration_vipllava.py,sha256=BUvvdV2CgiEp5h-pWbuEUi7t8bnkl6haqljNtf3en6c,5049 +transformers/models/vipllava/modeling_vipllava.py,sha256=nvxmBmtgfw3AOrdiZKnvs5CNEmCkkMEllcknCy0fQz8,20680 +transformers/models/vipllava/modular_vipllava.py,sha256=sMyZjncsPSdQ9-sNNdSEWGs0XD_ITXoTnNXn-67sF0I,12305 +transformers/models/vision_encoder_decoder/__init__.py,sha256=xK5xKVeIOZSN1d9Y2nDa3NYkLdGidbwgQ6Es8JhzKzA,1135 +transformers/models/vision_encoder_decoder/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vision_encoder_decoder/__pycache__/configuration_vision_encoder_decoder.cpython-310.pyc,, +transformers/models/vision_encoder_decoder/__pycache__/modeling_flax_vision_encoder_decoder.cpython-310.pyc,, +transformers/models/vision_encoder_decoder/__pycache__/modeling_tf_vision_encoder_decoder.cpython-310.pyc,, +transformers/models/vision_encoder_decoder/__pycache__/modeling_vision_encoder_decoder.cpython-310.pyc,, +transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py,sha256=6uLyTC16qnr7xboS9yTb1C6OvVWu2snFata6p85Crcs,8475 +transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py,sha256=34_EAdUqNMIieXc8H4Phm_gwFyGPcDZUyjPpTwI2le0,41615 +transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py,sha256=qRGCkG5ctzU7KRxbUBRwkplcQAutV781TDsBgfsBLP8,36173 +transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py,sha256=Sq2J0O4G_qAlYE-IebFV_4E0YJjfKMNyYN1XRcnUsBE,29422 +transformers/models/vision_text_dual_encoder/__init__.py,sha256=LRXs5oXk4_8AaHuIVaj1IgBO4X1vwP-ehQC1T1xEiAI,1198 
+transformers/models/vision_text_dual_encoder/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vision_text_dual_encoder/__pycache__/configuration_vision_text_dual_encoder.cpython-310.pyc,, +transformers/models/vision_text_dual_encoder/__pycache__/modeling_flax_vision_text_dual_encoder.cpython-310.pyc,, +transformers/models/vision_text_dual_encoder/__pycache__/modeling_tf_vision_text_dual_encoder.cpython-310.pyc,, +transformers/models/vision_text_dual_encoder/__pycache__/modeling_vision_text_dual_encoder.cpython-310.pyc,, +transformers/models/vision_text_dual_encoder/__pycache__/processing_vision_text_dual_encoder.cpython-310.pyc,, +transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py,sha256=Zqb3nGZWG-J3a2FPUY4ocbDYWiLVeZOiFua-MXTDUfQ,5023 +transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py,sha256=1Op0R17qW21kpD6WJ6iQDBajVd5xGW4aPHE9_yR5GGw,26398 +transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py,sha256=NnCOaAYtjQ4yUv90Am7cHnxKU9VgDDnXGQqnJSsgEs4,28626 +transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py,sha256=el2H0m-8HOaVL4ev5aOxhnVO9W-uCiw8-21aLchm25A,18131 +transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py,sha256=9xOD08qJx9s5VqAHikDJAXvo0WgfUv8s_4YTW477wPA,7686 +transformers/models/visual_bert/__init__.py,sha256=zZFHfkE7OUMZUwYvB7v4ZIBXVUW9Mboqoa1QdTQURWM,1003 +transformers/models/visual_bert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/visual_bert/__pycache__/configuration_visual_bert.cpython-310.pyc,, +transformers/models/visual_bert/__pycache__/modeling_visual_bert.cpython-310.pyc,, +transformers/models/visual_bert/configuration_visual_bert.py,sha256=4U17YnlSjbOpzsAPdGH_EfvBjv7jppbHWlmLBrchGM4,6767 +transformers/models/visual_bert/modeling_visual_bert.py,sha256=NlRjZauVj-ky_Zk5mFPmsmA-VbO1gyNnKU-mjACIr_o,70241 
+transformers/models/vit/__init__.py,sha256=uTQRjeWgJLHyXfc7yVOEyv7wnr42Jhy-8p9k5UUbxAM,1186 +transformers/models/vit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vit/__pycache__/configuration_vit.cpython-310.pyc,, +transformers/models/vit/__pycache__/feature_extraction_vit.cpython-310.pyc,, +transformers/models/vit/__pycache__/image_processing_vit.cpython-310.pyc,, +transformers/models/vit/__pycache__/image_processing_vit_fast.cpython-310.pyc,, +transformers/models/vit/__pycache__/modeling_flax_vit.cpython-310.pyc,, +transformers/models/vit/__pycache__/modeling_tf_vit.cpython-310.pyc,, +transformers/models/vit/__pycache__/modeling_vit.cpython-310.pyc,, +transformers/models/vit/configuration_vit.py,sha256=qzjqndsRc6Pyd8YiTFvAbe-OIIJyRSPOdUFKVSJB2Fg,6290 +transformers/models/vit/feature_extraction_vit.py,sha256=v5PPSon24ldH0wC-42BQTxGakc-ow2aUh-Egq5D9hJw,1276 +transformers/models/vit/image_processing_vit.py,sha256=MOU1FkkDPH2tVUOENE26fv5sVhWqxDc6n7VaeqYp05o,14429 +transformers/models/vit/image_processing_vit_fast.py,sha256=yrkXCSNPpRXfBiQhsgLao-dFOALdBrWa4dDOwZvGiwQ,1237 +transformers/models/vit/modeling_flax_vit.py,sha256=95SBab3CAIoD3bwgWfN6Y7v44fR6EHNJVDuPqi-FOX8,25503 +transformers/models/vit/modeling_tf_vit.py,sha256=r__S8GF7AS7xbOpnVp_JOdE4ByH1H1WBnv1CZdGLTmE,37321 +transformers/models/vit/modeling_vit.py,sha256=DNU7QU-xVs4llO5O5c-J7bnyhXkIXt0wRSFErPOBwLg,34087 +transformers/models/vit_mae/__init__.py,sha256=C8NcxWwzXlNMeMOA9DNHfDYvRF9biIuUduuwhoaTTD8,1034 +transformers/models/vit_mae/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vit_mae/__pycache__/configuration_vit_mae.cpython-310.pyc,, +transformers/models/vit_mae/__pycache__/modeling_tf_vit_mae.cpython-310.pyc,, +transformers/models/vit_mae/__pycache__/modeling_vit_mae.cpython-310.pyc,, +transformers/models/vit_mae/configuration_vit_mae.py,sha256=3nnWDAbp6WLfOHLO3taJUNEuGRlk3oAa0qaLEEJgjHQ,6372 
+transformers/models/vit_mae/modeling_tf_vit_mae.py,sha256=GKXpysqOcon8KW-3KRQoG9mvLBb5b8mIk2Ix6VQyYhE,58009 +transformers/models/vit_mae/modeling_vit_mae.py,sha256=gyF7S1Bc3faltcMNizjbpG81Ffu4AevCmQnJMmLf7hY,45360 +transformers/models/vit_msn/__init__.py,sha256=Y1g56VRSNr-PxS-g4Cp2IlRR5M9CiaFGlhAQXwszGHo,995 +transformers/models/vit_msn/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vit_msn/__pycache__/configuration_vit_msn.cpython-310.pyc,, +transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc,, +transformers/models/vit_msn/configuration_vit_msn.py,sha256=HeU0UloranISU9zLiPsK0CyooMacqogTNmwE4xp2N-o,4864 +transformers/models/vit_msn/modeling_vit_msn.py,sha256=Ub7YAmaDjAZ0ZJ5NwIdq13kdWl5AZAqD6V-zQG0Eo88,28833 +transformers/models/vitdet/__init__.py,sha256=13LNGZwvKK3tBrQWVs43rQbxbgqvxLfnM0uMqomHqhM,993 +transformers/models/vitdet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vitdet/__pycache__/configuration_vitdet.cpython-310.pyc,, +transformers/models/vitdet/__pycache__/modeling_vitdet.cpython-310.pyc,, +transformers/models/vitdet/configuration_vitdet.py,sha256=CM18kVFmgjDEp7leQPG0L60VKNmBebmxYvEGZN4Kvlg,7541 +transformers/models/vitdet/modeling_vitdet.py,sha256=tGJl5_5BJ1P8NkMkDGicQ5MDoHWUdHjDR2UfBiLmWvQ,32395 +transformers/models/vitmatte/__init__.py,sha256=al6dWrth9LhRLjmVZrxSi0SRcMMUH_UNpMmR5nwflSc,1092 +transformers/models/vitmatte/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc,, +transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc,, +transformers/models/vitmatte/__pycache__/image_processing_vitmatte_fast.cpython-310.pyc,, +transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc,, +transformers/models/vitmatte/configuration_vitmatte.py,sha256=4o0WTCZLL3RcipC5atxgQU6zO9x8hgrWOs83olgQM2Q,6460 
+transformers/models/vitmatte/image_processing_vitmatte.py,sha256=dXRNvL7ix2bEencYaq7-7S2kXlmNtViHYw95CJJaU68,13554 +transformers/models/vitmatte/image_processing_vitmatte_fast.py,sha256=RHVAF8eM5_8tysejotdcWG6p6lYPOkzMdK9B2l3iayg,6765 +transformers/models/vitmatte/modeling_vitmatte.py,sha256=Xw_A4MvjsM34PDy5XxkZ26mbYm7WP8wiYhbVc4O6k0Y,10618 +transformers/models/vitpose/__init__.py,sha256=VA7aRcVMgFJH46i6HurkXJS0Z38BotU3H3o3e2wgyXU,1039 +transformers/models/vitpose/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vitpose/__pycache__/configuration_vitpose.cpython-310.pyc,, +transformers/models/vitpose/__pycache__/image_processing_vitpose.cpython-310.pyc,, +transformers/models/vitpose/__pycache__/modeling_vitpose.cpython-310.pyc,, +transformers/models/vitpose/configuration_vitpose.py,sha256=Ij3xe1cFLDOuIJYUTh-SP2UPKSf9pHwXlLwKRDtcqdc,6015 +transformers/models/vitpose/image_processing_vitpose.py,sha256=3pF3NhIoID8Yw3SDOAdVZBJPUJtxabpAgkwrCArTg34,29590 +transformers/models/vitpose/modeling_vitpose.py,sha256=QfYBW5P3X7R2Hu6f1N2l28RVlCGvap-buWQsR0MIZdY,12415 +transformers/models/vitpose_backbone/__init__.py,sha256=W5IjP47Ykg5KRs8S9ztAbtfQ__n6sbJUZG4UDIGdGmA,577 +transformers/models/vitpose_backbone/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vitpose_backbone/__pycache__/configuration_vitpose_backbone.cpython-310.pyc,, +transformers/models/vitpose_backbone/__pycache__/modeling_vitpose_backbone.cpython-310.pyc,, +transformers/models/vitpose_backbone/configuration_vitpose_backbone.py,sha256=k17SrNK_1I7cE43C3Cu2ZU5V5VnWQA7RsmOSSzXCEME,6651 +transformers/models/vitpose_backbone/modeling_vitpose_backbone.py,sha256=1NgKrWJUnTweT21J-7tEDGuOUtRU4kCpNCnLNdhnVCQ,21942 +transformers/models/vits/__init__.py,sha256=7baZcqGvFlYQxAl721XtMptMZKkzvBOa2ttyOhqhUtk,1026 +transformers/models/vits/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vits/__pycache__/configuration_vits.cpython-310.pyc,, 
+transformers/models/vits/__pycache__/modeling_vits.cpython-310.pyc,, +transformers/models/vits/__pycache__/tokenization_vits.cpython-310.pyc,, +transformers/models/vits/configuration_vits.py,sha256=OT42q2ihf2Q9r9qm7JJM4gJlOqQSZyVH8Jk3Qsbcji0,13892 +transformers/models/vits/modeling_vits.py,sha256=qNk_ZWLxxwlvoKaEx-HkktFvp1wygyM0TQILF1Sjcsk,61965 +transformers/models/vits/tokenization_vits.py,sha256=hMWf72PabgSlH-UJjN4-ddrNQGb8n5e7d5mSXuGTK9U,9369 +transformers/models/vivit/__init__.py,sha256=LT2FipIBdB69s9UY4viyuB5q2e0v3bCwtQMiOEOj2xg,1033 +transformers/models/vivit/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vivit/__pycache__/configuration_vivit.cpython-310.pyc,, +transformers/models/vivit/__pycache__/image_processing_vivit.cpython-310.pyc,, +transformers/models/vivit/__pycache__/modeling_vivit.cpython-310.pyc,, +transformers/models/vivit/configuration_vivit.py,sha256=TVsjmzoXac2Xh0zcHS8fy0RmFivbol3WsO7kj-gKZik,5142 +transformers/models/vivit/image_processing_vivit.py,sha256=ocG2i431U5hruiWC2kR4z6qnGWyh4L98jnqB9fSSiC0,19245 +transformers/models/vivit/modeling_vivit.py,sha256=NaLIJWUTV_SHkndBpoYd8Imvh4lWvM1oWbVtSqxRvsI,31923 +transformers/models/vjepa2/__init__.py,sha256=uG8tvHYoCxXAMjQuCfsT56YCg0l8_2e6H5Nm7L7Ygm0,1056 +transformers/models/vjepa2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/vjepa2/__pycache__/configuration_vjepa2.cpython-310.pyc,, +transformers/models/vjepa2/__pycache__/modeling_vjepa2.cpython-310.pyc,, +transformers/models/vjepa2/__pycache__/video_processing_vjepa2.cpython-310.pyc,, +transformers/models/vjepa2/configuration_vjepa2.py,sha256=mpPeIpTffMjlfOJ5m0mM_wj16znJAKUabh3S1EGvcoM,7055 +transformers/models/vjepa2/modeling_vjepa2.py,sha256=N-GhFhP81EjnSSCIpvj1NNI6mWm5-5sf192YZWZyrC8,50076 +transformers/models/vjepa2/video_processing_vjepa2.py,sha256=nnb_IGGov_QaZRveFkQUI6aEpZs8d0YsJehp6JSb9GQ,2120 +transformers/models/voxtral/__init__.py,sha256=hARPigCbvhSlR0iKCDkHcUnsXtoCgXnLFSgZnw5HQus,1053 
+transformers/models/voxtral/__pycache__/__init__.cpython-310.pyc,, +transformers/models/voxtral/__pycache__/configuration_voxtral.cpython-310.pyc,, +transformers/models/voxtral/__pycache__/modeling_voxtral.cpython-310.pyc,, +transformers/models/voxtral/__pycache__/modular_voxtral.cpython-310.pyc,, +transformers/models/voxtral/__pycache__/processing_voxtral.cpython-310.pyc,, +transformers/models/voxtral/configuration_voxtral.py,sha256=y_8_udDNsK5Zsy6JAkbIJ7QhMYCDLwHNwp1QhQcYmDs,8474 +transformers/models/voxtral/modeling_voxtral.py,sha256=P4YKjtgg6oymssewpMO4kdHBUKALKIP8dulV0OE8uik,23099 +transformers/models/voxtral/modular_voxtral.py,sha256=JFPELk4Ljvb-WTmo7_CrR3ldUG8hjqnuwRAFYPmVgmg,11613 +transformers/models/voxtral/processing_voxtral.py,sha256=3iwsSFSFicS7smrnECfynOGwtwPy5aJ3ydYpnA-PkVE,20996 +transformers/models/wav2vec2/__init__.py,sha256=5nXyY4dA0h9iNUQZrGAUXtjOnU6KbVq2B1gRzEGEUNI,1206 +transformers/models/wav2vec2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/configuration_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/feature_extraction_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/modeling_flax_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/modeling_tf_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/modeling_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/processing_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/__pycache__/tokenization_wav2vec2.cpython-310.pyc,, +transformers/models/wav2vec2/configuration_wav2vec2.py,sha256=A4XGuSVpZUfWvd-ZJPkcGhISsBSNtSgUCNU2gN0mZos,20100 +transformers/models/wav2vec2/feature_extraction_wav2vec2.py,sha256=OTlRDKVJjkM2J93nN-PRW8xWetFO6Q7TsoRHLvew2pA,11609 +transformers/models/wav2vec2/modeling_flax_wav2vec2.py,sha256=es7RwXr70qYl5QmOlsGusK1OL55tU9_PD5VmLuCeF4E,57379 
+transformers/models/wav2vec2/modeling_tf_wav2vec2.py,sha256=CLzi1cUuo9m0Dbi9ydK4sS7E4z39ZhVI7tFUOIsxMJM,78572 +transformers/models/wav2vec2/modeling_wav2vec2.py,sha256=tHxfSOF_JSsjb0TLHPgzTtrL8y9Ooq4SwUWjLCKuEX4,101124 +transformers/models/wav2vec2/processing_wav2vec2.py,sha256=sIOnbKf0GrT_1cE04Yvyb6PoIBcATsO40mAP7jTeAmc,8540 +transformers/models/wav2vec2/tokenization_wav2vec2.py,sha256=xaEOdRxousCdBFo4_snzcolYYFEype-apHZsIhhWxLc,38782 +transformers/models/wav2vec2_bert/__init__.py,sha256=DL010VL3ZV3lAugPH-BOTNSgIedotOEaoy8iHo0sC1Q,1051 +transformers/models/wav2vec2_bert/__pycache__/__init__.cpython-310.pyc,, +transformers/models/wav2vec2_bert/__pycache__/configuration_wav2vec2_bert.cpython-310.pyc,, +transformers/models/wav2vec2_bert/__pycache__/modeling_wav2vec2_bert.cpython-310.pyc,, +transformers/models/wav2vec2_bert/__pycache__/modular_wav2vec2_bert.cpython-310.pyc,, +transformers/models/wav2vec2_bert/__pycache__/processing_wav2vec2_bert.cpython-310.pyc,, +transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py,sha256=XpsUE-kvk4hTixrK5sbClT8A5j46A3XwzBcwWDs5E7g,18142 +transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py,sha256=Emceflb16OQkPzj9qOSLHuvFvY_iK4RcPOE5HZDAk04,66112 +transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py,sha256=etlcrbz0EEviMqc-9J28SWmRYsZy00Kj5tkUAwye1hM,44972 +transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py,sha256=lmagmiwXlzoqzI57JADvxRdy5AnGq948-Z1aJddTxx0,7876 +transformers/models/wav2vec2_conformer/__init__.py,sha256=JBpapW8VF3yck4Bk29xKyUiQZqB_CXLSYtYxXGXAu2Q,1017 +transformers/models/wav2vec2_conformer/__pycache__/__init__.cpython-310.pyc,, +transformers/models/wav2vec2_conformer/__pycache__/configuration_wav2vec2_conformer.cpython-310.pyc,, +transformers/models/wav2vec2_conformer/__pycache__/modeling_wav2vec2_conformer.cpython-310.pyc,, +transformers/models/wav2vec2_conformer/__pycache__/modular_wav2vec2_conformer.cpython-310.pyc,, 
+transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py,sha256=gBeb_cZC5XCDOmg1llPUQ0ELDS-1u0_eGZRrA98tLxM,20938 +transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py,sha256=tLwQenpfgNlNN_ZfjrN3vjfj2euRQodkEREBTllxL2w,85467 +transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py,sha256=J15j9EItXMqJDeMDcMe3-PEWmSYYyrlBgOgVPiqBA20,30777 +transformers/models/wav2vec2_phoneme/__init__.py,sha256=LV4FKcFYNt0GuJvfsUOwTYVFRVfuzUuclKRybFyN9lk,967 +transformers/models/wav2vec2_phoneme/__pycache__/__init__.cpython-310.pyc,, +transformers/models/wav2vec2_phoneme/__pycache__/tokenization_wav2vec2_phoneme.cpython-310.pyc,, +transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py,sha256=DdFdFOAOhnhy5Iq6U-eno8m-bgLbGC-2xiHauNhszd4,23217 +transformers/models/wav2vec2_with_lm/__init__.py,sha256=yZKHsma85j7AMLB8g8uNXL5D_E5Gc3Vqe-D-V2W15oY,965 +transformers/models/wav2vec2_with_lm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/wav2vec2_with_lm/__pycache__/processing_wav2vec2_with_lm.cpython-310.pyc,, +transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py,sha256=cxCksE0dl-HONpCWGgzV_gfr-SHiDMhuZaoPB-LTl5Q,30031 +transformers/models/wavlm/__init__.py,sha256=wYnYuOpw2e95lauqDbD7u3OC-Pez8yoRsrgExSh_WJQ,991 +transformers/models/wavlm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/wavlm/__pycache__/configuration_wavlm.cpython-310.pyc,, +transformers/models/wavlm/__pycache__/modeling_wavlm.cpython-310.pyc,, +transformers/models/wavlm/__pycache__/modular_wavlm.cpython-310.pyc,, +transformers/models/wavlm/configuration_wavlm.py,sha256=HrK0dtsxcVB-k3yO2px6RS-TW1lonvx_x8uRkz7iJqQ,18588 +transformers/models/wavlm/modeling_wavlm.py,sha256=O91Gzm72L0nSRG-Q-JiCO0AsLTEJosHza5BsEHh504Q,72556 +transformers/models/wavlm/modular_wavlm.py,sha256=4sgAWhmP5YPHzYuK-T3gjrh-Tu4VjOutEVAf8k4nDgQ,23178 +transformers/models/whisper/__init__.py,sha256=qT70wGFDyOsAGuyaHe9if7kn8fxK2shCe6rovr3onw4,1244 
+transformers/models/whisper/__pycache__/__init__.cpython-310.pyc,, +transformers/models/whisper/__pycache__/configuration_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/english_normalizer.cpython-310.pyc,, +transformers/models/whisper/__pycache__/feature_extraction_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/generation_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/modeling_flax_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/modeling_tf_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/modeling_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/processing_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/tokenization_whisper.cpython-310.pyc,, +transformers/models/whisper/__pycache__/tokenization_whisper_fast.cpython-310.pyc,, +transformers/models/whisper/configuration_whisper.py,sha256=CZgcgXbDdSSxvAwvJ0BmsVuhJLFOVgeGKnKn83yz978,17102 +transformers/models/whisper/english_normalizer.py,sha256=GmqBtyvGnsz2HXoksWAVu2wxJJJUclq-CSdH40jP51g,22857 +transformers/models/whisper/feature_extraction_whisper.py,sha256=43vzBLDCjU8a02N8-muLTY2V4Be7Q5D0bSP0kdU334g,16171 +transformers/models/whisper/generation_whisper.py,sha256=jAWD5OSjG-Zg9HKlY7ZwirRXx3Cx1-wdxlddJSwj4LU,110510 +transformers/models/whisper/modeling_flax_whisper.py,sha256=Sdt-z5ZQXvD6I501rsCdPHmX5UuVtz1nWCmpt8ub5YY,74027 +transformers/models/whisper/modeling_tf_whisper.py,sha256=OGFAOCKCYScoYDvYlokYFX7ze5Ay_061y3AE-xhlL1E,84718 +transformers/models/whisper/modeling_whisper.py,sha256=piMdPe3z_Xw8vzFYE-GHiLY5yP7nQzNklUW86WKfm7s,74291 +transformers/models/whisper/processing_whisper.py,sha256=S1BSShkgI4Bq3K3JbcQHp9xxIaiLU_rsN3pz6-jj1w4,3949 +transformers/models/whisper/tokenization_whisper.py,sha256=JxHC6ryKd8kjOLXOIjKf37jdEf48Y1McdrWcmotRLHc,58409 +transformers/models/whisper/tokenization_whisper_fast.py,sha256=Q6HdLOFUejR-aEIJZo0SkCkIQCtFL7eSSyvSEJWbaJ0,30284 
+transformers/models/x_clip/__init__.py,sha256=ufjh6w7SNuNAUjAHp_MK3yRcrHm22-SfhZ0ZfbiXhGw,1030 +transformers/models/x_clip/__pycache__/__init__.cpython-310.pyc,, +transformers/models/x_clip/__pycache__/configuration_x_clip.cpython-310.pyc,, +transformers/models/x_clip/__pycache__/modeling_x_clip.cpython-310.pyc,, +transformers/models/x_clip/__pycache__/processing_x_clip.cpython-310.pyc,, +transformers/models/x_clip/configuration_x_clip.py,sha256=s3aGXvewhpL5nrY57j9KihuGZYZY62xlJXCmtJ1AYjg,18244 +transformers/models/x_clip/modeling_x_clip.py,sha256=5GnhitjlPp723wj5RuJYzHcOed82z0Uc_eSWDA5ejFs,65591 +transformers/models/x_clip/processing_x_clip.py,sha256=xbOsNr8HnywiLtCjluts7B74jDj5b80hN6D1IRF4lLg,6927 +transformers/models/xglm/__init__.py,sha256=ZU7tQBmBXzr8wh9MJNDZ5uIrsCRQP8tuNrpGDd2W3OI,1142 +transformers/models/xglm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xglm/__pycache__/configuration_xglm.cpython-310.pyc,, +transformers/models/xglm/__pycache__/modeling_flax_xglm.cpython-310.pyc,, +transformers/models/xglm/__pycache__/modeling_tf_xglm.cpython-310.pyc,, +transformers/models/xglm/__pycache__/modeling_xglm.cpython-310.pyc,, +transformers/models/xglm/__pycache__/tokenization_xglm.cpython-310.pyc,, +transformers/models/xglm/__pycache__/tokenization_xglm_fast.cpython-310.pyc,, +transformers/models/xglm/configuration_xglm.py,sha256=m0sfPYf0qKl0pT9sOd3ssoQv13yt5IWRnUn11aGDa1Q,5881 +transformers/models/xglm/modeling_flax_xglm.py,sha256=BLW965ik0iRDR9fGbbw82SyuDxNs__9qKdQxZLPq7XI,33217 +transformers/models/xglm/modeling_tf_xglm.py,sha256=CHMd92nY-UsHA-lPDKOKdrPXHKUFWh3_7CDEGvmzy3E,45026 +transformers/models/xglm/modeling_xglm.py,sha256=4xCRowEJVCc86_dqfrw7rDrFVg5ob3v4-uYMgbdnPGY,32407 +transformers/models/xglm/tokenization_xglm.py,sha256=lzdJEYP3S8w-HLa5nX7BAllqXQWnmr49kQsSYl3Cxe4,12576 +transformers/models/xglm/tokenization_xglm_fast.py,sha256=4G278mqxvBG0onsXTichXPRki94OiZJZ3Ioy4t6TfKQ,7470 
+transformers/models/xlm/__init__.py,sha256=QevE83gMJ5h41H7EKxRAUN-kmE0zgOsyGj6QzWcpjmk,1058 +transformers/models/xlm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xlm/__pycache__/configuration_xlm.cpython-310.pyc,, +transformers/models/xlm/__pycache__/modeling_tf_xlm.cpython-310.pyc,, +transformers/models/xlm/__pycache__/modeling_xlm.cpython-310.pyc,, +transformers/models/xlm/__pycache__/tokenization_xlm.cpython-310.pyc,, +transformers/models/xlm/configuration_xlm.py,sha256=k216zyLI3r20HXsFfesj0QQiF-4oCxjUM3ONCxJZtzY,11062 +transformers/models/xlm/modeling_tf_xlm.py,sha256=kEPNqT6vET9FaGDUoo8-wfmCkmFIMnfU3XVPUyAVN8E,56527 +transformers/models/xlm/modeling_xlm.py,sha256=m3uFEYVq57YMEjSJWJ1TgIBnXhpRSwb9BBduK3w6sGY,76761 +transformers/models/xlm/tokenization_xlm.py,sha256=on-cVBeHILqyhqK5xOY9PP49TIB2HNwItq3Y_9uOtCI,23347 +transformers/models/xlm_roberta/__init__.py,sha256=dhjej7PBi8UrfXRkTxh9CWXnw8wuLZHPT9FYFfCkIHg,1184 +transformers/models/xlm_roberta/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xlm_roberta/__pycache__/configuration_xlm_roberta.cpython-310.pyc,, +transformers/models/xlm_roberta/__pycache__/modeling_flax_xlm_roberta.cpython-310.pyc,, +transformers/models/xlm_roberta/__pycache__/modeling_tf_xlm_roberta.cpython-310.pyc,, +transformers/models/xlm_roberta/__pycache__/modeling_xlm_roberta.cpython-310.pyc,, +transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta.cpython-310.pyc,, +transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta_fast.cpython-310.pyc,, +transformers/models/xlm_roberta/configuration_xlm_roberta.py,sha256=A_vz0mpN0KhV6dbuOO8FR5qFCXXYeSASknBS_kVLXPM,7596 +transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py,sha256=ndYuXyoj_I5xVW2TWB5SrXt_D3hlqShvbOY-tW0eOkM,58777 +transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py,sha256=pFUXeNncEmL8gNGKg3Mwkh6B-db2yKXgnf5ZvrlN-Go,81896 
+transformers/models/xlm_roberta/modeling_xlm_roberta.py,sha256=8ft3dysl2yW_drfRn04825iLmhaA6QzPrTzDHLBRs4w,72085 +transformers/models/xlm_roberta/tokenization_xlm_roberta.py,sha256=GYqlEjrwdzxZ1xdDuCpJRAKEZAIPuBdGWetCD7eXpzw,12804 +transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py,sha256=ojIIHjd2wELfeiFylqMYN0iYMeEaAtimZFOHVAFYkkM,7808 +transformers/models/xlm_roberta_xl/__init__.py,sha256=V0fXTKk2hQmf5dKogCJ0HSiRBxVX-rs7c414ZoZIh28,1009 +transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc,, +transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc,, +transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py,sha256=f8cw938xnzVrNMZA9C6A0wIQm_mmtUr6EMQAgamN9Sw,7348 +transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py,sha256=iRagZtRTuoOI9NNQZEeSv1f6AprBcLtbASaP2NbNkfs,67169 +transformers/models/xlnet/__init__.py,sha256=t-UvrFyorGF7VMuATzjrB_cUqKsM-8O9KqxiWjtJqhs,1109 +transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc,, +transformers/models/xlnet/__pycache__/modeling_tf_xlnet.cpython-310.pyc,, +transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc,, +transformers/models/xlnet/__pycache__/tokenization_xlnet.cpython-310.pyc,, +transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc,, +transformers/models/xlnet/configuration_xlnet.py,sha256=U_WpCoqALv86cbvTXgTVnJwOfl3nzcGTgZJd_9SDhvY,10953 +transformers/models/xlnet/modeling_tf_xlnet.py,sha256=FjkGozmhkH6qIPhQO15VCkDJMWOVj2QD7aCR3Nb12wQ,77744 +transformers/models/xlnet/modeling_xlnet.py,sha256=rzkZRV_HSH-iY3LgmwmRYkRriz8uP2mFid-euskYGKE,106723 +transformers/models/xlnet/tokenization_xlnet.py,sha256=8GVn73lPjtz6PljvlLuwvuNCElWfbLHfHtdYaI8XrS8,15800 
+transformers/models/xlnet/tokenization_xlnet_fast.py,sha256=ZfWE9TWuq8NC7Q3Z2y-plAofp0o9PtQEu08U4M7Qx6s,9247 +transformers/models/xlstm/__init__.py,sha256=-Vfj7bUcDAD3TguoDgKW0zpzZ8KtOmnUNwSkvL6Df8k,1047 +transformers/models/xlstm/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xlstm/__pycache__/configuration_xlstm.cpython-310.pyc,, +transformers/models/xlstm/__pycache__/modeling_xlstm.cpython-310.pyc,, +transformers/models/xlstm/configuration_xlstm.py,sha256=jXCuoIYX4cb4atgAMjgove47wmm6dkTStY4hAFC3wZI,12847 +transformers/models/xlstm/modeling_xlstm.py,sha256=NG4UCINzgoBLsBj2KdMMM0GR_N0MapEOauDw5iZOOOI,65988 +transformers/models/xmod/__init__.py,sha256=WLxIbzC8oCEkMrerWHTy7GLopz0mqocSaacdcyb_BhQ,989 +transformers/models/xmod/__pycache__/__init__.cpython-310.pyc,, +transformers/models/xmod/__pycache__/configuration_xmod.cpython-310.pyc,, +transformers/models/xmod/__pycache__/modeling_xmod.cpython-310.pyc,, +transformers/models/xmod/configuration_xmod.py,sha256=W5bQLbTh3EhMd-Lvseyl28uQhkVVCdPSdhcRXJO7hcg,9180 +transformers/models/xmod/modeling_xmod.py,sha256=Y5rUAnLEAZ6dXNmFVd1qAKfCVV_k4wrcPlBBbVSvBYs,68344 +transformers/models/yolos/__init__.py,sha256=UlbQDtMQJaGRcin-iz6NOEFWT8otanBndRuw4VrWUiQ,1124 +transformers/models/yolos/__pycache__/__init__.cpython-310.pyc,, +transformers/models/yolos/__pycache__/configuration_yolos.cpython-310.pyc,, +transformers/models/yolos/__pycache__/feature_extraction_yolos.cpython-310.pyc,, +transformers/models/yolos/__pycache__/image_processing_yolos.cpython-310.pyc,, +transformers/models/yolos/__pycache__/image_processing_yolos_fast.cpython-310.pyc,, +transformers/models/yolos/__pycache__/modeling_yolos.cpython-310.pyc,, +transformers/models/yolos/__pycache__/modular_yolos.cpython-310.pyc,, +transformers/models/yolos/configuration_yolos.py,sha256=3MosWcNOUgTJ1pTBkCQT852fsnIDHomISTyCShOKo2k,7627 +transformers/models/yolos/feature_extraction_yolos.py,sha256=5wVaZnDzK3ROFChjwHYMHGv1aPmtq1IOqmt100yImtE,1594 
+transformers/models/yolos/image_processing_yolos.py,sha256=qZ0IAI-zbLNFt4bM91OmeYfury9GxFgaNtdt15cZyHE,67983 +transformers/models/yolos/image_processing_yolos_fast.py,sha256=_8B3QeqT-dqRw2P6WeRwaXEdILgJJEKlguCn1YOOnyE,37705 +transformers/models/yolos/modeling_yolos.py,sha256=NmL_h3eTD5n3BqF27HI9kpCQrd1PY6rHcjvh_PEz7gE,34433 +transformers/models/yolos/modular_yolos.py,sha256=ZTEyzZtRQI7b1zKqP6wkaxY4TyC9dFbYKb7PQ_0Mwp4,8269 +transformers/models/yoso/__init__.py,sha256=sCXsXYZuOQLFkZMexRb8qY7EJCftR54G_eO7qIUvdss,989 +transformers/models/yoso/__pycache__/__init__.cpython-310.pyc,, +transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc,, +transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc,, +transformers/models/yoso/configuration_yoso.py,sha256=6PQqt0OjHQBTNnnhDE761sdwlq9_tqG48UJ-pBV3rBM,6715 +transformers/models/yoso/modeling_yoso.py,sha256=aWfgWaTtQZvX-w9Of_mJs14gcy52D5JJHHOaig4NZVI,49753 +transformers/models/zamba/__init__.py,sha256=iqZnf8BQ49TLcB4mYwIfuJeF4aGvYhOBRiGI6_74ZFk,991 +transformers/models/zamba/__pycache__/__init__.cpython-310.pyc,, +transformers/models/zamba/__pycache__/configuration_zamba.cpython-310.pyc,, +transformers/models/zamba/__pycache__/modeling_zamba.cpython-310.pyc,, +transformers/models/zamba/configuration_zamba.py,sha256=0sHrNCBHaMWoTLegdBSl2WFQBQtyMj4qb_XNO5cUM64,11292 +transformers/models/zamba/modeling_zamba.py,sha256=5GbZQla7CiLWDWuygLyg3_CjgLgmiMnDjmQv7kzc2To,63227 +transformers/models/zamba2/__init__.py,sha256=3FgH8KelorllnKF6ncpKGREwZXt6YwsQ7NPS8W6jcmQ,993 +transformers/models/zamba2/__pycache__/__init__.cpython-310.pyc,, +transformers/models/zamba2/__pycache__/configuration_zamba2.cpython-310.pyc,, +transformers/models/zamba2/__pycache__/modeling_zamba2.cpython-310.pyc,, +transformers/models/zamba2/__pycache__/modular_zamba2.cpython-310.pyc,, +transformers/models/zamba2/configuration_zamba2.py,sha256=DNZAQNnBwPacTJy1yp7Dd8y9aW7HL3zFwa0xam1Fop8,12734 
+transformers/models/zamba2/modeling_zamba2.py,sha256=xB5ejX15ATXEvMdz8U_8WlYBJngVxtsHkRfJKTybjXQ,85163 +transformers/models/zamba2/modular_zamba2.py,sha256=nBZyOmBzVzZV_H1xn2UjYFNYkCbIAgXhGoCoTVlPJt0,55813 +transformers/models/zoedepth/__init__.py,sha256=BUGUeWtpJJRRdQGT1dIOi-B5v89Ae8eTTxbEmVqiu0k,1092 +transformers/models/zoedepth/__pycache__/__init__.cpython-310.pyc,, +transformers/models/zoedepth/__pycache__/configuration_zoedepth.cpython-310.pyc,, +transformers/models/zoedepth/__pycache__/image_processing_zoedepth.cpython-310.pyc,, +transformers/models/zoedepth/__pycache__/image_processing_zoedepth_fast.cpython-310.pyc,, +transformers/models/zoedepth/__pycache__/modeling_zoedepth.cpython-310.pyc,, +transformers/models/zoedepth/configuration_zoedepth.py,sha256=gRQ2uRqmhfagYNHU3FkWJ4PPirYIe9Ve7GJ7ENW17mk,12972 +transformers/models/zoedepth/image_processing_zoedepth.py,sha256=jUzKeNx4VR-l9ps_VEshhfRC-ZB7pLHWw66ZRTSiX4s,28181 +transformers/models/zoedepth/image_processing_zoedepth_fast.py,sha256=FnYxAEzSeOylTTsxbYBeyxpjUvsHlNwrKD6y0l1CT3g,13761 +transformers/models/zoedepth/modeling_zoedepth.py,sha256=t2_5CO4MJnLMpPk1iq2ZDSVIVxVL1_2B0-4Tr6A1nhY,54475 +transformers/onnx/__init__.py,sha256=wALLY4TPOK2iPrFcfZf_WiEmTRAU6dAWHElxGdexr58,1548 +transformers/onnx/__main__.py,sha256=JZ9ZmeRsnDitwTMWb-dFT8W9AEmMoMKLQ3SvbyCkY0w,9497 +transformers/onnx/__pycache__/__init__.cpython-310.pyc,, +transformers/onnx/__pycache__/__main__.cpython-310.pyc,, +transformers/onnx/__pycache__/config.cpython-310.pyc,, +transformers/onnx/__pycache__/convert.cpython-310.pyc,, +transformers/onnx/__pycache__/features.cpython-310.pyc,, +transformers/onnx/__pycache__/utils.cpython-310.pyc,, +transformers/onnx/config.py,sha256=soohSCWqM_jnG7TIzCeZz8Ugfzv2W_tlEokFb8Z7sRM,32617 +transformers/onnx/convert.py,sha256=1Skizwf9hyB2CQtNNwjwtuJT9EshegWQNcvPdJp4SNg,19418 +transformers/onnx/features.py,sha256=zRhGiYgzMMfvdh7UvCO9j_y0L9cbVbTSL88cItk_PBg,28276 
+transformers/onnx/utils.py,sha256=39Uw_GkFBsTb6ZvMIHRTnI289aQDhc6hwfEapaBGE-o,3625 +transformers/optimization.py,sha256=QbCP-ynCaRYmsiXtOJppTMjmFICBghSjXyFbXDkWe_s,39971 +transformers/optimization_tf.py,sha256=JYL8tVbLyB3puGJ0b2i1gGaidCHSxm8_jYmm7z-ZJ-4,16718 +transformers/pipelines/__init__.py,sha256=ItV1UJhF0zn0dun_BbsFEj4u7ssPQTWEW0nduT80mrE,83535 +transformers/pipelines/__pycache__/__init__.cpython-310.pyc,, +transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc,, +transformers/pipelines/__pycache__/automatic_speech_recognition.cpython-310.pyc,, +transformers/pipelines/__pycache__/base.cpython-310.pyc,, +transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc,, +transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc,, +transformers/pipelines/__pycache__/feature_extraction.cpython-310.pyc,, +transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc,, +transformers/pipelines/__pycache__/image_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc,, +transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc,, +transformers/pipelines/__pycache__/image_text_to_text.cpython-310.pyc,, +transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc,, +transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc,, +transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc,, +transformers/pipelines/__pycache__/object_detection.cpython-310.pyc,, +transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc,, +transformers/pipelines/__pycache__/question_answering.cpython-310.pyc,, +transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc,, +transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc,, +transformers/pipelines/__pycache__/text_classification.cpython-310.pyc,, 
+transformers/pipelines/__pycache__/text_generation.cpython-310.pyc,, +transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc,, +transformers/pipelines/__pycache__/token_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/video_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc,, +transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc,, +transformers/pipelines/__pycache__/zero_shot_object_detection.cpython-310.pyc,, +transformers/pipelines/audio_classification.py,sha256=ZDKCHhh7RE0OR_qD4Oq6jdY3qb5JpuUMJOioKudYyG8,11200 +transformers/pipelines/audio_utils.py,sha256=Zy6IbcbsXP4_mJkwlfvUG4VRX88bcKhpH1_ZaxJmrX4,12269 +transformers/pipelines/automatic_speech_recognition.py,sha256=nNgTWxSa_SlMon_34t-P8NvHuX7AxNg4h18MPJ6_0ag,34009 +transformers/pipelines/base.py,sha256=_uv3MjJgU7E558VYm9S019sWWu1zoYlRLc_GHf8r18Q,68669 +transformers/pipelines/depth_estimation.py,sha256=5g7fafV623awFBblTrNnXkN1g7P64QjDIxJRNnkwW28,6176 +transformers/pipelines/document_question_answering.py,sha256=efdAtKOTTedjhY-TKntvi2g54sE5LMaIFz34l_7KcWc,25792 +transformers/pipelines/feature_extraction.py,sha256=x6ZmfVcoT9TNWaPyM5S-zep_cCgTsFBFxe7B8QdrVTs,3564 +transformers/pipelines/fill_mask.py,sha256=DNGcI1nwopJUgz2C1FDi1r6rhgJRfxsuqrFy1tVFRs0,12086 +transformers/pipelines/image_classification.py,sha256=pP4giGHRbROoCkXS-xFt42jpkrxTi_MzpPeV5bfABvo,10238 +transformers/pipelines/image_feature_extraction.py,sha256=wJg23P055cfM6Xir9yMD9I0ps-B7te6UibbmpON90pU,4968 +transformers/pipelines/image_segmentation.py,sha256=b6yAF8B7_f0CwxJ6y7ymCQnmiIRW-DEGVfeZl_6oOuE,9988 +transformers/pipelines/image_text_to_text.py,sha256=blkQ_EuOmjAfn4fVrcixIaAjTuNwIDXRR58Nn_yXdK8,23503 
+transformers/pipelines/image_to_image.py,sha256=fgBabCF2m32jRl0DCcPDMBAuigAu1NHD5OLh2VAeSLY,5393 +transformers/pipelines/image_to_text.py,sha256=ICa8okECoJMdDpykz3cCQ1nMtKluowDqTRFiFX7QAKc,10341 +transformers/pipelines/mask_generation.py,sha256=JRchLR7x5Pt-_ZF8D5WTnPOjJmY1S9k4iWDga5oc9hs,14480 +transformers/pipelines/object_detection.py,sha256=CxdYWobnXEbId-MVMys9ctFOZx2UyrWkQfZEu7AWDFI,8648 +transformers/pipelines/pt_utils.py,sha256=D-cFFKAaVtn3jaZGPKFr-U3JF3_YR5H3kO4QD1jrqQY,12762 +transformers/pipelines/question_answering.py,sha256=QvTA1VqjEddB2YcW6FsGunRoHiYqHo-C-WS-ItyiSag,31095 +transformers/pipelines/table_question_answering.py,sha256=SIljMVtUxDO48hvCUsxjWw7d3haiYwAU6Xiry7KVcFI,20965 +transformers/pipelines/text2text_generation.py,sha256=S9NhbLvTm3cjVD-Ai6AOFrimeSty447OEKEcoeMqCys,18896 +transformers/pipelines/text_classification.py,sha256=LrmS2Etpkb_QCVjXpCxTYfA1Ds72uTUxSSUZ3rQ4yHs,11272 +transformers/pipelines/text_generation.py,sha256=bxnO5H31pYcSnyZt1RpGJyOCJqxJVwWiGoUprlNAk2Q,26448 +transformers/pipelines/text_to_audio.py,sha256=tO1FZlZTND4wVD6RUOPowgD2YNzCrYhtQhjrglGqYGk,10602 +transformers/pipelines/token_classification.py,sha256=lyLNtFUHWpUDRhMJuXKGk_AQ6__vB4l-h3Pm-hncSrg,30837 +transformers/pipelines/video_classification.py,sha256=CQhYwOblhkhAZLINmOj2Y3Q9fnIW4Ccw_hn-jteBh9M,8242 +transformers/pipelines/visual_question_answering.py,sha256=MfHRnI_BN4l7nGypTsNOcOPZQDCXj0P44dXt11j6Rd8,9825 +transformers/pipelines/zero_shot_audio_classification.py,sha256=-P5FdjH_KBPKaZz8flLFdPTmwHCb1HwZla1t9O46Ktw,7034 +transformers/pipelines/zero_shot_classification.py,sha256=gMfncfItamTLHUldLcSqZhn9USCR2JESCBJfEl1Wzks,12553 +transformers/pipelines/zero_shot_image_classification.py,sha256=y0oJOf1f38Wk5IClZgJIYrbMCpI9FVsbxXPjh5vnJUE,8628 +transformers/pipelines/zero_shot_object_detection.py,sha256=gGQeFOBOw4JUBk0PfFl6qEAaHPeRGE2nPTWc8M5MN50,10746 +transformers/processing_utils.py,sha256=behtNYzlxRvRzVSPXGNoXcTTnumhBhDVkW_6-t1O-Kc,82110 
+transformers/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +transformers/pytorch_utils.py,sha256=-ASEq4FhHie14bhkcNeO8zfqdHtMwcq7AVpU7EFxLXU,14981 +transformers/quantizers/__init__.py,sha256=S_xTSTbkDOvjLgR3jgR4EAkP_sc3NE8e38T-lllAaNo,800 +transformers/quantizers/__pycache__/__init__.cpython-310.pyc,, +transformers/quantizers/__pycache__/auto.cpython-310.pyc,, +transformers/quantizers/__pycache__/base.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_auto_round.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_bitnet.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_compressed_tensors.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_eetq.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_fbgemm_fp8.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_finegrained_fp8.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_fp_quant.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_higgs.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_hqq.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_mxfp4.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_quark.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_spqr.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_torchao.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizer_vptq.cpython-310.pyc,, +transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc,, 
+transformers/quantizers/auto.py,sha256=QdfQLFATN-NpsuFhBWo6uO0-xXISSlKPDVqD9K8V8j4,11671 +transformers/quantizers/base.py,sha256=lkOocjY6_2hBTcRur054gy_Gf8qDRd6Jtf6jtWeOVO4,15911 +transformers/quantizers/quantizer_aqlm.py,sha256=s-JXtyt423j_TztYGuUl9kjRLkqI5hMgcuhKK967Wns,3647 +transformers/quantizers/quantizer_auto_round.py,sha256=nZHBQEOUhwIdJ7rlye4dLJb2oNtwFE2HsfartkRueVo,3104 +transformers/quantizers/quantizer_awq.py,sha256=EZnV_3q_HhsZdCoYcnTarE-_7y7PLXa9n2EHVvs90R0,7504 +transformers/quantizers/quantizer_bitnet.py,sha256=9cQ6Nk8qXvgtgbMzmPPwpBu9MDfmqZ1xB6pnBx4aaQU,4669 +transformers/quantizers/quantizer_bnb_4bit.py,sha256=0hcNzZQI3-JJv0GBE-8wdH9SGDoom7hEpaEmzOOjBVQ,16576 +transformers/quantizers/quantizer_bnb_8bit.py,sha256=6rDoPUyBYmf-asdtsHgFSmSEjyggWMROYGwYnoL_B3k,14314 +transformers/quantizers/quantizer_compressed_tensors.py,sha256=-tExuhVJYzw2wuxSRY0YnOVu044cz2Wjv8I6NlWUKo4,7444 +transformers/quantizers/quantizer_eetq.py,sha256=JRcBNz2ginPvkhuDSFJGTDfmNM40gqjTcAcX0kh915Q,7207 +transformers/quantizers/quantizer_fbgemm_fp8.py,sha256=IHQplkcm369duavDJ70pFM2uc_ljmhPVXbtb3lkggj0,13769 +transformers/quantizers/quantizer_finegrained_fp8.py,sha256=EKsTozudaZjMmrzZUaQGD-hq9NTeZMRAVRsSxGE2590,9470 +transformers/quantizers/quantizer_fp_quant.py,sha256=WNj29q6Z_OcO-v5s_b_8fPqbCBz527j1qc0lxPN5u4I,7465 +transformers/quantizers/quantizer_gptq.py,sha256=D4bGUngl2mdu0K0LvyvfQ5l2Iz3Ny-Iat9eYNNSAhGo,5633 +transformers/quantizers/quantizer_higgs.py,sha256=S-HuJxgw3FRfuxRf_PEhhnHbUPrs3ELyt6o7_3NCBPo,8485 +transformers/quantizers/quantizer_hqq.py,sha256=nbm8J5vnyAFBMHnlTgjKvjRUGyWQC5zwL_P2w_CLxyM,13024 +transformers/quantizers/quantizer_mxfp4.py,sha256=guK_8EzjUG9Fu3XxPUc2LB8-M_VmwzPkL1lvkqYTZR0,16242 +transformers/quantizers/quantizer_quanto.py,sha256=RYtiuoTPGO10Z2-iAKHGl5rPgTu-PFFKfRA5rtAxXB0,7652 +transformers/quantizers/quantizer_quark.py,sha256=dXhnSCtjK30AQvXoGbVICqtotYWKekVH8JPOlOWBnjY,3850 
+transformers/quantizers/quantizer_spqr.py,sha256=i2vfNz6RL9u94GKKFHMwwtNUmIrMVBj1mTbCyHxu0Tk,3248 +transformers/quantizers/quantizer_torchao.py,sha256=6PBD7FWCwBSr3fjZlV88DnAQTgo3b-38rfnUICFAhHc,16418 +transformers/quantizers/quantizer_vptq.py,sha256=OCz4vueKXQg9Az5KJ97COYZBnHaHhYYF3a4H56fvDDk,3764 +transformers/quantizers/quantizers_utils.py,sha256=gVf8Up7S6h8mrYHtwmcAJgBENhwQsh3x6cMmoPso6x8,878 +transformers/safetensors_conversion.py,sha256=LjnFRVfXRsOhIHdyiw6pevDJcMdsKwc3kvQ6csPs9wA,4074 +transformers/sagemaker/__init__.py,sha256=fKtKAHamz_CLL9jPGCa2E-1n8RmuS-58qGtzZuKc3qg,730 +transformers/sagemaker/__pycache__/__init__.cpython-310.pyc,, +transformers/sagemaker/__pycache__/trainer_sm.cpython-310.pyc,, +transformers/sagemaker/__pycache__/training_args_sm.cpython-310.pyc,, +transformers/sagemaker/trainer_sm.py,sha256=7GsKLtjdMfKp98OwHD7RcBsl745OOwHAaBswkfLkfsE,1044 +transformers/sagemaker/training_args_sm.py,sha256=4ZnQhITfMwT0y2Y2MvkI11PEB_yfTX5Z7WrPKt0VXD8,5389 +transformers/testing_utils.py,sha256=-SckqPNh4JZnSkbQh0hbMRGzOjJM74RG_xnApbfJwbo,121627 +transformers/tf_utils.py,sha256=uiS6uSPmB_ZUaxbV-vMkGy1roDTtY3ujpIgkwuskGmc,11390 +transformers/time_series_utils.py,sha256=fhc___L7NHqLzQ2lvrojW0yGkXJUTVqHGEAt5VDRqNA,7493 +transformers/tokenization_mistral_common.py,sha256=ovzIqZ25d2ose4DpPX7o5YteVUnPr_d-_walrkMJu9U,91529 +transformers/tokenization_utils.py,sha256=KAQjomfPxV2n20uzN39yFSxeXhXnUkRQFjuJG6F070c,47774 +transformers/tokenization_utils_base.py,sha256=HzlDOJ74NPZEht-cGvq9t4DWKKQuFmMQDhVc1WrVAx8,210542 +transformers/tokenization_utils_fast.py,sha256=VG8L7X98l-9_yiURQkps-qLvOc2ClBOGtUKbojTWHxc,41357 +transformers/trainer.py,sha256=5-6lCjx9ixcDYl0tGKoL8FDXHLHzs75hPuYduGav7pE,265140 +transformers/trainer_callback.py,sha256=hNeWb0OiAI2AdfW66bot9RlJp-4tm7Z6cASSW48qXQk,33611 +transformers/trainer_pt_utils.py,sha256=xA3gTQHm4aIB3NaRCSqkOZon7eHWVPMdZbEcK8ueZRI,61699 +transformers/trainer_seq2seq.py,sha256=_GsOuEH9pGY4Jf8gprEpLnH_itU48iz1l_JE7226UmQ,17961 
+transformers/trainer_utils.py,sha256=DsVdLKbqIMfeYP6Cvu4r4LpwMu_-BDYOvAjcqkG1o4s,34115 +transformers/training_args.py,sha256=2aoH_j04senV2sgi8hpgEjYYZdW1WQTrXtxl1GpBDl0,160895 +transformers/training_args_seq2seq.py,sha256=J9_vJQR4VxWAHWVbRmxjXHSRLd6KSe8inisIVezlbXI,3896 +transformers/training_args_tf.py,sha256=dTx8RireQILOPtoLd4rPc4aQnXfj_Qs3KNLYLiSLUIk,14583 +transformers/utils/__init__.py,sha256=gikCAb5XYzT94ZCsYkJpLcUyvhk3HFSSxyrqojOannI,10450 +transformers/utils/__pycache__/__init__.cpython-310.pyc,, +transformers/utils/__pycache__/attention_visualizer.cpython-310.pyc,, +transformers/utils/__pycache__/auto_docstring.cpython-310.pyc,, +transformers/utils/__pycache__/backbone_utils.cpython-310.pyc,, +transformers/utils/__pycache__/bitsandbytes.cpython-310.pyc,, +transformers/utils/__pycache__/chat_template_utils.cpython-310.pyc,, +transformers/utils/__pycache__/constants.cpython-310.pyc,, +transformers/utils/__pycache__/deprecation.cpython-310.pyc,, +transformers/utils/__pycache__/doc.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_detectron2_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_flax_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_mistral_common_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_music_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_sentencepiece_and_tokenizers_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_sentencepiece_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_speech_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_tensorflow_text_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_tf_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_timm_and_torchvision_objects.cpython-310.pyc,, 
+transformers/utils/__pycache__/dummy_tokenizers_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_torchaudio_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_torchvision_objects.cpython-310.pyc,, +transformers/utils/__pycache__/dummy_vision_objects.cpython-310.pyc,, +transformers/utils/__pycache__/fx.cpython-310.pyc,, +transformers/utils/__pycache__/generic.cpython-310.pyc,, +transformers/utils/__pycache__/hp_naming.cpython-310.pyc,, +transformers/utils/__pycache__/hub.cpython-310.pyc,, +transformers/utils/__pycache__/import_utils.cpython-310.pyc,, +transformers/utils/__pycache__/logging.cpython-310.pyc,, +transformers/utils/__pycache__/metrics.cpython-310.pyc,, +transformers/utils/__pycache__/model_parallel_utils.cpython-310.pyc,, +transformers/utils/__pycache__/notebook.cpython-310.pyc,, +transformers/utils/__pycache__/peft_utils.cpython-310.pyc,, +transformers/utils/__pycache__/quantization_config.cpython-310.pyc,, +transformers/utils/__pycache__/sentencepiece_model_pb2.cpython-310.pyc,, +transformers/utils/__pycache__/sentencepiece_model_pb2_new.cpython-310.pyc,, +transformers/utils/__pycache__/versions.cpython-310.pyc,, +transformers/utils/attention_visualizer.py,sha256=dTO8HZkdNSp3cp52uLPrNsWz8NFzx3Zql2jyZgzLzhY,9580 +transformers/utils/auto_docstring.py,sha256=PL0W9QrJUhbA8R3Nm7YcGbSThQa6HoamTh8zZs-8dHU,81618 +transformers/utils/backbone_utils.py,sha256=Ivb5CS4DC3WVEOTahm33h8COiLAjVYLo2KI1a1Svb6Y,17358 +transformers/utils/bitsandbytes.py,sha256=LzOKwcHWAxxZZv-7Ts9Q0vlEYvHd18affVgVbiR3Tzs,1040 +transformers/utils/chat_template_utils.py,sha256=L9JP7gU8GWXZeuVNRksnN4D2NhDZ-tzKZC9nTzVvx-w,22446 +transformers/utils/constants.py,sha256=sZsUwOnA3CbtN1svs9YoaNLTTsAc9RVaITsgpf8K4iI,282 +transformers/utils/deprecation.py,sha256=rsbc7bbHPmvePSmkpf_nXQ7OIX6ITFSK6nJxHvu0bY4,8065 +transformers/utils/doc.py,sha256=K5MgXYi1j1Bd5OU-ho57ojgp1UftHN4Yu2mpj3x3lQA,52480 
+transformers/utils/dummy_detectron2_objects.py,sha256=n7Pt_7sbVBNfohKGcOARB-ZcPcJRbjEAcoLd2vTXndU,340 +transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py,sha256=n6pY4s7zCII3dzo7Ejd0RviHa_pMateuDEwbbHgsTUY,902 +transformers/utils/dummy_flax_objects.py,sha256=lqW9EJzfDmsx7Uj4cm4UHUUwcYI9SFm8-biApCP40HQ,2652 +transformers/utils/dummy_mistral_common_objects.py,sha256=a43f12WAikWuVMOnFPTA1A2rvI9gi2a4POyuBLLFVEs,311 +transformers/utils/dummy_music_objects.py,sha256=1lxIebYUOdHJWMQ_T5IQgPgcO_wp_8YM_HGc3skuGVg,458 +transformers/utils/dummy_pt_objects.py,sha256=z7Zk6_PWnqXU3sQSUlOarFvvlOWGfQy2Gz4JC60rEMM,15587 +transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py,sha256=BgPLr8Wz8A-17K86x04N21CKXtWNQLJEWx2c4aZRqaA,286 +transformers/utils/dummy_sentencepiece_objects.py,sha256=pBykNNg9IPDeshVOeaw4sxHvgmt3by9X4rIQtz0ONYg,6455 +transformers/utils/dummy_speech_objects.py,sha256=9eFm1cjdsYOPBoAz9JTgP35Bg8WF2C9AZ_y1hFpKZdQ,465 +transformers/utils/dummy_tensorflow_text_objects.py,sha256=43V0IA2kb9gtuL0S1OL1eRFFxzQwKg4pPjMVuXUB5qg,306 +transformers/utils/dummy_tf_objects.py,sha256=8ZPa6w8h-VzRDzwOO9xK26u9evz3T8bkxSLhgxI-lKU,4139 +transformers/utils/dummy_timm_and_torchvision_objects.py,sha256=EFuC5z6IsKOqqowoUGviJ3KgTjzvdTTN7gGQ3it-4t0,324 +transformers/utils/dummy_tokenizers_objects.py,sha256=PFIh5nBDmhWG2XDGuwIyBGldm6b_jdZdL3E8t5A8FsY,304 +transformers/utils/dummy_torchaudio_objects.py,sha256=EG0q0JkedoNb_4ntsf6EyTOE6Nr1whvHOzHPKy1t7x0,847 +transformers/utils/dummy_torchvision_objects.py,sha256=BaUQGsNL0Xfj-HP-pOVXSKYw5UFaNlWD_Iso9D8muGw,479 +transformers/utils/dummy_vision_objects.py,sha256=GDbX7-GrqykExLY91SMhSf508DinS5NSFfavbeDsCMU,630 +transformers/utils/fx.py,sha256=QqV-1ulNwDlMq3FK-WeN67CXIepPYZAe96IcAaJm6G0,56943 +transformers/utils/generic.py,sha256=SROD9Spq0YXKsv8AtHD9GHTdqm-opowSI_v53o6MRP0,39771 +transformers/utils/hp_naming.py,sha256=vqcOXcDOyqbISWo8-ClUJUOBVbZM1h08EcymTwcRthc,4979 
+transformers/utils/hub.py,sha256=yb-vmcXXk16eYNLNPJaXzyzpng1RRoVk2Ro1Xu-WXQQ,51656 +transformers/utils/import_utils.py,sha256=qaipd7JC8ku5djhWvhKLx4TnSl_VIM-wic2Wu-ZLCTk,104474 +transformers/utils/logging.py,sha256=D931dVGRXkb_cKJGUvdKk_Bxbtj3gOqhyCJD_33AFCQ,12282 +transformers/utils/metrics.py,sha256=A301r8MYoZPcSMO8lfpOSJO_J8GFHxhjKHBbxazqTxU,15732 +transformers/utils/model_parallel_utils.py,sha256=dmPsjrVGLxwYHsGXyvFQrcl-aZRQA5hydi4I7_sBAoo,2257 +transformers/utils/notebook.py,sha256=Gkg0GxMhbrTk21Fp6aXVeTuVk49yPDWuGbLc4OCHrPo,15796 +transformers/utils/peft_utils.py,sha256=7XZBVmD_gcZl2hjwTRYztGs-WEHfRc53rqNuUdAlzl0,5193 +transformers/utils/quantization_config.py,sha256=WigjnnG-7XYpJ0bhfhzMTNh6XFWrDw6n6idaJi8mBXI,94094 +transformers/utils/sentencepiece_model_pb2.py,sha256=WcMZRm2-571XwxSfo-6FZih9fDy_Zl5mMwqrDrC1Dlg,50663 +transformers/utils/sentencepiece_model_pb2_new.py,sha256=ahaV--amhGIL3nXFCTHqezqxuGXm8SHr_C3Zvj7KbAY,6598 +transformers/utils/versions.py,sha256=C-Tqr4qGSHH64ygIBCSo8gA6azz7Dbzh8zdc_yjMkX8,4337 +transformers/video_processing_utils.py,sha256=Fy7gcvKbNmw3ICLseF824Vy8KxMIxbGcByGIyJ5dN7k,40695 +transformers/video_utils.py,sha256=aXrJh9TAhvWnRXfYA-EkpWbzBfYOFMxroxZDznDonvI,30802 diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..47f047547c476346d81519e9d380af1a6be26155 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +transformers = transformers.commands.transformers_cli:main +transformers-cli = transformers.commands.transformers_cli:main_cli diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..68b7d66c97d66c58de883ed0c451af2b3183e6f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/licenses/LICENSE @@ -0,0 +1,203 @@ +Copyright 2018- The Hugging Face team. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..976a2b1f3998279c10c413279a095be86bf69167 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers-4.55.1.dist-info/top_level.txt @@ -0,0 +1 @@ +transformers