Columns: id (int64, 0 to 190k) · prompt (string, 21 to 13.4M chars) · docstring (string, 1 to 12k chars)
18,528
import importlib.util
import io
import json
import weakref
from copy import deepcopy
from functools import partialmethod

from .dependency_versions_check import dep_version_check
from .file_utils import is_torch_available
from .utils import logging

logger = logging.get_logger(__name__)


def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps):
    """
    A convenience wrapper that deals with optimizer and lr scheduler configuration.
    """
    config = hf_deepspeed_config.config

    # Optimizer + Scheduler
    # Currently supported combos:
    # 1. DS scheduler + DS optimizer: Yes
    # 2. HF scheduler + HF optimizer: Yes
    # 3. DS scheduler + HF optimizer: Yes
    # 4. HF scheduler + DS optimizer: Yes
    #
    # Unless Offload is enabled in which case it's:
    # 1. DS scheduler + DS optimizer: Yes
    # 2. HF scheduler + HF optimizer: Mostly*
    # 3. DS scheduler + HF optimizer: Mostly*
    # 4. HF scheduler + DS optimizer: Yes
    #
    # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB)

    optimizer = None
    if "optimizer" in config:
        if args.adafactor:
            raise ValueError(
                "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. "
                "Only one optimizer can be configured."
            )
    else:
        if hf_deepspeed_config.is_offload():
            logger.info(
                "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the "
                "custom optimizer has both CPU and GPU implementation (except LAMB)"
            )

        # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch.
        # But trainer uses AdamW by default.
        optimizer = trainer.create_optimizer()
        # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer`
        config["zero_allow_untested_optimizer"] = True

    def _lr_scheduler_callable(optimizer):
        return trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)

    lr_scheduler = None
    if "scheduler" not in config:
        if optimizer is None:
            # Optimizer is not available, so use callable to defer lr_scheduler creation to DS init
            lr_scheduler = _lr_scheduler_callable
        else:
            lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)

    return optimizer, lr_scheduler

The provided code snippet includes necessary dependencies for implementing the `deepspeed_init` function. Write a Python function `def deepspeed_init(trainer, num_training_steps, resume_from_checkpoint=None, inference=False)` to solve the following problem:

Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args. If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made.

Args:
    trainer: Trainer object
    num_training_steps: per single gpu
    resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load
    inference: launch in inference mode (no optimizer and no lr scheduler)

Returns: model, optimizer, lr_scheduler

Here is the function:

def deepspeed_init(trainer, num_training_steps, resume_from_checkpoint=None, inference=False):
    """
    Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args.

    If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made.

    Args:
        trainer: Trainer object
        num_training_steps: per single gpu
        resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load
        inference: launch in inference mode (no optimizer and no lr scheduler)

    Returns: model, optimizer, lr_scheduler
    """
    import deepspeed
    from deepspeed.utils import logger as ds_logger

    model = trainer.model
    args = trainer.args

    # resume config update - some bits like `model` and `num_training_steps` only become available during train
    hf_deepspeed_config = args.hf_deepspeed_config
    hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
    config = hf_deepspeed_config.config

    # set the Deepspeed log level consistent with the Trainer
    ds_logger.setLevel(args.get_process_log_level())

    if inference:
        # only Z3 makes sense for the inference
        if not hf_deepspeed_config.is_zero3():
            raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")

        # in case the training config is re-used for inference
        hf_deepspeed_config.del_config_sub_tree("optimizer")
        hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
        optimizer, lr_scheduler = None, None
        model_parameters = None
    else:
        optimizer, lr_scheduler = deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps)
        model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))

    # keep for quick debug:
    # from pprint import pprint; pprint(config)

    kwargs = dict(
        model=model,
        model_parameters=model_parameters,
        config_params=config,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
    )

    deepspeed_engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)

    # stash kwargs to enable a later deepspeed_reinit
    trainer.deepspeed_initialize_kwargs = kwargs

    if resume_from_checkpoint is not None:
        # it's possible that the user is trying to resume from model_path, which doesn't necessarily
        # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
        # a resume from a checkpoint and not just a local pretrained weight. So we check here if the
        # path contains what looks like a deepspeed checkpoint
        import glob

        deepspeed_checkpoint_dirs = sorted(glob.glob(f"{resume_from_checkpoint}/global_step*"))

        if len(deepspeed_checkpoint_dirs) > 0:
            logger.info(f"Attempting to resume from {resume_from_checkpoint}")
            # this magically updates self.optimizer and self.lr_scheduler
            load_path, _ = deepspeed_engine.load_checkpoint(
                resume_from_checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True
            )
            if load_path is None:
                raise ValueError(f"[deepspeed] failed to resume from checkpoint {resume_from_checkpoint}")
        else:
            logger.info(f"{resume_from_checkpoint} doesn't have deepspeed checkpoints, doing nothing")

    return deepspeed_engine, optimizer, lr_scheduler
Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args. If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made. Args: trainer: Trainer object num_training_steps: per single gpu resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load inference: launch in inference mode (no optimizer and no lr scheduler) Returns: model, optimizer, lr_scheduler
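The combination table in `deepspeed_optim_sched` is driven purely by whether the `"optimizer"` / `"scheduler"` keys are present in the DeepSpeed config dict. A minimal standalone sketch of that selection logic; the helper name and the `AdamW` parameters below are illustrative, not part of the snippet:

```python
# Which side (DeepSpeed or HF/Trainer) supplies the optimizer and scheduler is
# decided by key presence in the DeepSpeed config dict (DeepSpeed JSON schema).
def describe_optim_sched_combo(ds_config: dict) -> str:
    optim_src = "DS" if "optimizer" in ds_config else "HF"
    sched_src = "DS" if "scheduler" in ds_config else "HF"
    if optim_src == "HF":
        # mirrors the snippet: a non-DS optimizer voids the warranty
        ds_config["zero_allow_untested_optimizer"] = True
    return f"{sched_src} scheduler + {optim_src} optimizer"

config = {"optimizer": {"type": "AdamW", "params": {"lr": 3e-5}}}
print(describe_optim_sched_combo(config))  # "HF scheduler + DS optimizer"
```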
18,529
import math

import torch
from packaging import version
from torch import nn

from .utils import logging

The provided code snippet includes necessary dependencies for implementing the `gelu_python` function. Write a Python function `def gelu_python(x)` to solve the following problem:

Original Implementation of the GELU activation function in Google BERT repo when initially created. For information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional. Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415

Here is the function:

def gelu_python(x):
    """
    Original Implementation of the GELU activation function in Google BERT repo when initially created. For
    information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
    torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
    Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
Original Implementation of the GELU activation function in Google BERT repo when initially created. For information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
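A quick numerical check that this python fallback matches torch's built-in erf-based implementation (standalone sketch, function restated locally):

```python
import math
import torch

def gelu_python(x):
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

x = torch.linspace(-3, 3, steps=101)
# nn.functional.gelu defaults to the exact erf form referenced above
assert torch.allclose(gelu_python(x), torch.nn.functional.gelu(x), atol=1e-6)
```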
18,530
import math

import torch
from packaging import version
from torch import nn

from .utils import logging

The provided code snippet includes necessary dependencies for implementing the `gelu_new` function. Write a Python function `def gelu_new(x)` to solve the following problem:

Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415

Here is the function:

def gelu_new(x):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
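`gelu_new` is the tanh approximation; comparing it against the exact erf form shows a small but nonzero gap (standalone sketch, function restated locally):

```python
import math
import torch

def gelu_new(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

x = torch.linspace(-3, 3, steps=101)
exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
print((gelu_new(x) - exact).abs().max())  # small but nonzero: the tanh form only approximates erf
```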
18,531
import math

import torch
from packaging import version
from torch import nn

from .utils import logging


def gelu_fast(x):
    return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
null
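`gelu_fast` hard-codes 0.7978845608 ≈ sqrt(2/π) and factors x + 0.044715x³ as x(1 + 0.044715x²), so it traces the same curve as `gelu_new` from the previous row, differing only in float rounding of the constant. A quick check (standalone sketch, both functions restated locally):

```python
import math
import torch

def gelu_fast(x):
    return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))

def gelu_new(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

x = torch.linspace(-3, 3, steps=101)
print((gelu_fast(x) - gelu_new(x)).abs().max())  # essentially zero
```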
18,532
import math

import torch
from packaging import version
from torch import nn

from .utils import logging


def quick_gelu(x):
    return x * torch.sigmoid(1.702 * x)
null
18,533
import math

import torch
from packaging import version
from torch import nn

from .utils import logging

The provided code snippet includes necessary dependencies for implementing the `_silu_python` function. Write a Python function `def _silu_python(x)` to solve the following problem:

See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with later.

Here is the function:

def _silu_python(x):
    """
    See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid
    Linear Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network
    Function Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a
    Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was
    experimented with later.
    """
    return x * torch.sigmoid(x)
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with later.
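A quick check of this python fallback against torch's fused version (standalone sketch; `nn.functional.silu` has shipped since torch 1.7, which is what the version gating implied by the `packaging` import guards):

```python
import torch

def _silu_python(x):
    return x * torch.sigmoid(x)

x = torch.randn(8)
assert torch.allclose(_silu_python(x), torch.nn.functional.silu(x), atol=1e-6)
```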
18,534
import math

import torch
from packaging import version
from torch import nn

from .utils import logging

The provided code snippet includes necessary dependencies for implementing the `_mish_python` function. Write a Python function `def _mish_python(x)` to solve the following problem:

See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also visit the official repository for the paper: https://github.com/digantamisra98/Mish

Here is the function:

def _mish_python(x):
    """
    See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
    visit the official repository for the paper: https://github.com/digantamisra98/Mish
    """
    return x * torch.tanh(nn.functional.softplus(x))
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also visit the official repository for the paper: https://github.com/digantamisra98/Mish
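A quick check of the fallback against torch's own implementation (standalone sketch; `nn.functional.mish` exists in torch >= 1.9, on older builds only the fallback is available):

```python
import torch
from torch import nn

def _mish_python(x):
    return x * torch.tanh(nn.functional.softplus(x))

x = torch.randn(8)
assert torch.allclose(_mish_python(x), nn.functional.mish(x), atol=1e-6)
```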
18,535
import math

import torch
from packaging import version
from torch import nn

from .utils import logging


def linear_act(x):
    return x
null
18,536
import math

import torch
from packaging import version
from torch import nn

from .utils import logging

# NOTE: silu, gelu and mish below refer to version-dependent aliases of the python
# implementations shown in the previous snippets (recent torch provides fused
# nn.functional equivalents); the alias definitions themselves are not part of this snippet.
ACT2FN = {
    "relu": nn.functional.relu,
    "silu": silu,
    "swish": silu,
    "gelu": gelu,
    "tanh": torch.tanh,
    "gelu_python": gelu_python,
    "gelu_new": gelu_new,
    "gelu_fast": gelu_fast,
    "quick_gelu": quick_gelu,
    "mish": mish,
    "linear": linear_act,
    "sigmoid": torch.sigmoid,
}


def get_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
null
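`ACT2FN` is a plain string-to-callable registry and `get_activation` a checked lookup. A self-contained miniature (only a few entries, `quick_gelu` inlined, so it runs without the other snippets):

```python
import torch
from torch import nn

ACT2FN = {
    "relu": nn.functional.relu,
    "quick_gelu": lambda x: x * torch.sigmoid(1.702 * x),
    "tanh": torch.tanh,
}

def get_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")

act = get_activation("quick_gelu")
print(act(torch.tensor([-1.0, 0.0, 1.0])))
```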
18,537
import importlib
import re
import warnings
from collections import OrderedDict
from typing import List, Union

from ...configuration_utils import PretrainedConfig
from ..file_utils import CONFIG_NAME
from ..utils import logging
from .dynamic import get_class_from_dynamic_module

CONFIG_MAPPING_NAMES = OrderedDict(
    [
        # Add configs here
        ("sbert", "SbertConfig"),
        ("veco", "VecoConfig"),
        ("palm", "PalmConfig"),
    ]
)

The provided code snippet includes necessary dependencies for implementing the `config_class_to_model_type` function. Write a Python function `def config_class_to_model_type(config)` to solve the following problem:

Converts a config class name to the corresponding model type

Here is the function:

def config_class_to_model_type(config):
    """Converts a config class name to the corresponding model type"""
    for key, cls in CONFIG_MAPPING_NAMES.items():
        if cls == config:
            return key
    return None
Converts a config class name to the corresponding model type
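`config_class_to_model_type` is a reverse lookup over `CONFIG_MAPPING_NAMES`; note it compares against the class *name* string, not the class object. A standalone sketch:

```python
from collections import OrderedDict

CONFIG_MAPPING_NAMES = OrderedDict([("sbert", "SbertConfig"), ("veco", "VecoConfig"), ("palm", "PalmConfig")])

def config_class_to_model_type(config):
    for key, cls in CONFIG_MAPPING_NAMES.items():
        if cls == config:
            return key
    return None

print(config_class_to_model_type("VecoConfig"))     # veco
print(config_class_to_model_type("UnknownConfig"))  # None
```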
18,538
import importlib
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union

from ..file_utils import (
    HF_MODULES_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    cached_path,
    is_offline_mode,
)
from ..utils import logging

logger = logging.get_logger(__name__)


def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )


def get_class_in_module(class_name, module_path):
    """
    Import a module on the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)


HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules"


def is_offline_mode():
    return _is_offline_mode


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent: Union[Dict, str, None] = None,
    extract_compressed_file=False,
    force_extract=False,
    use_auth_token: Union[bool, str, None] = None,
    local_files_only=False,
) -> Optional[str]:
    """
    Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file
    and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and
    then return the path

    Args:
        cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if incompletely received file is found.
        user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
        use_auth_token: Optional string or boolean to use as Bearer token for remote files. If True, will get token
            from ~/.huggingface.
        extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a
            folder along the archive.
        force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract
            the archive and override the folder where it was extracted.

    Return:
        Local path (string) of file or if networking is off, last version of file cached on disk.

    Raises:
        In case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
    """
    if cache_dir is None:
        # Just keep on use transformers' cache dir, no need to change.
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            use_auth_token=use_auth_token,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError(f"file {url_or_filename} not found")
    else:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError(f"Archive format of {output_path} could not be identified")

        return output_path_extracted

    return output_path

The provided code snippet includes necessary dependencies for implementing the `get_class_from_dynamic_module` function. Write a Python function `def get_class_from_dynamic_module( pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, )` to solve the following problem:

Extracts a class from a module file, present in the local folder or repository of a model.

<Tip warning={true}>

Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should therefore only be called on trusted repos.

</Tip>

Args:
    pretrained_model_name_or_path (`str` or `os.PathLike`):
        This can be either:

        - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
          huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
          under a user or organization name, like `dbmdz/bert-base-german-cased`.
        - a path to a *directory* containing a configuration file saved using the
          [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.

    module_file (`str`):
        The name of the module file containing the class to look for.
    class_name (`str`):
        The name of the class to import in the module.
    cache_dir (`str` or `os.PathLike`, *optional*):
        Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
        cache should not be used.
    force_download (`bool`, *optional*, defaults to `False`):
        Whether or not to force to (re-)download the configuration files and override the cached versions if they
        exist.
    resume_download (`bool`, *optional*, defaults to `False`):
        Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
    proxies (`Dict[str, str]`, *optional*):
        A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
        'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
    use_auth_token (`str` or *bool*, *optional*):
        The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when
        running `transformers-cli login` (stored in `~/.huggingface`).
    revision(`str`, *optional*, defaults to `"main"`):
        The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
        git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
        identifier allowed by git.
    local_files_only (`bool`, *optional*, defaults to `False`):
        If `True`, will only try to load the tokenizer configuration from local files.

<Tip>

Passing `use_auth_token=True` is required when you want to use a private model.

</Tip>

Returns:
    `type`: The class, dynamically imported from the module.

Examples:

```python
# Download module *modeling.py* from huggingface.co and cache then extract the class *MyBertModel* from this
# module.
cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel")
```

Here is the function:

def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Extracts a class from a module file, present in the local folder or repository of a model.

    <Tip warning={true}>

    Calling this function will execute the code in the module file found locally or downloaded from the Hub. It
    should therefore only be called on trusted repos.

    </Tip>

    Args:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            This can be either:

            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
              huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
              namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
            - a path to a *directory* containing a configuration file saved using the
              [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.

        module_file (`str`):
            The name of the module file containing the class to look for.
        class_name (`str`):
            The name of the class to import in the module.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the
            standard cache should not be used.
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force to (re-)download the configuration files and override the cached versions if
            they exist.
        resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received file. Attempts to resume the download if such a file
            exists.
        proxies (`Dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
        use_auth_token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `transformers-cli login` (stored in `~/.huggingface`).
        revision(`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.
        local_files_only (`bool`, *optional*, defaults to `False`):
            If `True`, will only try to load the tokenizer configuration from local files.

    <Tip>

    Passing `use_auth_token=True` is required when you want to use a private model.

    </Tip>

    Returns:
        `type`: The class, dynamically imported from the module.

    Examples:

    ```python
    # Download module *modeling.py* from huggingface.co and cache then extract the class *MyBertModel* from this
    # module.
    cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel")
    ```"""
    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True

    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isdir(pretrained_model_name_or_path):
        module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
        submodule = "local"
    else:
        raise RuntimeError(f"Only support local files.")

    try:
        # Load from URL or cache if already cached
        resolved_module_file = cached_path(
            module_file_or_url,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            local_files_only=local_files_only,
            use_auth_token=use_auth_token,
        )
    except EnvironmentError:
        logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
        raise

    # Check we have all the requirements in our environment
    check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        module_name = module_file
        shutil.copy(resolved_module_file, submodule_path / module_file)
    else:
        # The module file will end up being named module_file + the etag. This way we get the benefit of versioning.
        resolved_module_file_name = Path(resolved_module_file).name
        module_name_parts = [module_file.replace(".py", "")] + resolved_module_file_name.split(".")
        module_name = "_".join(module_name_parts) + ".py"
        if not (submodule_path / module_name).exists():
            shutil.copy(resolved_module_file, submodule_path / module_name)

    # And lastly we get the class inside our newly created module
    final_module = os.path.join(full_submodule, module_name.replace(".py", ""))
    return get_class_in_module(class_name, final_module)
Extracts a class from a module file, present in the local folder or repository of a model. <Tip warning={true}> Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should therefore only be called on trusted repos. </Tip> Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. module_file (`str`): The name of the module file containing the class to look for. class_name (`str`): The name of the class to import in the module. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `type`: The class, dynamically imported from the module. Examples: ```python # Download module *modeling.py* from huggingface.co and cache then extract the class *MyBertModel* from this # module. cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel") ```
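The heavy lifting in `get_class_from_dynamic_module` is ordinary importlib machinery: put a `.py` file somewhere importable, import it, and `getattr` the class. A self-contained sketch of that core mechanic (the file and class are made up for the demo, and the HF cache layout is dropped; as the `<Tip>` above warns, executing a module runs its code, so only do this with trusted files):

```python
import importlib.util
import tempfile
from pathlib import Path

# Hypothetical module written to disk just for the demo
src = "class MyModel:\n    def __repr__(self):\n        return 'MyModel()'\n"
module_file = Path(tempfile.mkdtemp()) / "modeling.py"
module_file.write_text(src)

# Same mechanics get_class_in_module relies on, but via an explicit file spec
# instead of a package path under HF_MODULES_CACHE
spec = importlib.util.spec_from_file_location("modeling", module_file)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)  # executes the file: trusted code only
cls = getattr(module, "MyModel")
print(cls())  # MyModel()
```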
18,539
import importlib
import os
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ..feature_extraction_utils import FeatureExtractionMixin
from ..file_utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    config_class_to_model_type,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict([])


def model_type_to_module_name(key):
    """Converts a config key to the corresponding module."""
    # Special treatment
    # NOTE: SPECIAL_MODEL_TYPE_TO_MODULE_NAME is assumed to be defined elsewhere in the
    # original module; the snippet does not include it.
    if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
        return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]

    return key.replace("-", "_")


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "sofa.models")
            return getattr(module, class_name)

    return None
null
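`model_type_to_module_name` is a plain key-to-module-name rewrite. A standalone sketch (the special-case table, absent from the snippet, is stubbed out as empty):

```python
SPECIAL_MODEL_TYPE_TO_MODULE_NAME = {}  # assumed mapping; empty for the demo

def model_type_to_module_name(key):
    if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
        return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]
    return key.replace("-", "_")

print(model_type_to_module_name("xlm-roberta"))  # xlm_roberta
```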
18,540
import importlib
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ..file_utils import copy_func
from ..utils import logging
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
from .dynamic import get_class_from_dynamic_module


def _get_model_class(config, model_mapping):
    supported_models = model_mapping[type(config)]
    if not isinstance(supported_models, (list, tuple)):
        return supported_models

    name_to_model = {model.__name__: model for model in supported_models}
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in name_to_model:
            return name_to_model[arch]
        elif f"TF{arch}" in name_to_model:
            return name_to_model[f"TF{arch}"]
        elif f"Flax{arch}" in name_to_model:
            return name_to_model[f"Flax{arch}"]

    # If no architecture is set in the config, or none matches the supported models, the first element of the tuple
    # is the default.
    return supported_models[0]
null
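`_get_model_class` picks a concrete class by matching `config.architectures` against candidate names (also trying `TF`/`Flax` prefixes) and falls back to the first entry. A simplified, self-contained sketch of that matching logic (dummy classes; the `model_mapping[type(config)]` indirection is dropped):

```python
class BertModel: ...
class TFBertModel: ...

class DummyConfig:
    architectures = ["BertModel"]

def pick_model_class(config, supported_models):
    if not isinstance(supported_models, (list, tuple)):
        return supported_models
    name_to_model = {m.__name__: m for m in supported_models}
    for arch in getattr(config, "architectures", []):
        for candidate in (arch, f"TF{arch}", f"Flax{arch}"):
            if candidate in name_to_model:
                return name_to_model[candidate]
    return supported_models[0]  # fall back to the first (default) entry

print(pick_model_class(DummyConfig(), (TFBertModel, BertModel)))  # BertModel
```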
18,541
import importlib from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ..file_utils import copy_func from ..utils import logging from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings from .dynamic import get_class_from_dynamic_module CLASS_DOCSTRING = """ This is a generic model class that will be instantiated as one of the model classes of the library when created with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class method. This class cannot be instantiated directly using `__init__()` (throws an error). """ FROM_CONFIG_DOCSTRING = """ Instantiates one of the model classes of the library from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights. Args: config ([`PretrainedConfig`]): The model class to instantiate is selected based on the configuration class: List options Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download configuration from huggingface.co and cache. >>> config = AutoConfig.from_pretrained('checkpoint_placeholder') >>> model = BaseAutoModelClass.from_config(config) ``` """ FROM_PRETRAINED_TORCH_DOCSTRING = """ Instantiate one of the model classes of the library from a pretrained model. The model class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()` Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args (additional positional arguments, *optional*): Will be passed along to the underlying model `__init__()` method. config ([`PretrainedConfig`], *optional*): Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. 
state_dict (*Dict[str, torch.Tensor]*, *optional*): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and [`~PreTrainedModel.from_pretrained`] is not a simpler option. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_tf (`bool`, *optional*, defaults to `False`): Load the model weights from a TensorFlow checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (e.g., not try downloading the model). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs (additional keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download model and configuration from huggingface.co and cache. 
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder') >>> # Update configuration during loading >>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_pretrained('./tf_model/shortcut_placeholder_tf_model_config.json') >>> model = BaseAutoModelClass.from_pretrained('./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index', from_tf=True, config=config) ``` """ FROM_PRETRAINED_TF_DOCSTRING = """ Instantiate one of the model classes of the library from a pretrained model. The model class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. model_args (additional positional arguments, *optional*): Will be passed along to the underlying model `__init__()` method. config ([`PretrainedConfig`], *optional*): Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (e.g., not try downloading the model). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs (additional keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download model and configuration from huggingface.co and cache. >>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder') >>> # Update configuration during loading >>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> config = AutoConfig.from_pretrained('./pt_model/shortcut_placeholder_pt_model_config.json') >>> model = BaseAutoModelClass.from_pretrained('./pt_model/shortcut_placeholder_pytorch_model.bin', from_pt=True, config=config) ``` """ FROM_PRETRAINED_FLAX_DOCSTRING = """ Instantiate one of the model classes of the library from a pretrained model. The model class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). 
In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. model_args (additional positional arguments, *optional*): Will be passed along to the underlying model `__init__()` method. config ([`PretrainedConfig`], *optional*): Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (e.g., not try downloading the model). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs (additional keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). 
Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download model and configuration from huggingface.co and cache. >>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder') >>> # Update configuration during loading >>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> config = AutoConfig.from_pretrained('./pt_model/shortcut_placeholder_pt_model_config.json') >>> model = BaseAutoModelClass.from_pretrained('./pt_model/shortcut_placeholder_pytorch_model.bin', from_pt=True, config=config) ``` """ class _BaseAutoModelClass: # Base class for auto models. _model_mapping = None def __init__(self, *args, **kwargs): raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_config(config)` methods." ) def from_config(cls, config, **kwargs): trust_remote_code = kwargs.pop("trust_remote_code", False) if hasattr(config, "auto_map") and cls.__name__ in config.auto_map: if not trust_remote_code: raise ValueError( "Loading this model requires you to execute the modeling file in that repo " "on your local machine. Make sure you have read the code there to avoid malicious use, then set " "the option `trust_remote_code=True` to remove this error." ) if kwargs.get("revision", None) is None: logger.warn( "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure " "no malicious code has been contributed in a newer revision." ) class_ref = config.auto_map[cls.__name__] module_file, class_name = class_ref.split(".") model_class = get_class_from_dynamic_module(config.name_or_path, module_file + ".py", class_name, **kwargs) return model_class._from_config(config, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) return model_class._from_config(config, **kwargs) raise ValueError( f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}." ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop("config", None) trust_remote_code = kwargs.pop("trust_remote_code", False) kwargs["_from_auto"] = True if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs ) if hasattr(config, "auto_map") and cls.__name__ in config.auto_map: if not trust_remote_code: raise ValueError( f"Loading {pretrained_model_name_or_path} requires you to execute the modeling file in that repo " "on your local machine. Make sure you have read the code there to avoid malicious use, then set " "the option `trust_remote_code=True` to remove this error." 
) if kwargs.get("revision", None) is None: logger.warn( "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure " "no malicious code has been contributed in a newer revision." ) class_ref = config.auto_map[cls.__name__] module_file, class_name = class_ref.split(".") model_class = get_class_from_dynamic_module( pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs ) return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs) raise ValueError( f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}." ) def register(cls, config_class, model_class): """ Register a new model for this class. Args: config_class ([`PretrainedConfig`]): The configuration corresponding to the model to register. model_class ([`PreTrainedModel`]): The model to register. """ if hasattr(model_class, "config_class") and model_class.config_class != config_class: raise ValueError( "The model class you are passing has a `config_class` attribute that is not consistent with the " f"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix " "one of those so they match!" ) cls._model_mapping.register(config_class, model_class) def insert_head_doc(docstring, head_doc=""): if len(head_doc) > 0: return docstring.replace( "one of the model classes of the library ", f"one of the model classes of the library (with a {head_doc} head) ", ) return docstring.replace( "one of the model classes of the library ", "one of the base model classes of the library " ) def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True): def docstring_decorator(fn): docstrings = fn.__doc__ lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0] if use_model_types: indent = f"{indent} " lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types) docstrings = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current docstring is:\n{docstrings}" ) fn.__doc__ = docstrings return fn return docstring_decorator def auto_class_update(cls, checkpoint_for_example="bert-base-cased", head_doc=""): # Create a new class with the right name from the base class model_mapping = cls._model_mapping name = cls.__name__ class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc) cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name) # Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't # have a specific docstrings for them. 
from_config = copy_func(_BaseAutoModelClass.from_config) from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc) from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name) from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example) from_config.__doc__ = from_config_docstring from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config) cls.from_config = classmethod(from_config) if name.startswith("TF"): from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING elif name.startswith("Flax"): from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING else: from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained) from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc) from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name) from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example) shortcut = checkpoint_for_example.split("/")[-1].split("-")[0] from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut) from_pretrained.__doc__ = from_pretrained_docstring from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained) cls.from_pretrained = classmethod(from_pretrained) return cls
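For orientation, here is a minimal sketch of how the `register` hook above is typically used; `CustomConfig` and `CustomModel` are hypothetical names, and the snippet assumes the standard `transformers` auto classes rather than this repackaged variant:

```python
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomModel(PreTrainedModel):
    config_class = CustomConfig

# After registration, AutoModel resolves CustomConfig to CustomModel,
# via the `cls._model_mapping.register(...)` call shown above.
AutoConfig.register("custom", CustomConfig)
AutoModel.register(CustomConfig, CustomModel)
```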
null
18,542
import importlib from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ..file_utils import copy_func from ..utils import logging from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings from .dynamic import get_class_from_dynamic_module def get_values(model_mapping): result = [] for model in model_mapping.values(): if isinstance(model, (list, tuple)): result += list(model) else: result.append(model) return result
null
18,543
import importlib
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ..file_utils import copy_func
from ..utils import logging
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
from .dynamic import get_class_from_dynamic_module

def getattribute_from_module(module, attr):
    if attr is None:
        return None
    if isinstance(attr, tuple):
        return tuple(getattribute_from_module(module, a) for a in attr)
    if hasattr(module, attr):
        return getattr(module, attr)
    # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
    # object at the top level.
    transformers_module = importlib.import_module("transformers")
    if module is not transformers_module:
        return getattribute_from_module(transformers_module, attr)
    # Guard against infinite recursion: if the attribute is missing from the top-level module too, fail loudly.
    raise ValueError(f"Could not find {attr} in {transformers_module}!")
null
18,544
import importlib import json import os from collections import OrderedDict from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union from ...configuration_utils import PretrainedConfig from ..file_utils import ( cached_path, is_offline_mode, is_sentencepiece_available, is_tokenizers_available, ) from ...tokenization_utils import PreTrainedTokenizer from ..tokenization_utils_base import TOKENIZER_CONFIG_FILE from ...tokenization_utils_fast import PreTrainedTokenizerFast from ..utils import logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, config_class_to_model_type, model_type_to_module_name, replace_list_option_in_docstrings, ) from .dynamic import get_class_from_dynamic_module TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES) def model_type_to_module_name(key): """Converts a config key to the corresponding module.""" # Special treatment if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME: return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key] return key.replace("-", "_") def tokenizer_class_from_name(class_name: str): if class_name == "PreTrainedTokenizerFast": return PreTrainedTokenizerFast for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items(): if class_name in tokenizers: module_name = model_type_to_module_name(module_name) module = importlib.import_module(f".{module_name}", "sofa.models") return getattr(module, class_name) for config, tokenizers in TOKENIZER_MAPPING._extra_content.items(): for tokenizer in tokenizers: if getattr(tokenizer, "__name__", None) == class_name: return tokenizer return None
null
18,545
import importlib import json import os from collections import OrderedDict from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union from ...configuration_utils import PretrainedConfig from ..file_utils import ( cached_path, is_offline_mode, is_sentencepiece_available, is_tokenizers_available, ) from ...tokenization_utils import PreTrainedTokenizer from ..tokenization_utils_base import TOKENIZER_CONFIG_FILE from ...tokenization_utils_fast import PreTrainedTokenizerFast from ..utils import logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, config_class_to_model_type, model_type_to_module_name, replace_list_option_in_docstrings, ) from .dynamic import get_class_from_dynamic_module logger = logging.get_logger(__name__) def is_offline_mode(): return _is_offline_mode def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent: Union[Dict, str, None] = None, extract_compressed_file=False, force_extract=False, use_auth_token: Union[bool, str, None] = None, local_files_only=False, ) -> Optional[str]: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-download the file even if it's already cached in the cache dir. resume_download: if True, resume the download if incompletely received file is found. user_agent: Optional string or dict that will be appended to the user-agent on remote requests. use_auth_token: Optional string or boolean to use as Bearer token for remote files. If True, will get token from ~/.huggingface. extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract the archive and override the folder where it was extracted. Return: Local path (string) of file or if networking is off, last version of file cached on disk. Raises: In case of non-recoverable file (non-existent or inaccessible url + no cache on disk). """ if cache_dir is None: # Just keep on use transformers' cache dir, no need to change. cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, use_auth_token=use_auth_token, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. 
raise EnvironmentError(f"file {url_or_filename} not found") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" output_dir, output_file = os.path.split(output_path) output_extract_dir_name = output_file.replace(".", "-") + "-extracted" output_path_extracted = os.path.join(output_dir, output_extract_dir_name) if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: return output_path_extracted # Prevent parallel extractions lock_path = output_path + ".lock" with FileLock(lock_path): shutil.rmtree(output_path_extracted, ignore_errors=True) os.makedirs(output_path_extracted) if is_zipfile(output_path): with ZipFile(output_path, "r") as zip_file: zip_file.extractall(output_path_extracted) zip_file.close() elif tarfile.is_tarfile(output_path): tar_file = tarfile.open(output_path) tar_file.extractall(output_path_extracted) tar_file.close() else: raise EnvironmentError(f"Archive format of {output_path} could not be identified") return output_path_extracted return output_path TOKENIZER_CONFIG_FILE = "tokenizer_config.json" The provided code snippet includes necessary dependencies for implementing the `get_tokenizer_config` function. Write a Python function `def get_tokenizer_config( pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, )` to solve the following problem: Loads the tokenizer configuration from a pretrained model tokenizer configuration. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Dict`: The configuration of the tokenizer. Here is the function: def get_tokenizer_config( pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ): """ Loads the tokenizer configuration from a pretrained model tokenizer configuration. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Dict`: The configuration of the tokenizer. 
""" if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE) else: raise RuntimeError(f"Only support local files.") try: # Load from URL or cache if already cached resolved_config_file = cached_path( config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, ) except EnvironmentError: logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.") return {} with open(resolved_config_file, encoding="utf-8") as reader: return json.load(reader)
Loads the tokenizer configuration from a pretrained model tokenizer configuration. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Dict`: The configuration of the tokenizer.
18,546
import importlib
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ..feature_extraction_utils import FeatureExtractionMixin
from ..file_utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_list_of_files
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    config_class_to_model_type,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
    ]
)

def model_type_to_module_name(key):
    """Converts a config key to the corresponding module name."""
    # The special-case lookup table used by other variants of this helper is omitted in this snippet,
    # so fall back to the generic conversion.
    return key.replace("-", "_")

def processor_class_from_name(class_name: str):
    for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
        if class_name in processors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "sofa.models")
            return getattr(module, class_name)
    return None
null
18,547
import contextlib import json import math import os import warnings from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional from .debug_utils import DebugOption from .file_utils import ( cached_property, get_full_repo_name, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_available, is_torch_tf32_available, is_torch_tpu_available, torch_required, ) from .trainer_utils import EvaluationStrategy, HubStrategy, IntervalStrategy, SchedulerType, ShardedDDPOption from .utils import logging The provided code snippet includes necessary dependencies for implementing the `default_logdir` function. Write a Python function `def default_logdir() -> str` to solve the following problem: Same default as PyTorch Here is the function: def default_logdir() -> str: """ Same default as PyTorch """ import socket from datetime import datetime current_time = datetime.now().strftime("%b%d_%H-%M-%S") return os.path.join("runs", current_time + "_" + socket.gethostname())
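A quick sketch of the resulting layout (timestamp and hostname shown are illustrative):

```python
log_dir = default_logdir()
# e.g. "runs/Mar01_12-34-56_myhostname" -- a per-run directory name matching
# the default used by torch.utils.tensorboard.SummaryWriter
print(log_dir)
```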
Same default as PyTorch
18,548
import copy import json import os import re import warnings from collections import OrderedDict, UserDict from contextlib import contextmanager from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union import numpy as np from packaging import version import requests from .file_utils import ( ExplicitEnum, PaddingStrategy, PushToHubMixin, TensorType, _is_jax, _is_numpy, _is_tensorflow, _is_torch, _is_torch_device, add_end_docstrings, cached_path, copy_func, get_list_of_files, is_flax_available, is_offline_mode, is_remote_url, is_tf_available, is_tokenizers_available, is_torch_available, to_py_obj, torch_required, ) from .utils import logging FULL_TOKENIZER_FILE = "tokenizer.json" def get_list_of_files( path_or_repo: Union[str, os.PathLike], *args, **kwargs ) -> List[str]: """ Gets the list of files inside `path_or_repo`. Args: path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*. revision (`str`, *optional*, defaults to `"main"`): This feature is deprecated. use_auth_token (`str` or *bool*, *optional*): This feature is deprecated. local_files_only (`bool`, *optional*, defaults to `False`): This feature is deprecated. Returns: `List[str]`: The list of files available in `path_or_repo`. """ path_or_repo = str(path_or_repo) # If path_or_repo is a folder, we just return what is inside (subdirectories included). if os.path.isdir(path_or_repo): list_of_files = [] for path, dir_names, file_names in os.walk(path_or_repo): list_of_files.extend([os.path.join(path, f) for f in file_names]) return list_of_files raise RuntimeError(f"Only local dir is supported.") The provided code snippet includes necessary dependencies for implementing the `get_fast_tokenizer_file` function. Write a Python function `def get_fast_tokenizer_file( path_or_repo: Union[str, os.PathLike], revision: Optional[str] = None, use_auth_token: Optional[Union[bool, str]] = None, local_files_only: bool = False, ) -> str` to solve the following problem: Get the tokenizer file to use for this version of transformers. Args: path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*. revision(`str`, *optional*, defaults to `"main"`): This feature is deprecated. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. Returns: `str`: The tokenizer file to use. Here is the function: def get_fast_tokenizer_file( path_or_repo: Union[str, os.PathLike], revision: Optional[str] = None, use_auth_token: Optional[Union[bool, str]] = None, local_files_only: bool = False, ) -> str: """ Get the tokenizer file to use for this version of transformers. Args: path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*. revision(`str`, *optional*, defaults to `"main"`): This feature is deprecated. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). 
    local_files_only (`bool`, *optional*, defaults to `False`):
        Whether or not to only rely on local files and not to attempt to download any files.

    Returns:
        `str`: The tokenizer file to use.
    """
    # Inspect all files from the repo/folder.
    all_files = get_list_of_files(
        path_or_repo, revision=revision, use_auth_token=use_auth_token, local_files_only=local_files_only
    )

    # Defaults to FULL_TOKENIZER_FILE and then tries to look at some newer versions.
    tokenizer_file = FULL_TOKENIZER_FILE
    return tokenizer_file
Get the tokenizer file to use for this version of transformers. Args: path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*. revision(`str`, *optional*, defaults to `"main"`): This feature is deprecated. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. Returns: `str`: The tokenizer file to use.
18,549
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core deps = { "Pillow": "Pillow", "black": "black==21.4b0", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.2", "dataclasses": "dataclasses", "datasets": "datasets", "deepspeed": "deepspeed>=0.5.7", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flake8": "flake8>=3.8.3", "flax": "flax>=0.3.5", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8", "jaxlib": "jaxlib>=0.1.65", "jieba": "jieba", "keras2onnx": "keras2onnx", "nltk": "nltk", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "optuna": "optuna", "optax": "optax>=0.0.8", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.6.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rouge-score": "rouge-score", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "librosa": "librosa", "starlette": "starlette", "tensorflow-cpu": "tensorflow-cpu>=2.3", "tensorflow": "tensorflow>=2.3", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.10.1", "torch": "torch>=1.0", "torchaudio": "torchaudio", "pyctcdecode": "pyctcdecode>=0.2.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "uvicorn": "uvicorn", } def require_version(requirement: str, hint: Optional[str] = None) -> None: """ Perform a runtime check of the dependency versions, using the exact same syntax used by pip. The installed module version comes from the *site-packages* dir via *importlib_metadata*. 
    Args:
        requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
        hint (`str`, *optional*): what suggestion to print in case of requirements not being met

    Example:

    ```python
    require_version("pandas>1.1.2")
    require_version("numpy>1.18.5", "this is important to have for whatever reason")
    ```"""

    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                f"requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    f"requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib_metadata.version(pkg)
    except importlib_metadata.PackageNotFoundError:
        raise importlib_metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)

def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
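Usage is straightforward; `dep_version_check` looks the pin up in the `deps` table above, while `require_version` takes an explicit pip-style requirement (the hint string here is just an example):

```python
dep_version_check("tqdm")  # enforces the pinned "tqdm>=4.27"
require_version("numpy>=1.17", "numpy is needed for array bookkeeping")
```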
null
18,550
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf def is_torch_available(): return _torch_available def is_tf_available(): return _tf_available The provided code snippet includes necessary dependencies for implementing the `set_seed` function. Write a Python function `def set_seed(seed: int)` to solve the following problem: Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set. Here is the function: def set_seed(seed: int): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if is_tf_available(): tf.random.set_seed(seed)
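A small sanity check of what `set_seed` buys you, assuming the real availability helpers from `file_utils` (the stubs above elide their module-level globals): re-seeding replays the same random draws.

```python
import numpy as np

set_seed(42)
a = np.random.rand(3)
set_seed(42)
b = np.random.rand(3)
assert (a == b).all()  # identical draws after re-seeding
```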
Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set.
18,551
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union

import numpy as np

from .file_utils import (
    ExplicitEnum,
    is_psutil_available,
    is_sagemaker_dp_enabled,
    is_tf_available,
    is_torch_available,
    is_torch_cuda_available,
    is_torch_tpu_available,
)

# Prefix used by the Trainer when writing checkpoint directories (e.g. "checkpoint-500").
PREFIX_CHECKPOINT_DIR = "checkpoint"

_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")

def get_last_checkpoint(folder):
    content = os.listdir(folder)
    checkpoints = [
        path
        for path in content
        if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
    ]
    if len(checkpoints) == 0:
        return
    return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
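For illustration, with a hypothetical `output/` folder containing `checkpoint-500` and `checkpoint-1000` directories:

```python
last = get_last_checkpoint("output")
# -> "output/checkpoint-1000" (the highest step number); None if no checkpoint dirs exist
```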
null
18,552
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) def speed_metrics(split, start_time, num_samples=None, num_steps=None): """ Measure and return speed performance metrics. This function requires a time snapshot `start_time` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - split: name to prefix metric (like train, eval, test...) - start_time: operation start time - num_samples: number of samples processed """ runtime = time.time() - start_time result = {f"{split}_runtime": round(runtime, 4)} if num_samples is not None: samples_per_second = num_samples / runtime result[f"{split}_samples_per_second"] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f"{split}_steps_per_second"] = round(steps_per_second, 3) return result The provided code snippet includes necessary dependencies for implementing the `default_compute_objective` function. Write a Python function `def default_compute_objective(metrics: Dict[str, float]) -> float` to solve the following problem: The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. Return: `float`: The objective to minimize or maximize Here is the function: def default_compute_objective(metrics: Dict[str, float]) -> float: """ The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. Return: `float`: The objective to minimize or maximize """ metrics = copy.deepcopy(metrics) loss = metrics.pop("eval_loss", None) _ = metrics.pop("epoch", None) # Remove speed metrics speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second")] for sm in speed_metrics: _ = metrics.pop(sm, None) return loss if len(metrics) == 0 else sum(metrics.values())
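Two worked cases make the objective's branching concrete:

```python
# Only the loss survives the pops, so the objective is the loss (to minimize):
default_compute_objective({"eval_loss": 0.31, "epoch": 2.0})  # -> 0.31

# Extra metrics remain after loss/epoch/speed entries are stripped, so they are summed (to maximize):
default_compute_objective(
    {"eval_loss": 0.31, "eval_accuracy": 0.87, "eval_f1": 0.84, "eval_runtime": 12.3}
)  # -> 0.87 + 0.84 = 1.71
```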
The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. Return: `float`: The objective to minimize or maximize
18,553
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) def is_optuna_available(): return importlib.util.find_spec("optuna") is not None def default_hp_space_optuna(trial) -> Dict[str, float]: from .integrations import is_optuna_available assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" return { "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), "seed": trial.suggest_int("seed", 1, 40), "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), }
null
18,554
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) def is_ray_tune_available(): if not is_ray_available(): return False return importlib.util.find_spec("ray.tune") is not None def default_hp_space_ray(trial) -> Dict[str, float]: from .integrations import is_ray_tune_available assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`" from ray import tune return { "learning_rate": tune.loguniform(1e-6, 1e-4), "num_train_epochs": tune.choice(list(range(1, 6))), "seed": tune.uniform(1, 40), "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), }
null
18,555
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union

import numpy as np

from .file_utils import (
    ExplicitEnum,
    is_psutil_available,
    is_sagemaker_dp_enabled,
    is_tf_available,
    is_torch_available,
    is_torch_cuda_available,
    is_torch_tpu_available,
)

def default_hp_space_sigopt(trial):
    return [
        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"},
        {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
        {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
        {
            "categorical_values": ["4", "8", "16", "32", "64"],
            "name": "per_device_train_batch_size",
            "type": "categorical",
        },
    ]
null
18,556
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) def is_torch_tpu_available(): if not _torch_available: return False # This test is probably enough, but just in case, we unpack a bit. if importlib.util.find_spec("torch_xla") is None: return False if importlib.util.find_spec("torch_xla.core") is None: return False return importlib.util.find_spec("torch_xla.core.xla_model") is not None The provided code snippet includes necessary dependencies for implementing the `is_main_process` function. Write a Python function `def is_main_process(local_rank)` to solve the following problem: Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`. Here is the function: def is_main_process(local_rank): """ Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`. """ if is_torch_tpu_available(): import torch_xla.core.xla_model as xm return xm.get_ordinal() == 0 return local_rank in [-1, 0]
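A typical guard, where `local_rank` is assumed to come from the distributed launcher (e.g. the Trainer's `args.local_rank`):

```python
if is_main_process(local_rank):
    # Only one process per training job writes logs and artifacts.
    print("saving checkpoint from the main process only")
```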
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`.
18,557
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) if is_torch_available(): import torch def is_torch_available(): return _torch_available def is_torch_tpu_available(): if not _torch_available: return False # This test is probably enough, but just in case, we unpack a bit. if importlib.util.find_spec("torch_xla") is None: return False if importlib.util.find_spec("torch_xla.core") is None: return False return importlib.util.find_spec("torch_xla.core.xla_model") is not None def is_sagemaker_dp_enabled(): # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None The provided code snippet includes necessary dependencies for implementing the `total_processes_number` function. Write a Python function `def total_processes_number(local_rank)` to solve the following problem: Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. Here is the function: def total_processes_number(local_rank): """ Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. """ if is_torch_tpu_available(): import torch_xla.core.xla_model as xm return xm.xrt_world_size() elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.distributed as dist return dist.get_world_size() elif local_rank != -1 and is_torch_available(): import torch return torch.distributed.get_world_size() return 1
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
18,558
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) if is_torch_available(): import torch def is_torch_available(): return _torch_available The provided code snippet includes necessary dependencies for implementing the `denumpify_detensorize` function. Write a Python function `def denumpify_detensorize(metrics)` to solve the following problem: Recursively calls `.item()` on the element of the dictionary passed Here is the function: def denumpify_detensorize(metrics): """ Recursively calls `.item()` on the element of the dictionary passed """ if isinstance(metrics, (list, tuple)): return type(metrics)(denumpify_detensorize(m) for m in metrics) elif isinstance(metrics, dict): return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) elif isinstance(metrics, np.generic): return metrics.item() elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: return metrics.item() return metrics
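For example, numpy scalars nested in containers are converted back to plain Python numbers, which keeps the metrics JSON-serializable (values here are illustrative):

```python
import numpy as np

metrics = {"accuracy": np.float64(0.91), "counts": [np.int64(3), np.int64(5)]}
denumpify_detensorize(metrics)
# -> {"accuracy": 0.91, "counts": [3, 5]} with plain Python float/int values
```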
Recursively calls `.item()` on the element of the dictionary passed
18,559
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import ( ExplicitEnum, is_psutil_available, is_sagemaker_dp_enabled, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, ) The provided code snippet includes necessary dependencies for implementing the `number_of_arguments` function. Write a Python function `def number_of_arguments(func)` to solve the following problem: Return the number of arguments of the passed function, even if it's a partial function. Here is the function: def number_of_arguments(func): """ Return the number of arguments of the passed function, even if it's a partial function. """ if isinstance(func, functools.partial): total_args = len(inspect.signature(func.func).parameters) return total_args - len(func.args) - len(func.keywords) return len(inspect.signature(func).parameters)
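A worked example with a hypothetical metric function shows how bound arguments of a partial are discounted:

```python
import functools

def metric_fn(preds, labels, average):
    pass

number_of_arguments(metric_fn)  # -> 3
number_of_arguments(functools.partial(metric_fn, average="macro"))  # -> 2 (one keyword bound)
```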
Return the number of arguments of the passed function, even if it's a partial function.
18,560
import random
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union

from .file_utils import PaddingStrategy
from .tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase

InputDataClass = NewType("InputDataClass", Any)

def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import torch

    if not isinstance(features[0], (dict, BatchEncoding)):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels.
    # Ensure that tensor is created with the correct type
    # (it should be automatically the case, but let's make sure of it.)
    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
        dtype = torch.long if isinstance(label, int) else torch.float
        batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], torch.Tensor):
            batch["labels"] = torch.stack([f["label_ids"] for f in features])
        else:
            dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
            batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)

    # Handling of all other possible keys.
    # Again, we will use the first element to figure out which key/values are not None for this model.
    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, torch.Tensor):
                batch[k] = torch.stack([f[k] for f in features])
            else:
                batch[k] = torch.tensor([f[k] for f in features])

    return batch

def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import numpy as np
    import tensorflow as tf

    if not isinstance(features[0], (dict, BatchEncoding)):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels.
    # Ensure that tensor is created with the correct type
    # (it should be automatically the case, but let's make sure of it.)
    if "label" in first and first["label"] is not None:
        label_col_name = "label"
    elif "label_ids" in first and first["label_ids"] is not None:
        label_col_name = "label_ids"
    elif "labels" in first and first["labels"] is not None:
        label_col_name = "labels"
    else:
        label_col_name = None

    if label_col_name is not None:
        if isinstance(first[label_col_name], tf.Tensor):
            # `tf.DType.is_integer` is a property, not a method, so it must not be called.
            dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
        elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
            dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
        elif isinstance(first[label_col_name], (tuple, list)):
            dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
        else:
            dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
        batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)

    # Handling of all other possible keys.
    # Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items(): if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str): if isinstance(v, (tf.Tensor, np.ndarray)): batch[k] = tf.stack([f[k] for f in features]) else: batch[k] = tf.convert_to_tensor([f[k] for f in features]) return batch def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]: import numpy as np if not isinstance(features[0], (dict, BatchEncoding)): features = [vars(f) for f in features] first = features[0] batch = {} # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) if "label" in first and first["label"] is not None: label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"] dtype = np.int64 if isinstance(label, int) else np.float32 batch["labels"] = np.array([f["label"] for f in features], dtype=dtype) elif "label_ids" in first and first["label_ids"] is not None: if isinstance(first["label_ids"], np.ndarray): batch["labels"] = np.stack([f["label_ids"] for f in features]) else: dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32 batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, np.ndarray): batch[k] = np.stack([f[k] for f in features]) else: batch[k] = np.array([f[k] for f in features]) return batch The provided code snippet includes necessary dependencies for implementing the `default_data_collator` function. Write a Python function `def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]` to solve the following problem: Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: - `label`: handles a single value (int or float) per object - `label_ids`: handles a list of values per object Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it's useful. Here is the function: def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]: """ Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: - `label`: handles a single value (int or float) per object - `label_ids`: handles a list of values per object Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it's useful. """ # In this function we'll make the assumption that all `features` in the batch # have the same attributes. # So we will look at the first element as a proxy for what attributes exist # on the whole batch. if return_tensors == "pt": return torch_default_data_collator(features) elif return_tensors == "tf": return tf_default_data_collator(features) elif return_tensors == "np": return numpy_default_data_collator(features)
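A minimal sketch with already-equal-length token sequences (this collator does no padding itself):

```python
features = [
    {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1], "label": 0},
    {"input_ids": [101, 2025, 102], "attention_mask": [1, 1, 1], "label": 1},
]
batch = default_data_collator(features)  # return_tensors="pt" by default
# batch["labels"]    -> tensor([0, 1]) with dtype long (labels are ints)
# batch["input_ids"] -> LongTensor of shape (2, 3)
```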
Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: - `label`: handles a single value (int or float) per object - `label_ids`: handles a list of values per object Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it's useful.
18,561
import random import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union from .file_utils import PaddingStrategy from .tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase The provided code snippet includes necessary dependencies for implementing the `_torch_collate_batch` function. Write a Python function `def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None)` to solve the following problem: Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary. Here is the function: def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None): """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.""" import numpy as np import torch # Tensorize if necessary. if isinstance(examples[0], (list, tuple, np.ndarray)): examples = [torch.tensor(e, dtype=torch.long) for e in examples] length_of_first = examples[0].size(0) # Check if padding is necessary. are_tensors_same_length = all(x.size(0) == length_of_first for x in examples) if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0): return torch.stack(examples, dim=0) # If yes, check if we have a `pad_token`. if tokenizer._pad_token is None: raise ValueError( "You are attempting to pad samples but the tokenizer you are using" f" ({tokenizer.__class__.__name__}) does not have a pad token." ) # Creating the full tensor and filling it with our data. max_length = max(x.size(0) for x in examples) if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id) for i, example in enumerate(examples): if tokenizer.padding_side == "right": result[i, : example.shape[0]] = example else: result[i, -example.shape[0] :] = example return result
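Sketch of the padding behavior, assuming `tokenizer` is any loaded tokenizer that defines a pad token (e.g. with `padding_side="right"`):

```python
import torch

examples = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
batch = _torch_collate_batch(examples, tokenizer, pad_to_multiple_of=4)
# -> shape (2, 4): max length 3 is rounded up to the next multiple of 4,
#    and shorter rows are filled with tokenizer.pad_token_id
```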
Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.
18,562
import random import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union from .file_utils import PaddingStrategy from .tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase The provided code snippet includes necessary dependencies for implementing the `_tf_collate_batch` function. Write a Python function `def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None)` to solve the following problem: Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary. Here is the function: def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None): import numpy as np import tensorflow as tf """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.""" # Tensorize if necessary. if isinstance(examples[0], (list, tuple)): examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples] # Check if padding is necessary. length_of_first = len(examples[0]) are_tensors_same_length = all(len(x) == length_of_first for x in examples) if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0): return tf.stack(examples, axis=0) # If yes, check if we have a `pad_token`. if tokenizer._pad_token is None: raise ValueError( "You are attempting to pad samples but the tokenizer you are using" f" ({tokenizer.__class__.__name__}) does not have a pad token." ) # Creating the full tensor and filling it with our data. max_length = max(len(x) for x in examples) if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of # result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id) result = [] rank = tf.rank(examples[0]) paddings = np.zeros((rank, 2), dtype=np.int32) for example in examples: if tokenizer.padding_side == "right": paddings[0, 1] = max_length - len(example) else: paddings[0, 0] = max_length - len(example) result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id)) return tf.stack(result, axis=0)
Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.
18,563
import random import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union from .file_utils import PaddingStrategy from .tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase The provided code snippet includes necessary dependencies for implementing the `_numpy_collate_batch` function. Write a Python function `def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None)` to solve the following problem: Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary. Here is the function: def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None): import numpy as np """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.""" # Tensorize if necessary. if isinstance(examples[0], (list, tuple)): examples = [np.array(e, dtype=np.int64) for e in examples] # Check if padding is necessary. length_of_first = len(examples[0]) are_tensors_same_length = all(len(x) == length_of_first for x in examples) if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0): return np.stack(examples, axis=0) # If yes, check if we have a `pad_token`. if tokenizer._pad_token is None: raise ValueError( "You are attempting to pad samples but the tokenizer you are using" f" ({tokenizer.__class__.__name__}) does not have a pad token." ) # Creating the full tensor and filling it with our data. max_length = max(len(x) for x in examples) if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype) for i, example in enumerate(examples): if tokenizer.padding_side == "right": result[i, : example.shape[0]] = example else: result[i, -example.shape[0] :] = example return result
Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.
18,564
import random import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union from .file_utils import PaddingStrategy from .tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase def tolist(x): if isinstance(x, list): return x elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import x = x.numpy() return x.tolist()
null
18,565
import inspect
import os
import re
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    copy_func,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
from .utils.versions import require_version_core

_init_weights = True
The provided code snippet includes necessary dependencies for implementing the `no_init_weights` function. Write a Python function `def no_init_weights(_enable=True)` to solve the following problem:
Context manager to globally disable weight initialization to speed up loading large models. TODO(Patrick): Delete safety argument `_enable=True` at next major version.
Here is the function:
@contextmanager
def no_init_weights(_enable=True):
    """
    Context manager to globally disable weight initialization to speed up loading large models.

    TODO(Patrick): Delete safety argument `_enable=True` at next major version.
    """
    global _init_weights
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
        _init_weights = True
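Sketch of the intended call pattern; `MyBigModel`, `config`, and `state_dict` are placeholders for a concrete model class and its loaded weights:

```python
# Skip the per-module weight init while materializing a large model whose
# parameters will be overwritten by a checkpoint immediately afterwards.
with no_init_weights():
    model = MyBigModel(config)
model.load_state_dict(state_dict)
```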
Context manager to globally disable weight initialization to speed up loading large models. TODO(Patrick): Delete safety argument `_enable=True` at next major version.
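A minimal sketch of the intended use, assuming `no_init_weights` is decorated with `@contextmanager` as above; the manager simply toggles the module-global `_init_weights` flag around the block:

print(_init_weights)      # True
with no_init_weights():
    # Inside the block, _init_weights is False, so any init code that
    # consults the flag can skip expensive weight initialization.
    print(_init_weights)  # False
print(_init_weights)      # True again -- restored by the finally clause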
18,566
import inspect
import os
import re
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    copy_func,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
from .utils.versions import require_version_core

try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility: fall back to a no-op module
    class Identity(nn.Module):
        """A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input

The provided code snippet includes necessary dependencies for implementing the `find_pruneable_heads_and_indices` function. Write a Python function `def find_pruneable_heads_and_indices( heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] ) -> Tuple[Set[int], torch.LongTensor]` to solve the following problem:
Finds the heads and their indices taking `already_pruned_heads` into account.

Args:
    heads (`List[int]`): List of the indices of heads to prune.
    n_heads (`int`): The number of heads in the model.
    head_size (`int`): The size of each head.
    already_pruned_heads (`Set[int]`): A set of already pruned heads.

Returns:
    `Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
Here is the function:
def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking `already_pruned_heads` into account.

    Args:
        heads (`List[int]`): List of the indices of heads to prune.
        n_heads (`int`): The number of heads in the model.
        head_size (`int`): The size of each head.
        already_pruned_heads (`Set[int]`): A set of already pruned heads.

    Returns:
        `Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index
Finds the heads and their indices taking `already_pruned_heads` into account.

Args:
    heads (`List[int]`): List of the indices of heads to prune.
    n_heads (`int`): The number of heads in the model.
    head_size (`int`): The size of each head.
    already_pruned_heads (`Set[int]`): A set of already pruned heads.

Returns:
    `Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
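A worked example, assuming the function as defined above: with 4 heads of size 2 and head 0 already pruned, pruning head 2 shifts its index down by one before the mask is built:

import torch

heads, index = find_pruneable_heads_and_indices(
    heads=[2], n_heads=4, head_size=2, already_pruned_heads={0}
)
print(heads)  # {2} -- the head still to prune, in the original numbering
print(index)  # tensor([0, 1, 4, 5, 6, 7]) -- flat positions to keep; slots 2-3
              # (the shifted head) are zeroed out of the 4*2 = 8 mask entries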
18,567
import inspect import os import re from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import torch from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .file_utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_path, copy_func, is_offline_mode, is_remote_url, replace_return_docstrings, ) from .generation_utils import GenerationMixin from .utils import logging from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: class GenerationMixin: """ A class containing all of the functions supporting generation, to be used as a mixin in [`PreTrainedModel`]. """ def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[str]]: """ This function extracts the model-specific `inputs` for generation. """ # filter model input names that are `None` model_kwargs = {k: v for k, v in model_kwargs.items() if k not in ENCODER_MODEL_INPUT_NAMES or v is not None} # extract keyword arguments that are model input specific model_input_kwarg_names = set(ENCODER_MODEL_INPUT_NAMES) & set(model_kwargs.keys()) # There are 5 possible scenarios if inputs is not None and len(model_input_kwarg_names) == 0: # 1. `inputs` are passed and no model-specific keyword inputs # -> return input model_input_name = None return inputs, model_input_name, model_kwargs elif inputs is not None and len(model_input_kwarg_names) > 0: # 2. `inputs` are passed as well as model-specific keyword inputs # -> not allowed, raise Error raise ValueError( f"`inputs`: {inputs}` were passed alongside " f"{model_input_kwarg_names} which is not allowed." f"Make sure to not pass any of {model_input_kwarg_names} " "when `inputs` is defined." ) elif inputs is None and len(model_input_kwarg_names) == 0: # 3. no `inputs` and no model-specific keyword inputs are passed # -> try to create `input_ids` from BOS input_tensor = self._prepare_input_ids_for_generation(bos_token_id, model_kwargs.get("encoder_outputs")) return input_tensor, "input_ids", model_kwargs elif inputs is None and len(model_input_kwarg_names) == 1: # 4. no `inputs` are passed and exactly one model-specific keyword input # -> return that model-specific keyword input tensor model_input_name = model_input_kwarg_names.pop() input_tensor = model_kwargs.pop(model_input_name) # make sure model is encoder decoder if not `input_ids` if not self.config.is_encoder_decoder and model_input_name != "input_ids": raise ValueError( f"If {model_input_name} is passed as model-specific keyword " "input then model has to be an encoder-decoder and not a " f"{self.__class__.__name__}." ) return input_tensor, model_input_name, model_kwargs else: # 5. no `inputs` are passed and multiple model-specific keyword inputs # -> not allowed, raise Error raise ValueError( f"Can only pass one of {ENCODER_MODEL_INPUT_NAMES}, " f"but passed {model_input_kwarg_names}." f"Make sure to only pass one of {model_input_kwarg_names}." 
) def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the generate method. """ return {"input_ids": input_ids} def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. """ return logits def _prepare_input_ids_for_generation( self, bos_token_id: Optional[int], encoder_outputs: Optional[ModelOutput] ) -> torch.LongTensor: if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, inputs: torch.Tensor, pad_token_id: int, eos_token_id: int, ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ( (eos_token_id is not None) and (pad_token_id != eos_token_id) ) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() else: return torch.ones(inputs.shape[:2], dtype=torch.long, device=self.device) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: if "encoder_outputs" not in model_kwargs: # 1. get encoder encoder = self.get_encoder() # 2. prepare encoder args and encoder kwargs from model kwargs encoder_args = (inputs_tensor,) irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } # 3. make sure that encoder returns `ModelOutput` encoder_kwargs["return_dict"] = True # 4. 
        # if model_input_name is not defined then pass input_tensor as
        # first input argument and remove from args
        if model_input_name is not None:
            # make sure inputs_tensor is None in case model
            # accepts multiple model input arguments
            encoder_kwargs[model_input_name] = inputs_tensor
            encoder_args = ()

        model_kwargs["encoder_outputs"]: ModelOutput = encoder(*encoder_args, **encoder_kwargs)

        return model_kwargs

    def _prepare_decoder_input_ids_for_generation(
        self,
        batch_size: int,
        decoder_start_token_id: int = None,
        bos_token_id: int = None,
        model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.LongTensor:
        if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
            return model_kwargs.pop("decoder_input_ids")
        else:
            decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
            return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * decoder_start_token_id

    def _get_pad_token_id(self, pad_token_id: int = None, eos_token_id: int = None) -> int:
        if pad_token_id is None and eos_token_id is not None:
            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
            pad_token_id = eos_token_id
        return pad_token_id

    def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
        decoder_start_token_id = (
            decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
        )
        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id

        if decoder_start_token_id is not None:
            return decoder_start_token_id
        elif (
            hasattr(self.config, "decoder")
            and hasattr(self.config.decoder, "decoder_start_token_id")
            and self.config.decoder.decoder_start_token_id is not None
        ):
            return self.config.decoder.decoder_start_token_id
        elif bos_token_id is not None:
            return bos_token_id
        elif (
            hasattr(self.config, "decoder")
            and hasattr(self.config.decoder, "bos_token_id")
            and self.config.decoder.bos_token_id is not None
        ):
            return self.config.decoder.bos_token_id
        raise ValueError(
            "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
) def _expand_inputs_for_generation( input_ids: torch.LongTensor, expand_size: int = 1, is_encoder_decoder: bool = False, attention_mask: torch.LongTensor = None, encoder_outputs: ModelOutput = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if is_encoder_decoder: if encoder_outputs is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs def _update_model_kwargs_for_generation( outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past if "past_key_values" in outputs: model_kwargs["past"] = outputs.past_key_values elif "mems" in outputs: model_kwargs["past"] = outputs.mems elif "past_buckets_states" in outputs: model_kwargs["past"] = outputs.past_buckets_states else: model_kwargs["past"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return model_kwargs def _reorder_cache(self, past, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to enable beam search for {self.__class__}" ) def _get_logits_warper( self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances used for multinomial sampling. 
""" # init warp parameters top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p temperature = temperature if temperature is not None else self.config.temperature # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if temperature is not None and temperature != 1.0: warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) return warpers def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, encoder_no_repeat_ngram_size: int, encoder_input_ids: torch.LongTensor, bad_words_ids: List[List[int]], min_length: int, max_length: int, eos_token_id: int, forced_bos_token_id: int, forced_eos_token_id: int, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int, num_beam_groups: int, diversity_penalty: float, remove_invalid_values: bool, logits_processor: Optional[LogitsProcessorList], ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] instances used to modify the scores of the language model head. """ processors = LogitsProcessorList() # init warp parameters repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) encoder_no_repeat_ngram_size = ( encoder_no_repeat_ngram_size if encoder_no_repeat_ngram_size is not None else self.config.encoder_no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids min_length = min_length if min_length is not None else self.config.min_length eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty forced_bos_token_id = ( forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id ) forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) remove_invalid_values = ( remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values ) # instantiate processors list # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if diversity_penalty is not None and diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups ) ) if repetition_penalty is not None and repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size)) if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0: if self.config.is_encoder_decoder: 
processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids)) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if bad_words_ids is not None: processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id)) if min_length is not None and eos_token_id is not None and min_length > -1: processors.append(MinLengthLogitsProcessor(min_length, eos_token_id)) if prefix_allowed_tokens_fn is not None: processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams // num_beam_groups)) if forced_bos_token_id is not None: processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id)) if forced_eos_token_id is not None: processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)) if remove_invalid_values is True: processors.append(InfNanRemoveLogitsProcessor()) processors = self._merge_criteria_processor_list(processors, logits_processor) return processors def _get_stopping_criteria( self, max_length: Optional[int], max_time: Optional[float], stopping_criteria: Optional[StoppingCriteriaList] ) -> StoppingCriteriaList: criteria = StoppingCriteriaList() if max_length is not None: criteria.append(MaxLengthCriteria(max_length=max_length)) if max_time is not None: criteria.append(MaxTimeCriteria(max_time=max_time)) criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) return criteria def _merge_criteria_processor_list( self, default_list: Union[LogitsProcessorList, StoppingCriteriaList], custom_list: Union[LogitsProcessorList, StoppingCriteriaList], ) -> Union[LogitsProcessorList, StoppingCriteriaList]: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to `generate`, " f"but it has already been created with the values {default}. {default} has been created by passing the " "corresponding arguments to generate or by the model's config default values. " f"If you just want to change the default values of {object_type} consider passing them as arguments " f"to `generate` instead of using a custom {object_type}." 
) default_list.extend(custom_list) return default_list def generate( self, inputs: Optional[torch.Tensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, encoder_no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, max_time: Optional[float] = None, max_new_tokens: Optional[int] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, num_beam_groups: Optional[int] = None, diversity_penalty: Optional[float] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(), output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, forced_bos_token_id: Optional[int] = None, forced_eos_token_id: Optional[int] = None, remove_invalid_values: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head. The method currently supports greedy decoding, multinomial sampling, beam-search decoding, and beam-search multinomial sampling. Apart from `inputs`, all the arguments below will default to the value of the attribute of the same name inside the [`PretrainedConfig`] of the model. The default values indicated are the default values of those config. Most of these parameters are explained in more detail in [this blog post](https://huggingface.co/blog/how-to-generate). Parameters: inputs (`torch.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length, feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. max_length (`int`, *optional*, defaults to `model.config.max_length`): The maximum length of the sequence to be generated. max_new_tokens (`int`, *optional*, defaults to None): The maximum numbers of tokens to generate, ignore the current number of tokens. Use either `max_new_tokens` or `max_length` but not both, they serve the same purpose. min_length (`int`, *optional*, defaults to 10): The minimum length of the sequence to be generated. do_sample (`bool`, *optional*, defaults to `False`): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. 
num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. temperature (`float`, *optional*, defaults to 1.0): The value used to module the next token probabilities. top_k (`int`, *optional*, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. repetition_penalty (`float`, *optional*, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. pad_token_id (`int`, *optional*): The id of the *padding* token. bos_token_id (`int`, *optional*): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. bad_words_ids(`List[List[int]]`, *optional*): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer(bad_word, add_prefix_space=True).input_ids`. num_return_sequences(`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. max_time(`float`, *optional*, defaults to None): The maximum amount of time you allow the computation to run for in seconds. generation will still finish the current pass after allocated time has been passed. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values are in `[0, 1]`, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as `input_ids` that masks the pad token. [What are attention masks?](../glossary#attention-mask) decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. use_cache: (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. num_beam_groups (`int`, *optional*, defaults to 1): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. diversity_penalty (`float`, *optional*, defaults to 0.0): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled. prefix_allowed_tokens_fn: (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. 
This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criteria is passed that is already created with the arguments or a model's config an error is thrown. This feature is intended for advanced users. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. forced_bos_token_id (`int`, *optional*): The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target language token. forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. remove_invalid_values (`bool`, *optional*): Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method to crash. Note that using `remove_invalid_values` can slow down generation. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~file_utils.ModelOutput`] or `torch.LongTensor`: A [`~file_utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. 
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~file_utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchDecoderOnlyOutput`], - [`~generation_utils.SampleDecoderOnlyOutput`], - [`~generation_utils.BeamSearchDecoderOnlyOutput`], - [`~generation_utils.BeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~file_utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchEncoderDecoderOutput`], - [`~generation_utils.SampleEncoderDecoderOutput`], - [`~generation_utils.BeamSearchEncoderDecoderOutput`], - [`~generation_utils.BeamSampleEncoderDecoderOutput`] """ # 1. Set generation parameters if not already defined bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id num_beams = num_beams if num_beams is not None else self.config.num_beams length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups do_sample = do_sample if do_sample is not None else self.config.do_sample num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) if pad_token_id is None and eos_token_id is not None: # special case if pad_token_id is not defined logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id # 2. Define model inputs # inputs_tensor has to be defined # model_input_name is defined if model-specific keyword input is passed # otherwise model_input_name is None # all model-specific keyword inputs are removed from `model_kwargs` inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] # 3. Define other model kwargs model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states model_kwargs["use_cache"] = use_cache if model_kwargs.get("attention_mask", None) is None: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, pad_token_id, eos_token_id ) if self.config.is_encoder_decoder: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 4. 
Prepare `input_ids` which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids = self._prepare_decoder_input_ids_for_generation( batch_size, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, model_kwargs=model_kwargs, ) else: # if decoder-only then inputs_tensor has to be `input_ids` input_ids = inputs_tensor # 5. Prepare `max_length` depending on other stopping criteria # if `max_new_tokens` is passed, but not `max_length` -> set `max_length = max_new_tokens` if max_length is None and max_new_tokens is not None: max_length = max_new_tokens + input_ids.shape[-1] elif max_length is not None and max_new_tokens is not None: # Both are set, this is odd, raise a warning warnings.warn( "Both `max_length` and `max_new_tokens` have been set " f"but they serve the same purpose. `max_length` {max_length} " f"will take priority over `max_new_tokens` {max_new_tokens}.", UserWarning, ) # default to config if still None max_length = max_length if max_length is not None else self.config.max_length if input_ids.shape[-1] >= max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}. " "This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``." ) # 6. determine generation mode is_greedy_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is False is_sample_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is True is_beam_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is False is_beam_sample_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is True is_group_beam_gen_mode = (num_beams > 1) and (num_beam_groups > 1) if num_beam_groups > num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." ) # 7. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, encoder_input_ids=inputs_tensor, bad_words_ids=bad_words_ids, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, num_beams=num_beams, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty, remove_invalid_values=remove_invalid_values, logits_processor=logits_processor, ) # 8. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria ) # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search." ) # 10. run greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_sample_gen_mode: # 10. 
prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams ) # 11. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 12. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams ) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size * num_return_sequences, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams * num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. run beam sample return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_group_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if num_beams % num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. 
prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, max_length=stopping_criteria.max_length, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using greedy decoding. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. 
Return: [`~generation_utils.GreedySearchDecoderOnlyOutput`], [`~generation_utils.GreedySearchEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) cur_len = input_ids.shape[-1] this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_logits,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) cur_len = cur_len + 1 # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using multinomial sampling. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. 
logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.SampleDecoderOnlyOutput`], [`~generation_utils.SampleEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.SampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. 
Examples: """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) cur_len = input_ids.shape[-1] this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) cur_len = cur_len + 1 # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using beam search decoding. 
Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. 
For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = (next_tokens / vocab_size).long() next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: 
Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using beam search with multinomial sampling. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSampleDecoderOnlyOutput`], [`~generation_utils.BeamSampleEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. 
""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSampleEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSampleDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: 
Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ): r""" Generates sequences for models with a language modeling head using beam search decoding. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs that will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
Examples: """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) # initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in # the same group don't produce same tokens everytime. beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores + beam_scores[batch_group_indices].unsqueeze(-1).expand_as( next_token_scores ) if output_scores: processed_score[batch_group_indices] = next_token_scores # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * (beam_idx // group_size) + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if 
self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): try: return next(parameter.parameters()).device except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device
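Aside: once the bookkeeping is stripped away, the `sample()` loop above reduces to a small core: softmax the warped last-position scores, draw with `torch.multinomial`, and force rows that already emitted EOS to keep emitting padding. Below is a minimal, self-contained sketch of one such step, not the library code; the batch size, vocabulary size, token ids, and the 0.7 temperature are made-up values for illustration.

import torch
import torch.nn.functional as F

batch_size, vocab_size = 2, 10
pad_token_id, eos_token_id = 0, 1                     # illustrative ids

logits = torch.randn(batch_size, vocab_size)          # stands in for outputs.logits[:, -1, :]
unfinished = torch.tensor([1, 0])                     # row 1 has already produced EOS

probs = F.softmax(logits / 0.7, dim=-1)               # temperature warp, T=0.7
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)

# finished sentences keep emitting the padding token
next_tokens = next_tokens * unfinished + pad_token_id * (1 - unfinished)

# rows that just produced EOS are marked finished for the next step
unfinished = unfinished.mul((next_tokens != eos_token_id).long())
print(next_tokens, unfinished)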
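Aside: the candidate selection inside `beam_search()` above is a flatten-and-top-k. The toy sketch below, with illustrative sizes rather than real model outputs, shows how top-k indices taken over a `(batch_size, num_beams * vocab_size)` view are split back into a source-beam index and a token id; `2 * num_beams` candidates are kept so hypotheses that end in EOS can be set aside without starving the beam.

import torch

batch_size, num_beams, vocab_size = 1, 2, 5
beam_scores = torch.tensor([0.0, -1e9])               # at step 0 only beam 0 is live

step_scores = torch.log_softmax(torch.randn(batch_size * num_beams, vocab_size), dim=-1)
scores = step_scores + beam_scores[:, None].expand_as(step_scores)
scores = scores.view(batch_size, num_beams * vocab_size)

topk_scores, topk_ids = torch.topk(scores, 2 * num_beams, dim=1, largest=True, sorted=True)
next_indices = topk_ids // vocab_size                 # which beam each candidate extends
next_tokens = topk_ids % vocab_size                   # which token it appends
print(topk_scores, next_indices, next_tokens)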
18,568
import inspect import os import re from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import torch from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .file_utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_path, copy_func, is_offline_mode, is_remote_url, replace_return_docstrings, ) from .generation_utils import GenerationMixin from .utils import logging from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: # fallback for torch < 1.1.0, where nn.Identity does not exist yet class Identity(nn.Module): r"""A placeholder identity operator that is argument-insensitive.""" def __init__(self, *args, **kwargs): super().__init__() def forward(self, input): return input class GenerationMixin: """ A class containing all of the functions supporting generation, to be used as a mixin in [`PreTrainedModel`]. """ def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # filter model input names that are `None` model_kwargs = {k: v for k, v in model_kwargs.items() if k not in ENCODER_MODEL_INPUT_NAMES or v is not None} # extract keyword arguments that are model input specific model_input_kwarg_names = set(ENCODER_MODEL_INPUT_NAMES) & set(model_kwargs.keys()) # There are 5 possible scenarios if inputs is not None and len(model_input_kwarg_names) == 0: # 1. `inputs` are passed and no model-specific keyword inputs # -> return input model_input_name = None return inputs, model_input_name, model_kwargs elif inputs is not None and len(model_input_kwarg_names) > 0: # 2. `inputs` are passed as well as model-specific keyword inputs # -> not allowed, raise Error raise ValueError( f"`inputs`: {inputs} were passed alongside " f"{model_input_kwarg_names} which is not allowed. " f"Make sure to not pass any of {model_input_kwarg_names} " "when `inputs` is defined." ) elif inputs is None and len(model_input_kwarg_names) == 0: # 3. no `inputs` and no model-specific keyword inputs are passed # -> try to create `input_ids` from BOS input_tensor = self._prepare_input_ids_for_generation(bos_token_id, model_kwargs.get("encoder_outputs")) return input_tensor, "input_ids", model_kwargs elif inputs is None and len(model_input_kwarg_names) == 1: # 4. no `inputs` are passed and exactly one model-specific keyword input # -> return that model-specific keyword input tensor model_input_name = model_input_kwarg_names.pop() input_tensor = model_kwargs.pop(model_input_name) # make sure model is encoder decoder if not `input_ids` if not self.config.is_encoder_decoder and model_input_name != "input_ids": raise ValueError( f"If {model_input_name} is passed as model-specific keyword " "input then model has to be an encoder-decoder and not a " f"{self.__class__.__name__}." ) return input_tensor, model_input_name, model_kwargs else: # 5. no `inputs` are passed and multiple model-specific keyword inputs # -> not allowed, raise Error raise ValueError( f"Can only pass one of {ENCODER_MODEL_INPUT_NAMES}, " f"but passed {model_input_kwarg_names}. " f"Make sure to only pass one of {model_input_kwarg_names}."
) def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the generate method. """ return {"input_ids": input_ids} def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. """ return logits def _prepare_input_ids_for_generation( self, bos_token_id: Optional[int], encoder_outputs: Optional[ModelOutput] ) -> torch.LongTensor: if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, inputs: torch.Tensor, pad_token_id: int, eos_token_id: int, ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ( (eos_token_id is not None) and (pad_token_id != eos_token_id) ) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() else: return torch.ones(inputs.shape[:2], dtype=torch.long, device=self.device) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: if "encoder_outputs" not in model_kwargs: # 1. get encoder encoder = self.get_encoder() # 2. prepare encoder args and encoder kwargs from model kwargs encoder_args = (inputs_tensor,) irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } # 3. make sure that encoder returns `ModelOutput` encoder_kwargs["return_dict"] = True # 4. 
if model_input_name is not defined then pass input_tensor as # first input argument and remove from args if model_input_name is not None: # make sure inputs_tensor is None in case model # accepts multiple model input arguments encoder_kwargs[model_input_name] = inputs_tensor encoder_args = () model_kwargs["encoder_outputs"]: ModelOutput = encoder(*encoder_args, **encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, decoder_start_token_id: int = None, bos_token_id: int = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.LongTensor: if model_kwargs is not None and "decoder_input_ids" in model_kwargs: return model_kwargs.pop("decoder_input_ids") else: decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * decoder_start_token_id def _get_pad_token_id(self, pad_token_id: int = None, eos_token_id: int = None) -> int: if pad_token_id is None and eos_token_id is not None: logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id return pad_token_id def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." 
) def _expand_inputs_for_generation( input_ids: torch.LongTensor, expand_size: int = 1, is_encoder_decoder: bool = False, attention_mask: torch.LongTensor = None, encoder_outputs: ModelOutput = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if is_encoder_decoder: if encoder_outputs is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs def _update_model_kwargs_for_generation( outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past if "past_key_values" in outputs: model_kwargs["past"] = outputs.past_key_values elif "mems" in outputs: model_kwargs["past"] = outputs.mems elif "past_buckets_states" in outputs: model_kwargs["past"] = outputs.past_buckets_states else: model_kwargs["past"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return model_kwargs def _reorder_cache(self, past, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to enable beam search for {self.__class__}" ) def _get_logits_warper( self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances used for multinomial sampling. 
""" # init warp parameters top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p temperature = temperature if temperature is not None else self.config.temperature # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if temperature is not None and temperature != 1.0: warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) return warpers def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, encoder_no_repeat_ngram_size: int, encoder_input_ids: torch.LongTensor, bad_words_ids: List[List[int]], min_length: int, max_length: int, eos_token_id: int, forced_bos_token_id: int, forced_eos_token_id: int, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int, num_beam_groups: int, diversity_penalty: float, remove_invalid_values: bool, logits_processor: Optional[LogitsProcessorList], ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] instances used to modify the scores of the language model head. """ processors = LogitsProcessorList() # init warp parameters repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) encoder_no_repeat_ngram_size = ( encoder_no_repeat_ngram_size if encoder_no_repeat_ngram_size is not None else self.config.encoder_no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids min_length = min_length if min_length is not None else self.config.min_length eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty forced_bos_token_id = ( forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id ) forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) remove_invalid_values = ( remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values ) # instantiate processors list # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if diversity_penalty is not None and diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups ) ) if repetition_penalty is not None and repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size)) if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0: if self.config.is_encoder_decoder: 
processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids)) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if bad_words_ids is not None: processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id)) if min_length is not None and eos_token_id is not None and min_length > -1: processors.append(MinLengthLogitsProcessor(min_length, eos_token_id)) if prefix_allowed_tokens_fn is not None: processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams // num_beam_groups)) if forced_bos_token_id is not None: processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id)) if forced_eos_token_id is not None: processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)) if remove_invalid_values is True: processors.append(InfNanRemoveLogitsProcessor()) processors = self._merge_criteria_processor_list(processors, logits_processor) return processors def _get_stopping_criteria( self, max_length: Optional[int], max_time: Optional[float], stopping_criteria: Optional[StoppingCriteriaList] ) -> StoppingCriteriaList: criteria = StoppingCriteriaList() if max_length is not None: criteria.append(MaxLengthCriteria(max_length=max_length)) if max_time is not None: criteria.append(MaxTimeCriteria(max_time=max_time)) criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) return criteria def _merge_criteria_processor_list( self, default_list: Union[LogitsProcessorList, StoppingCriteriaList], custom_list: Union[LogitsProcessorList, StoppingCriteriaList], ) -> Union[LogitsProcessorList, StoppingCriteriaList]: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to `generate`, " f"but it has already been created with the values {default}. {default} has been created by passing the " "corresponding arguments to generate or by the model's config default values. " f"If you just want to change the default values of {object_type} consider passing them as arguments " f"to `generate` instead of using a custom {object_type}." 
) default_list.extend(custom_list) return default_list def generate( self, inputs: Optional[torch.Tensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, encoder_no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, max_time: Optional[float] = None, max_new_tokens: Optional[int] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, num_beam_groups: Optional[int] = None, diversity_penalty: Optional[float] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(), output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, forced_bos_token_id: Optional[int] = None, forced_eos_token_id: Optional[int] = None, remove_invalid_values: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head. The method currently supports greedy decoding, multinomial sampling, beam-search decoding, and beam-search multinomial sampling. Apart from `inputs`, all the arguments below will default to the value of the attribute of the same name inside the [`PretrainedConfig`] of the model. The default values indicated are the default values of those configs. Most of these parameters are explained in more detail in [this blog post](https://huggingface.co/blog/how-to-generate). Parameters: inputs (`torch.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length, feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. max_length (`int`, *optional*, defaults to `model.config.max_length`): The maximum length of the sequence to be generated. max_new_tokens (`int`, *optional*, defaults to None): The maximum number of tokens to generate, ignoring the current number of tokens. Use either `max_new_tokens` or `max_length` but not both; they serve the same purpose. min_length (`int`, *optional*, defaults to 10): The minimum length of the sequence to be generated. do_sample (`bool`, *optional*, defaults to `False`): Whether or not to use sampling; use greedy decoding otherwise. early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not.
num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. temperature (`float`, *optional*, defaults to 1.0): The value used to modulate the next token probabilities. top_k (`int`, *optional*, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. repetition_penalty (`float`, *optional*, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. pad_token_id (`int`, *optional*): The id of the *padding* token. bos_token_id (`int`, *optional*): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. bad_words_ids (`List[List[int]]`, *optional*): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer(bad_word, add_prefix_space=True).input_ids`. num_return_sequences (`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. max_time (`float`, *optional*, defaults to None): The maximum amount of time you allow the computation to run for in seconds. Generation will still finish the current pass after the allocated time has passed. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values are in `[0, 1]`, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as `input_ids` that masks the pad token. [What are attention masks?](../glossary#attention-mask) decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past key/values attentions (if applicable to the model) to speed up decoding. num_beam_groups (`int`, *optional*, defaults to 1): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. diversity_penalty (`float`, *optional*, defaults to 0.0): This value is subtracted from a beam's score if it generates the same token as any beam from another group at a particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constrains the beam search to allowed tokens only at each step. If not provided, no constraint is applied.
This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criterion is passed that is already created with the arguments or a model's config an error is thrown. This feature is intended for advanced users. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. forced_bos_token_id (`int`, *optional*): The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target language token. forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. remove_invalid_values (`bool`, *optional*): Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing. Note that using `remove_invalid_values` can slow down generation. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~file_utils.ModelOutput`] or `torch.LongTensor`: A [`~file_utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~file_utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchDecoderOnlyOutput`], - [`~generation_utils.SampleDecoderOnlyOutput`], - [`~generation_utils.BeamSearchDecoderOnlyOutput`], - [`~generation_utils.BeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~file_utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchEncoderDecoderOutput`], - [`~generation_utils.SampleEncoderDecoderOutput`], - [`~generation_utils.BeamSearchEncoderDecoderOutput`], - [`~generation_utils.BeamSampleEncoderDecoderOutput`] """ # 1. Set generation parameters if not already defined bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id num_beams = num_beams if num_beams is not None else self.config.num_beams length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups do_sample = do_sample if do_sample is not None else self.config.do_sample num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) if pad_token_id is None and eos_token_id is not None: # special case if pad_token_id is not defined logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id # 2. Define model inputs # inputs_tensor has to be defined # model_input_name is defined if model-specific keyword input is passed # otherwise model_input_name is None # all model-specific keyword inputs are removed from `model_kwargs` inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] # 3. Define other model kwargs model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states model_kwargs["use_cache"] = use_cache if model_kwargs.get("attention_mask", None) is None: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, pad_token_id, eos_token_id ) if self.config.is_encoder_decoder: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 4. 
Prepare `input_ids` which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids = self._prepare_decoder_input_ids_for_generation( batch_size, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, model_kwargs=model_kwargs, ) else: # if decoder-only then inputs_tensor has to be `input_ids` input_ids = inputs_tensor # 5. Prepare `max_length` depending on other stopping criteria # if `max_new_tokens` is passed, but not `max_length` -> set `max_length = max_new_tokens` if max_length is None and max_new_tokens is not None: max_length = max_new_tokens + input_ids.shape[-1] elif max_length is not None and max_new_tokens is not None: # Both are set, this is odd, raise a warning warnings.warn( "Both `max_length` and `max_new_tokens` have been set " f"but they serve the same purpose. `max_length` {max_length} " f"will take priority over `max_new_tokens` {max_new_tokens}.", UserWarning, ) # default to config if still None max_length = max_length if max_length is not None else self.config.max_length if input_ids.shape[-1] >= max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}. " "This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``." ) # 6. determine generation mode is_greedy_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is False is_sample_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is True is_beam_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is False is_beam_sample_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is True is_group_beam_gen_mode = (num_beams > 1) and (num_beam_groups > 1) if num_beam_groups > num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." ) # 7. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, encoder_input_ids=inputs_tensor, bad_words_ids=bad_words_ids, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, num_beams=num_beams, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty, remove_invalid_values=remove_invalid_values, logits_processor=logits_processor, ) # 8. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria ) # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search." ) # 10. run greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_sample_gen_mode: # 10. 
prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams ) # 11. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 12. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams ) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size * num_return_sequences, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams * num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. run beam sample return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_group_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if num_beams % num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. 
prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, max_length=stopping_criteria.max_length, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using greedy decoding. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. 
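A hedged usage sketch (not from the original docstring; the checkpoint `gpt2`, the prompt, and the processor/criteria values are illustrative assumptions):

```python
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
    MaxLengthCriteria,
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id  # gpt2 has no pad token

input_ids = tokenizer("Today is a beautiful day, and", return_tensors="pt").input_ids
logits_processor = LogitsProcessorList([MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id)])
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])

# greedy search picks the argmax token at every step until a stopping criterion fires
outputs = model.greedy_search(input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```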
Return: [`~generation_utils.GreedySearchDecoderOnlyOutput`], [`~generation_utils.GreedySearchEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) cur_len = input_ids.shape[-1] this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_logits,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) cur_len = cur_len + 1 # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using multinomial sampling. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. 
logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.SampleDecoderOnlyOutput`], [`~generation_utils.SampleEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.SampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. 
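Examples: The original example was stripped during extraction; the following is a hedged reconstruction (the checkpoint `gpt2`, the prompt, the seed, and the warper settings are illustrative assumptions):

```python
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    TopKLogitsWarper,
    TemperatureLogitsWarper,
    StoppingCriteriaList,
    MaxLengthCriteria,
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id  # gpt2 has no pad token

input_ids = tokenizer("Today is a beautiful day, and", return_tensors="pt").input_ids
logits_processor = LogitsProcessorList([MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id)])
# warpers reshape the distribution before the multinomial draw
logits_warper = LogitsProcessorList([TopKLogitsWarper(50), TemperatureLogitsWarper(0.7)])
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])

torch.manual_seed(0)  # sampling is stochastic; seed for reproducibility
outputs = model.sample(
    input_ids,
    logits_processor=logits_processor,
    logits_warper=logits_warper,
    stopping_criteria=stopping_criteria,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```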
""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) cur_len = input_ids.shape[-1] this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish?
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) cur_len = cur_len + 1 # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using beam search decoding. 
Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
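Examples: A hedged usage sketch (not from the original source; the checkpoint `t5-base`, the prompt, and the beam settings are illustrative assumptions):

```python
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
    MaxLengthCriteria,
    BeamSearchScorer,
)

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

encoder_input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids

# beam search treats each beam as a batch row, so decoder inputs are expanded num_beams times
num_beams = 3
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) * model.config.decoder_start_token_id
model_kwargs = {
    "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
}

beam_scorer = BeamSearchScorer(batch_size=1, num_beams=num_beams, device=model.device)
logits_processor = LogitsProcessorList([MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)])
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])

outputs = model.beam_search(
    input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, **model_kwargs
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```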
""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. 
For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = (next_tokens / vocab_size).long() next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: 
Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using beam search with multinomial sampling. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSampleDecoderOnlyOutput`], [`~generation_utils.BeamSampleEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. 
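Examples: A hedged usage sketch (not from the original source; the checkpoint `t5-base`, the prompt, and the warper settings are illustrative assumptions):

```python
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    TopKLogitsWarper,
    TemperatureLogitsWarper,
    StoppingCriteriaList,
    MaxLengthCriteria,
    BeamSearchScorer,
)

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

encoder_input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids

num_beams = 3
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) * model.config.decoder_start_token_id
model_kwargs = {
    "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
}

beam_scorer = BeamSearchScorer(batch_size=1, num_beams=num_beams, device=model.device)
logits_processor = LogitsProcessorList([MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)])
# warpers make the per-beam candidate choice stochastic instead of deterministic top-k
logits_warper = LogitsProcessorList([TopKLogitsWarper(50), TemperatureLogitsWarper(0.7)])
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])

outputs = model.beam_sample(
    input_ids,
    beam_scorer,
    logits_processor=logits_processor,
    logits_warper=logits_warper,
    stopping_criteria=stopping_criteria,
    **model_kwargs,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```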
""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSampleEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSampleDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: 
Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ): r""" Generates sequences for models with a language modeling head using diverse beam search decoding. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to *False*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to *False*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to *False*): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to *False*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs that will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or obj:*torch.LongTensor*: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
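Examples: The original example was stripped during extraction; the following is a hedged reconstruction (the checkpoint `t5-base`, the prompt, and the diversity penalty are illustrative assumptions):

```python
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    LogitsProcessorList,
    HammingDiversityLogitsProcessor,
    MinLengthLogitsProcessor,
    BeamSearchScorer,
)

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

encoder_input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids

# 6 beams split into 3 groups of 2; groups are penalized for repeating each other's tokens
num_beams, num_beam_groups = 6, 3
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) * model.config.decoder_start_token_id
model_kwargs = {
    "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
}

beam_scorer = BeamSearchScorer(
    batch_size=1, num_beams=num_beams, num_beam_groups=num_beam_groups, device=model.device
)
logits_processor = LogitsProcessorList(
    [
        HammingDiversityLogitsProcessor(5.5, num_beams=num_beams, num_beam_groups=num_beam_groups),
        MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
    ]
)

outputs = model.group_beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```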
""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in # the same group don't produce the same tokens every time. beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish?
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores + beam_scores[batch_group_indices].unsqueeze(-1).expand_as( next_token_scores ) if output_scores: processed_score[batch_group_indices] = next_token_scores # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * (beam_idx // group_size) + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if 
self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): try: return next(parameter.parameters()).dtype except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype
null
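The decoding loop above is rarely called directly. A minimal sketch of how diverse (group) beam search is usually reached in practice, through the Hugging Face `generate` API; the `t5-small` checkpoint and the penalty value are illustrative choices, not part of the original snippet:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
outputs = model.generate(
    **inputs,
    num_beams=6,
    num_beam_groups=3,      # num_beams must be divisible by num_beam_groups
    diversity_penalty=1.0,  # enables the HammingDiversityLogitsProcessor used by the loop above
    num_return_sequences=3,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```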
18,569
import inspect import os import re from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import torch from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .file_utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_path, copy_func, is_offline_mode, is_remote_url, replace_return_docstrings, ) from .generation_utils import GenerationMixin from .utils import logging from .utils.versions import require_version_core The provided code snippet includes necessary dependencies for implementing the `unwrap_model` function. Write a Python function `def unwrap_model(model: nn.Module) -> nn.Module` to solve the following problem: Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap. Here is the function: def unwrap_model(model: nn.Module) -> nn.Module: """ Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap. """ # since there could be multiple levels of wrapping, unwrap recursively if hasattr(model, "module"): return unwrap_model(model.module) else: return model
Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap.
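A minimal usage sketch: `nn.DataParallel` is one wrapper that exposes the inner model as `.module`, and the recursion handles nested wrapping too. The import path shown is the one used by recent `transformers` releases and may vary by version:

```python
from torch import nn
from transformers.modeling_utils import unwrap_model

inner = nn.Linear(4, 2)
wrapped = nn.DataParallel(nn.DataParallel(inner))  # wrapping can nest

# the recursion peels off every `.module` level and returns the bare model
assert unwrap_model(wrapped) is inner
```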
18,570
import inspect
import os
import re
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    copy_func,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
from .utils.versions import require_version_core

try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input

class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x

def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`torch.nn.Linear`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer

def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D works as a Linear layer (see e.g. BERT) but the
    weights are transposed.

    Used to remove heads.

    Args:
        layer ([`~modeling_utils.Conv1D`]): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.

    Returns:
        [`~modeling_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer

The provided code snippet includes necessary dependencies for implementing the `prune_layer` function. Write a Python function `def prune_layer( layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None ) -> Union[nn.Linear, Conv1D]` to solve the following problem:
Prune a Conv1D or linear layer to keep only entries in index. Used to remove heads. Args: layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune. index (`torch.LongTensor`): The indices to keep in the layer. dim (`int`, *optional*): The dimension on which to keep the indices. Returns: `torch.nn.Linear` or [`~modeling_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
Here is the function:
def prune_layer(
    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear` or [`~modeling_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")
Prune a Conv1D or linear layer to keep only entries in index. Used to remove heads. Args: layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune. index (`torch.LongTensor`): The indices to keep in the layer. dim (`int`, *optional*): The dimension on which to keep the indices. Returns: `torch.nn.Linear` or [`~modeling_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
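A quick sketch of the dispatch: for `nn.Linear` the default `dim=0` prunes output features (weight rows). The `transformers.modeling_utils` import path re-exports these helpers in recent releases, though the canonical location may vary by version:

```python
import torch
from torch import nn
from transformers.modeling_utils import prune_layer

layer = nn.Linear(8, 6)             # 6 output features
index = torch.tensor([0, 2, 5])     # keep output features 0, 2 and 5
pruned = prune_layer(layer, index)  # nn.Linear branch defaults to dim=0

print(pruned.weight.shape)  # torch.Size([3, 8]): rows 0, 2, 5 of the old weight
assert torch.equal(pruned.weight, layer.weight[index])
```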
18,571
import inspect
import os
import re
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    copy_func,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
from .utils.versions import require_version_core

try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input

The provided code snippet includes necessary dependencies for implementing the `apply_chunking_to_forward` function. Write a Python function `def apply_chunking_to_forward( forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors ) -> torch.Tensor` to solve the following problem:
This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory. If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly applying `forward_fn` to `input_tensors`. Args: forward_fn (`Callable[..., torch.Tensor]`): The forward function of the model. chunk_size (`int`): The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`. chunk_dim (`int`): The dimension over which the `input_tensors` should be chunked. input_tensors (`Tuple[torch.Tensor]`): The input tensors of `forward_fn` which will be chunked. Returns: `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied. Examples: ```python # rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) ```
Here is the function:
def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
    `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

    If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
    applying `forward_fn` to `input_tensors`.

    Args:
        forward_fn (`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (`int`):
            The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (`int`):
            The dimension over which the `input_tensors` should be chunked.
        input_tensors (`Tuple[torch.Tensor]`):
            The input tensors of `forward_fn` which will be chunked.

    Returns:
        `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied.

    Examples:

    ```python
    # rename the usual forward() fn to forward_chunk()
    def forward_chunk(self, hidden_states):
        hidden_states = self.decoder(hidden_states)
        return hidden_states


    # implement a chunked forward function
    def forward(self, hidden_states):
        return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    ```"""
    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"

    # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        tensor_shape = input_tensors[0].shape[chunk_dim]
        for input_tensor in input_tensors:
            if input_tensor.shape[chunk_dim] != tensor_shape:
                raise ValueError(
                    f"All input tensors have to be of the same shape: {tensor_shape}, "
                    f"found shape {input_tensor.shape[chunk_dim]}"
                )

        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)
This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory. If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly applying `forward_fn` to `input_tensors`.

Args:
    forward_fn (`Callable[..., torch.Tensor]`): The forward function of the model.
    chunk_size (`int`): The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
    chunk_dim (`int`): The dimension over which the `input_tensors` should be chunked.
    input_tensors (`Tuple[torch.Tensor]`): The input tensors of `forward_fn` which will be chunked.

Returns:
    `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied.

Examples:

```python
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
    hidden_states = self.decoder(hidden_states)
    return hidden_states


# implement a chunked forward function
def forward(self, hidden_states):
    return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
```
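The docstring example above is schematic. A runnable check, assuming the `transformers.modeling_utils` re-export location, that chunked and direct application agree when the forward is independent across the chunked dimension:

```python
import torch
from transformers.modeling_utils import apply_chunking_to_forward

dense = torch.nn.Linear(16, 16)

def forward_chunk(hidden_states):
    # applied independently at each sequence position, so chunking is safe
    return dense(hidden_states)

hidden_states = torch.randn(2, 8, 16)  # (batch, seq_len, hidden)
chunked = apply_chunking_to_forward(forward_chunk, 4, 1, hidden_states)  # seq_len 8 -> two chunks of 4
direct = forward_chunk(hidden_states)
assert torch.allclose(chunked, direct, atol=1e-6)
```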
18,572
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union

import torch
import torch.distributed as dist
from torch import nn

from .file_utils import ModelOutput
from .generation_beam_search import BeamScorer, BeamSearchScorer
from .generation_logits_process import (
    EncoderNoRepeatNGramLogitsProcessor,
    ForcedBOSTokenLogitsProcessor,
    ForcedEOSTokenLogitsProcessor,
    HammingDiversityLogitsProcessor,
    InfNanRemoveLogitsProcessor,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    NoBadWordsLogitsProcessor,
    NoRepeatNGramLogitsProcessor,
    PrefixConstrainedLogitsProcessor,
    RepetitionPenaltyLogitsProcessor,
    TemperatureLogitsWarper,
    TopKLogitsWarper,
    TopPLogitsWarper,
)
from .generation_stopping_criteria import (
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteria,
    StoppingCriteriaList,
    validate_stopping_criteria,
)
from .utils import logging

class TopPLogitsWarper(LogitsWarper):
    """
    [`LogitsWarper`] that performs top-p filtering, i.e. restricting to the most probable tokens whose cumulative
    probability adds up to `top_p` or higher.

    Args:
        top_p (`float`):
            If set to < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are
            kept for generation.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        top_p = float(top_p)
        if top_p < 0 or top_p > 1.0:
            raise ValueError(f"`top_p` has to be a float between 0 and 1, but is {top_p}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        sorted_logits, sorted_indices = torch.sort(scores, descending=True)
        cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)

        # Remove tokens with cumulative top_p above the threshold (token with 0 are kept)
        sorted_indices_to_remove = cumulative_probs > self.top_p
        if self.min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        # scatter sorted tensors to original indexing
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores

class TopKLogitsWarper(LogitsWarper):
    r"""
    [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.

    Args:
        top_k (`int`):
            The number of highest probability vocabulary tokens to keep for top-k-filtering.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = top_k
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        top_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1))  # Safety check
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores

The provided code snippet includes necessary dependencies for implementing the `top_k_top_p_filtering` function. Write a Python function `def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor` to solve the following problem:
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Here is the function:
def top_k_top_p_filtering(
    logits: torch.FloatTensor,
    top_k: int = 0,
    top_p: float = 1.0,
    filter_value: float = -float("Inf"),
    min_tokens_to_keep: int = 1,
) -> torch.FloatTensor:
    """
    Filter a distribution of logits using top-k and/or nucleus (top-p) filtering

    Args:
        logits: logits distribution shape (batch size, vocabulary size)
        top_k (`int`, *optional*, defaults to 0):
            If > 0, only keep the top k tokens with highest probability (top-k filtering)
        top_p (`float`, *optional*, defaults to 1.0):
            If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
            filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens we keep per batch example in the output.

    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    if top_k > 0:
        logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
            None, logits
        )

    if 0 <= top_p <= 1.0:
        logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
            None, logits
        )

    return logits
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering

Args:
    logits: logits distribution shape (batch size, vocabulary size)
    top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering)
    top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
    min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens we keep per batch example in the output.

From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
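A torch-only sketch of the top-k branch (the same masking that `TopKLogitsWarper` performs above), followed by sampling from the filtered distribution; the logit values are illustrative:

```python
import torch

logits = torch.tensor([[2.0, 1.0, 0.5, 0.2, -1.0]])
top_k = 2

# keep only the top-k logits; everything else is pushed to -inf, as in TopKLogitsWarper
threshold = torch.topk(logits, top_k)[0][..., -1, None]
filtered = logits.masked_fill(logits < threshold, -float("inf"))

probs = torch.softmax(filtered, dim=-1)  # probability mass now lives only on the top-k tokens
next_token = torch.multinomial(probs, num_samples=1)
print(filtered, next_token)
```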
18,573
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .file_utils import PaddingStrategy, TensorType, add_end_docstrings from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import logging def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False The provided code snippet includes necessary dependencies for implementing the `_is_end_of_word` function. Write a Python function `def _is_end_of_word(text)` to solve the following problem: Checks whether the last character in text is one of a punctuation, control or whitespace character. Here is the function: def _is_end_of_word(text): """Checks whether the last character in text is one of a punctuation, control or whitespace character.""" last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
Checks whether the last character in text is a punctuation, control, or whitespace character.
18,574
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .file_utils import PaddingStrategy, TensorType, add_end_docstrings from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import logging def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False The provided code snippet includes necessary dependencies for implementing the `_is_start_of_word` function. Write a Python function `def _is_start_of_word(text)` to solve the following problem: Checks whether the first character in text is one of a punctuation, control or whitespace character. Here is the function: def _is_start_of_word(text): """Checks whether the first character in text is one of a punctuation, control or whitespace character.""" first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
Checks whether the first character in text is a punctuation, control, or whitespace character.
18,575
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .file_utils import PaddingStrategy, TensorType, add_end_docstrings from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import logging The provided code snippet includes necessary dependencies for implementing the `_insert_one_token_to_ordered_list` function. Write a Python function `def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str)` to solve the following problem: Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted. Here is the function: def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str): """ Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted. """ insertion_idx = bisect.bisect_left(token_list, new_token) # Checks if new_token is already in the ordered token_list if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token: # new_token is in token_list, don't add return else: token_list.insert(insertion_idx, new_token)
Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted.
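A short demo of the bisect-based insertion (function restated from above so the snippet runs standalone); note the precondition that the list is already sorted:

```python
import bisect

def _insert_one_token_to_ordered_list(token_list, new_token):
    insertion_idx = bisect.bisect_left(token_list, new_token)
    if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
        return  # already present: keep the list duplicate-free
    token_list.insert(insertion_idx, new_token)

tokens = ["<mask>", "<pad>", "hello"]  # must already be sorted
_insert_one_token_to_ordered_list(tokens, "goodbye")
_insert_one_token_to_ordered_list(tokens, "<pad>")  # duplicate: no-op
print(tokens)  # ['<mask>', '<pad>', 'goodbye', 'hello']
```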
18,576
import copy
import json
import os
import re
import warnings
from typing import Any, Dict, Optional, Tuple, Union

from .file_utils import (
    CONFIG_NAME,
    PushToHubMixin,
    cached_path,
    copy_func,
    get_list_of_files,
    is_offline_mode,
    is_remote_url,
    is_torch_available,
)
from .utils import logging

FULL_CONFIGURATION_FILE = "config.json"

def get_list_of_files(
    path_or_repo: Union[str, os.PathLike], *args, **kwargs
) -> List[str]:
    """
    Gets the list of files inside `path_or_repo`.

    Args:
        path_or_repo (`str` or `os.PathLike`):
            Can be either the id of a repo on huggingface.co or a path to a *directory*.
        revision (`str`, *optional*, defaults to `"main"`):
            This feature is deprecated.
        use_auth_token (`str` or *bool*, *optional*):
            This feature is deprecated.
        local_files_only (`bool`, *optional*, defaults to `False`):
            This feature is deprecated.

    Returns:
        `List[str]`: The list of files available in `path_or_repo`.
    """
    path_or_repo = str(path_or_repo)
    # If path_or_repo is a folder, we just return what is inside (subdirectories included).
    if os.path.isdir(path_or_repo):
        list_of_files = []
        for path, dir_names, file_names in os.walk(path_or_repo):
            list_of_files.extend([os.path.join(path, f) for f in file_names])
        return list_of_files
    raise RuntimeError("Only local dir is supported.")

The provided code snippet includes necessary dependencies for implementing the `get_configuration_file` function. Write a Python function `def get_configuration_file( path_or_repo: Union[str, os.PathLike], *args, **kwargs ) -> str` to solve the following problem:
Get the configuration file to use for this version of transformers. Args: path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*. revision (`str`, *optional*, defaults to `"main"`): This feature is deprecated. use_auth_token (`str` or *bool*, *optional*): This feature is deprecated. local_files_only (`bool`, *optional*, defaults to `False`): This feature is deprecated. Returns: `str`: The configuration file to use.
Here is the function:
def get_configuration_file(
    path_or_repo: Union[str, os.PathLike], *args, **kwargs
) -> str:
    """
    Get the configuration file to use for this version of transformers.

    Args:
        path_or_repo (`str` or `os.PathLike`):
            Can be either the id of a repo on huggingface.co or a path to a *directory*.
        revision (`str`, *optional*, defaults to `"main"`):
            This feature is deprecated.
        use_auth_token (`str` or *bool*, *optional*):
            This feature is deprecated.
        local_files_only (`bool`, *optional*, defaults to `False`):
            This feature is deprecated.

    Returns:
        `str`: The configuration file to use.
    """
    # Inspect all files from the repo/folder.
    all_files = get_list_of_files(path_or_repo)
    if not any(FULL_CONFIGURATION_FILE in file for file in all_files):
        raise RuntimeError(f"No config.json found in dir:{path_or_repo}")
    configuration_file = FULL_CONFIGURATION_FILE

    return configuration_file
Get the configuration file to use for this version of transformers.

Args:
    path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*.
    revision (`str`, *optional*, defaults to `"main"`): This feature is deprecated.
    use_auth_token (`str` or *bool*, *optional*): This feature is deprecated.
    local_files_only (`bool`, *optional*, defaults to `False`): This feature is deprecated.

Returns:
    `str`: The configuration file to use.
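A self-contained sketch of the lookup, restating the two helpers above over a temporary directory; the directory layout is hypothetical:

```python
import json
import os
import tempfile

FULL_CONFIGURATION_FILE = "config.json"

def get_configuration_file(path_or_repo):
    # walk the local directory, as get_list_of_files does above
    all_files = [
        os.path.join(path, f)
        for path, _, file_names in os.walk(str(path_or_repo))
        for f in file_names
    ]
    if not any(FULL_CONFIGURATION_FILE in file for file in all_files):
        raise RuntimeError(f"No config.json found in dir:{path_or_repo}")
    return FULL_CONFIGURATION_FILE

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "config.json"), "w") as f:
    json.dump({"model_type": "bert"}, f)
print(get_configuration_file(tmp))  # config.json
```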
18,577
import collections import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy The provided code snippet includes necessary dependencies for implementing the `format_time` function. Write a Python function `def format_time(t)` to solve the following problem: Format `t` (in seconds) to (h):mm:ss Here is the function: def format_time(t): "Format `t` (in seconds) to (h):mm:ss" t = int(t) h, m, s = t // 3600, (t // 60) % 60, t % 60 return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
Format `t` (in seconds) to (h):mm:ss
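Two spot checks of the branching above (function restated so the snippet runs standalone): below one hour the hour field is dropped entirely.

```python
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

assert format_time(75) == "01:15"      # 0 hours: mm:ss only
assert format_time(3725) == "1:02:05"  # h:mm:ss once an hour is reached
```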
18,578
import collections import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy def html_progress_bar(value, total, prefix, label, width=300): # docstyle-ignore return f""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """
null
18,579
import collections import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy The provided code snippet includes necessary dependencies for implementing the `text_to_html_table` function. Write a Python function `def text_to_html_table(items)` to solve the following problem: Put the texts in `items` in an HTML table. Here is the function: def text_to_html_table(items): "Put the texts in `items` in an HTML table." html_code = """<table border="1" class="dataframe">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt) html_code += f" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code
Put the texts in `items` in an HTML table.
18,580
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None def is_pyctcdecode_available(): return _pyctcdecode_available
null
18,581
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _librosa_available = importlib.util.find_spec("librosa") is not None def is_librosa_available(): return _librosa_available
null
18,582
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_torch_available(): return _torch_available def is_torch_cuda_available(): if is_torch_available(): import torch return torch.cuda.is_available() else: return False
null
18,583
import importlib.util
import json
import os
import sys
from collections import OrderedDict
from functools import wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version

from ..utils.versions import importlib_metadata
from . import logging

def is_torch_available():
    return _torch_available

def is_torch_bf16_available():
    if not is_torch_available():
        return False

    import torch

    # since currently no utility function is available we build our own.
    # some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51
    # with additional check for torch version
    # to succeed:
    # 1. the hardware needs to support bf16 (arch >= Ampere)
    # 2. torch >= 1.10 (1.9 could be enough, but the AMP API changed in 1.10, so 1.10 is used as the minimum)
    # 3. CUDA >= 11
    # 4. torch.autocast exists
    # XXX: one problem here is that it may give invalid results on a mixed-GPU setup, so it's
    # really only correct for the 0th gpu (or the currently set default device if different from 0)

    if not torch.cuda.is_available() or torch.version.cuda is None:
        return False
    if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
        return False
    if int(torch.version.cuda.split(".")[0]) < 11:
        return False
    if version.parse(torch.__version__) < version.parse("1.10"):
        return False
    if not hasattr(torch, "autocast"):
        return False

    return True
null
18,584
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_torch_available(): return _torch_available def is_torch_tf32_available(): if not is_torch_available(): return False import torch if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if version.parse(torch.__version__) < version.parse("1.7"): return False return True
null
18,585
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_torch_onnx_dict_inputs_support_available(): return _torch_onnx_dict_inputs_support_available
null
18,586
import importlib.util
import json
import os
import sys
from collections import OrderedDict
from functools import wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version

from ..utils.versions import importlib_metadata
from . import logging

try:
    _coloredlogs_available = importlib_metadata.version("coloredlogs")
    logger.debug(f"Successfully imported coloredlogs version {_coloredlogs_available}")
except importlib_metadata.PackageNotFoundError:
    _coloredlogs_available = False

def is_coloredlogs_available():
    return _coloredlogs_available
null
18,587
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None def is_tf2onnx_available(): return _tf2onnx_available
null
18,588
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _onnx_available = importlib.util.find_spec("onnxruntime") is not None def is_onnx_available(): return _onnx_available
null
18,589
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _ftfy_available = importlib.util.find_spec("ftfy") is not None def is_ftfy_available(): return _ftfy_available
null
18,590
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging if _torch_available: torch_version = version.parse(importlib_metadata.version("torch")) _torch_fx_available = (torch_version.major, torch_version.minor) == ( TORCH_FX_REQUIRED_VERSION.major, TORCH_FX_REQUIRED_VERSION.minor, ) _torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION def is_torch_tpu_available(): if not _torch_available: return False # This test is probably enough, but just in case, we unpack a bit. if importlib.util.find_spec("torch_xla") is None: return False if importlib.util.find_spec("torch_xla.core") is None: return False return importlib.util.find_spec("torch_xla.core.xla_model") is not None
null
18,591
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _datasets_available = importlib.util.find_spec("datasets") is not None def is_datasets_available(): return _datasets_available
null
18,592
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _detectron2_available = importlib.util.find_spec("detectron2") is not None def is_detectron2_available(): return _detectron2_available
null
18,593
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_rjieba_available(): return importlib.util.find_spec("rjieba") is not None
null
18,594
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_psutil_available(): return importlib.util.find_spec("psutil") is not None
null
18,595
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_py3nvml_available(): return importlib.util.find_spec("py3nvml") is not None
null
18,596
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_apex_available(): return importlib.util.find_spec("apex") is not None
null
18,597
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _faiss_available = importlib.util.find_spec("faiss") is not None def is_faiss_available(): return _faiss_available
null
18,598
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_scipy_available(): return importlib.util.find_spec("scipy") is not None def is_sklearn_available(): if importlib.util.find_spec("sklearn") is None: return False return is_scipy_available() and importlib.util.find_spec("sklearn.metrics")
null
18,599
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_sentencepiece_available(): return importlib.util.find_spec("sentencepiece") is not None
null
18,600
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_protobuf_available(): if importlib.util.find_spec("google") is None: return False return importlib.util.find_spec("google.protobuf") is not None
null
18,601
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_tokenizers_available(): return importlib.util.find_spec("tokenizers") is not None
null
18,602
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_vision_available(): return importlib.util.find_spec("PIL") is not None
null
18,603
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_pytesseract_available(): return importlib.util.find_spec("pytesseract") is not None
null
18,604
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_spacy_available(): return importlib.util.find_spec("spacy") is not None
null
18,605
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging import sys sys.path.append('./mpu') def is_in_notebook(): try: # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") if "VSCODE_PID" in os.environ: raise ImportError("vscode") return importlib.util.find_spec("IPython") is not None except (AttributeError, ImportError, KeyError): return False
null
18,606
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _scatter_available = importlib.util.find_spec("torch_scatter") is not None def is_scatter_available(): return _scatter_available
null
18,607
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None def is_pytorch_quantization_available(): return _pytorch_quantization_available
null
18,608
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None def is_tensorflow_probability_available(): return _tensorflow_probability_available
null
18,609
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_pandas_available(): return importlib.util.find_spec("pandas") is not None
null
18,610
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_sagemaker_dp_enabled(): # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None
null
18,611
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_sagemaker_mp_enabled(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None
null
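A sketch of the environment contract that the two SageMaker checks above parse; the values below are illustrative (real SageMaker jobs set these variables automatically), and the final `smdistributed` import check still applies:

```python
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})

# With these set, is_sagemaker_mp_enabled() passes both JSON checks and then
# only requires that importlib.util.find_spec("smdistributed") is not None.
```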
18,612
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging def is_training_run_on_sagemaker(): return "SAGEMAKER_JOB_NAME" in os.environ
null
18,613
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _soundfile_available = importlib.util.find_spec("soundfile") is not None def is_soundfile_availble(): return _soundfile_available
null
18,614
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _timm_available = importlib.util.find_spec("timm") is not None def is_timm_available(): return _timm_available
null
18,615
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _torchaudio_available = importlib.util.find_spec("torchaudio") is not None def is_torchaudio_available(): return _torchaudio_available
null
18,616
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _torchaudio_available = importlib.util.find_spec("torchaudio") is not None def is_speech_available(): # For now this depends on torchaudio but the exact dependency might evolve in the future. return _torchaudio_available
null
18,617
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging _phonemizer_available = importlib.util.find_spec("phonemizer") is not None def is_phonemizer_available(): return _phonemizer_available
null
18,618
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging if _torch_available: torch_version = version.parse(importlib_metadata.version("torch")) _torch_fx_available = (torch_version.major, torch_version.minor) == ( TORCH_FX_REQUIRED_VERSION.major, TORCH_FX_REQUIRED_VERSION.minor, ) _torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION def torch_only_method(fn): def wrapper(*args, **kwargs): if not _torch_available: raise ImportError( "You need to install pytorch to use this method or class, " "or activate it with environment variables USE_TORCH=1 and USE_TF=0." ) else: return fn(*args, **kwargs) return wrapper
null
18,619
import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from ..utils.versions import importlib_metadata from . import logging BACKENDS_MAPPING = OrderedDict( [ ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), ("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)), ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("vision", (is_vision_available, VISION_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ checks = (BACKENDS_MAPPING[backend] for backend in backends) failed = [msg.format(name) for available, msg in checks if not available()] if failed: raise ImportError("".join(failed))
null
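A brief usage sketch: `requires_backends` is typically called at the top of `__init__` or a method so that a missing optional dependency fails fast with that backend's install instructions. The `WaveformFeatureExtractor` class below is hypothetical.

class WaveformFeatureExtractor:
    def __init__(self):
        # Raises ImportError with the concatenated messages for every missing backend.
        requires_backends(self, ["torch", "speech"])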
18,620
import importlib.util
import json
import os
import sys
from collections import OrderedDict
from functools import wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version

from ..utils.versions import importlib_metadata
from . import logging


# Assumed stand-in; upstream sets this flag during import, honoring the USE_TORCH/USE_TF
# environment variables.
_torch_available = importlib.util.find_spec("torch") is not None


def is_torch_available():
    return _torch_available


def torch_required(func):
    # Chose a different decorator name than in tests so it's clear they are not the same.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if is_torch_available():
            return func(*args, **kwargs)
        else:
            raise ImportError(f"Method `{func.__name__}` requires PyTorch.")

    return wrapper
null
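A minimal sketch of the decorator on a hypothetical method; because of `@wraps`, the wrapped function keeps its `__name__`, which the error message interpolates.

class Exporter:
    @torch_required
    def to_torchscript(self, model):
        import torch

        return torch.jit.script(model)

# Without PyTorch installed, exporter.to_torchscript(model) raises:
# ImportError: Method `to_torchscript` requires PyTorch.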
18,621
import importlib.util
import json
import os
import sys
from collections import OrderedDict
from functools import wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version

from ..utils.versions import importlib_metadata
from . import logging


# Assumed stand-in; upstream sets this flag during import, honoring the USE_TORCH/USE_TF
# environment variables.
_tf_available = importlib.util.find_spec("tensorflow") is not None


def is_tf_available():
    return _tf_available


def tf_required(func):
    # Chose a different decorator name than in tests so it's clear they are not the same.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if is_tf_available():
            return func(*args, **kwargs)
        else:
            raise ImportError(f"Method `{func.__name__}` requires TF.")

    return wrapper
null
18,622
from collections import OrderedDict, UserDict
from contextlib import ExitStack
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


# The _torch_available/_tf_available/_flax_available flags and is_torch_fx_available()
# referenced by these extracted stubs are set in import_utils.py, from which the line
# above already imports the public checks.
def is_torch_available():
    return _torch_available


def is_tf_available():
    return _tf_available


def is_flax_available():
    return _flax_available


def is_torch_fx_proxy(x):
    if is_torch_fx_available():
        import torch.fx

        return isinstance(x, torch.fx.Proxy)
    return False


The provided code snippet includes necessary dependencies for implementing the `is_tensor` function. Write a Python function `def is_tensor(x)` to solve the following problem:
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
Here is the function:
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
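A quick sketch of the check in use; framework-agnostic code typically branches on it before converting inputs.

import numpy as np

assert is_tensor(np.zeros((2, 2)))    # numpy arrays count as tensors
assert not is_tensor([[0.0, 0.0]])    # plain Python lists do not
# With torch installed: import torch; assert is_tensor(torch.zeros(2, 2))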
18,623
from collections import OrderedDict, UserDict
from contextlib import ExitStack
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def _is_numpy(x):
    return isinstance(x, np.ndarray)
null
18,624
from collections import OrderedDict, UserDict
from contextlib import ExitStack
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)
null
18,625
from collections import OrderedDict, UserDict
from contextlib import ExitStack
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


# The _*_available flags referenced by these extracted stubs are set in import_utils.py,
# from which the import above already exposes the public checks.
def is_torch_available():
    return _torch_available


def is_tf_available():
    return _tf_available


def is_flax_available():
    return _flax_available


The provided code snippet includes necessary dependencies for implementing the `to_py_obj` function. Write a Python function `def to_py_obj(obj)` to solve the following problem:
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
Here is the function:
def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_available() and _is_tensorflow(obj):
        return obj.numpy().tolist()
    elif is_torch_available() and _is_torch(obj):
        return obj.detach().cpu().tolist()
    elif is_flax_available() and _is_jax(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):
        # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
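A small sketch of what the recursion buys: nested containers mixing arrays and numpy scalars come back as plain Python objects, so the result is JSON-serializable.

import json

import numpy as np

batch = {"ids": np.array([[1, 2], [3, 4]]), "score": np.float32(0.5)}
print(json.dumps(to_py_obj(batch)))  # {"ids": [[1, 2], [3, 4]], "score": 0.5}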
18,626
from collections import OrderedDict, UserDict
from contextlib import ExitStack
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


# The _*_available flags referenced by these extracted stubs are set in import_utils.py,
# from which the import above already exposes the public checks.
def is_torch_available():
    return _torch_available


def is_tf_available():
    return _tf_available


def is_flax_available():
    return _flax_available


The provided code snippet includes necessary dependencies for implementing the `to_numpy` function. Write a Python function `def to_numpy(obj)` to solve the following problem:
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
Here is the function:
def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_available() and _is_tensorflow(obj):
        return obj.numpy()
    elif is_torch_available() and _is_torch(obj):
        return obj.detach().cpu().numpy()
    elif is_flax_available() and _is_jax(obj):
        return np.asarray(obj)
    else:
        return obj
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
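In contrast to `to_py_obj`, this variant preserves array structure; a short sketch:

out = to_numpy({"ids": [[1, 2], [3, 4]]})
print(type(out["ids"]), out["ids"].shape)  # <class 'numpy.ndarray'> (2, 2)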
18,627
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None


def _get_library_name() -> str:
    # The root logger is named after the top-level package.
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def warning_advice(self, *args, **kwargs):
    # Minimal stand-in for the upstream helper: behaves like `logger.warning()`, but can
    # be silenced wholesale via the TRANSFORMERS_NO_ADVISORY_WARNINGS environment variable.
    if os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False):
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
null
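A hedged sketch of the setup step that `_reset_library_root_logger` undoes; this is a simplified, assumed version of the library's configuration routine, shown only to make the module-level state concrete.

def _configure_library_root_logger() -> None:
    # Simplified, assumed counterpart: attach one default handler under the lock.
    global _default_handler

    with _lock:
        if _default_handler:
            return
        _default_handler = logging.StreamHandler()  # writes to sys.stderr by default
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(logging.WARNING)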