id: int64 (values 0 to 190k)
prompt: string (lengths 21 to 13.4M)
docstring: string (lengths 1 to 12k)
10,656
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline def softmax(_outputs): maxes = np.max(_outputs, axis=-1, keepdims=True) shifted_exp = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
null
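The `softmax` helper in the row above uses the standard max-subtraction trick for numerical stability. A minimal, self-contained NumPy sketch (the sample logits are made up for illustration) showing why the shift matters: the naive form would overflow for large logits, while the shifted form stays finite and gives the same probabilities.

```python
import numpy as np

def stable_softmax(outputs):
    # Subtract the row-wise max before exponentiating so exp() never overflows.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

logits = np.array([[1000.0, 1001.0, 1002.0]])  # naive exp(1000.0) would overflow to inf
print(stable_softmax(logits))  # ~[[0.090, 0.245, 0.665]]
```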
10,657
import collections import csv import importlib import json import os import pickle import sys import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from packaging import version from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available, logging def no_collate_fn(items): if len(items) != 1: raise ValueError("This collate_fn is meant to be used with batch_size=1") return items[0]
null
10,658
import collections import csv import importlib import json import os import pickle import sys import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from packaging import version from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available, logging def _pad(items, key, padding_value, padding_side): batch_size = len(items) if isinstance(items[0][key], torch.Tensor): # Others include `attention_mask` etc... shape = items[0][key].shape dim = len(shape) if key == "pixel_values": # This is probable image so padding shouldn't be necessary # B, C, H, W return torch.cat([item[key] for item in items], dim=0) max_length = max(item[key].shape[1] for item in items) min_length = min(item[key].shape[1] for item in items) dtype = items[0][key].dtype if dim == 2: if max_length == min_length: # Bypass for `ImageGPT` which doesn't provide a padding value, yet # we can consistently pad since the size should be matching return torch.cat([item[key] for item in items], dim=0) tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value elif dim == 3: tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value for i, item in enumerate(items): if dim == 2: if padding_side == "left": tensor[i, -len(item[key][0]) :] = item[key][0].clone() else: tensor[i, : len(item[key][0])] = item[key][0].clone() elif dim == 3: if padding_side == "left": tensor[i, -len(item[key][0]) :, :] = item[key][0].clone() else: tensor[i, : len(item[key][0]), :] = item[key][0].clone() return tensor else: return [item[key] for item in items] def pad_collate_fn(tokenizer, feature_extractor): # Tokenizer t_padding_side = None # Feature extractor f_padding_side = None if tokenizer is None and feature_extractor is None: raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching") if tokenizer is not None: if tokenizer.pad_token_id is None: raise ValueError( "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with " "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`." ) else: t_padding_value = tokenizer.pad_token_id t_padding_side = tokenizer.padding_side if feature_extractor is not None: # Feature extractor can be images, where no padding is expected f_padding_value = getattr(feature_extractor, "padding_value", None) f_padding_side = getattr(feature_extractor, "padding_side", None) if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side: raise ValueError( f"The feature extractor, and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}" ) padding_side = "right" if t_padding_side is not None: padding_side = t_padding_side if f_padding_side is not None: padding_side = f_padding_side def inner(items): keys = set(items[0].keys()) for item in items: if set(item.keys()) != keys: raise ValueError( f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !=" f" {keys})" ) # input_values, input_pixels, input_ids, ... 
padded = {} for key in keys: if key in {"input_ids"}: # ImageGPT uses a feature extractor if feature_extractor is not None: _padding_value = f_padding_value else: _padding_value = t_padding_value elif key in {"input_values", "pixel_values", "input_features"}: _padding_value = f_padding_value elif key in {"p_mask", "special_tokens_mask"}: _padding_value = 1 elif key in {"attention_mask", "token_type_ids"}: _padding_value = 0 else: # This is likely another random key maybe even user provided _padding_value = 0 padded[key] = _pad(items, key, _padding_value, padding_side) return padded return inner
null
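As a hedged illustration of what `pad_collate_fn` / `_pad` above do for token inputs, here is a minimal right-padding sketch with PyTorch, simplified to 1-D sequences and a fixed padding value of 0 (the real helper reads the pad value and side from the tokenizer or feature extractor and handles batched 2-D/3-D tensors).

```python
import torch

def pad_right(sequences, padding_value=0):
    # Mirror of the dim == 2, padding_side == "right" branch of `_pad`:
    # allocate a (batch, max_len) tensor filled with the pad value, then copy each row in.
    max_length = max(seq.shape[0] for seq in sequences)
    tensor = torch.full((len(sequences), max_length), padding_value, dtype=sequences[0].dtype)
    for i, seq in enumerate(sequences):
        tensor[i, : seq.shape[0]] = seq
    return tensor

batch = [torch.tensor([101, 2023, 102]), torch.tensor([101, 102])]
print(pad_right(batch))
# tensor([[ 101, 2023,  102],
#         [ 101,  102,    0]])
```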
10,659
import collections import csv import importlib import json import os import pickle import sys import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from packaging import version from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available, logging def infer_framework_load_model( model, config: AutoConfig, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. config ([`AutoConfig`]): The config associated with the model to help using the correct class model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) if isinstance(model, str): model_kwargs["_from_pipeline"] = task class_tuple = () look_pt = is_torch_available() and framework in {"pt", None} look_tf = is_tf_available() and framework in {"tf", None} if model_classes: if look_pt: class_tuple = class_tuple + model_classes.get("pt", (AutoModel,)) if look_tf: class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,)) if config.architectures: classes = [] for architecture in config.architectures: transformers_module = importlib.import_module("transformers") if look_pt: _class = getattr(transformers_module, architecture, None) if _class is not None: classes.append(_class) if look_tf: _class = getattr(transformers_module, f"TF{architecture}", None) if _class is not None: classes.append(_class) class_tuple = class_tuple + tuple(classes) if len(class_tuple) == 0: raise ValueError(f"Pipeline cannot infer suitable model classes from {model}") for model_class in class_tuple: kwargs = model_kwargs.copy() if framework == "pt" and model.endswith(".h5"): kwargs["from_tf"] = True logger.warning( "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. " "Trying to load the model with PyTorch." 
) elif framework == "tf" and model.endswith(".bin"): kwargs["from_pt"] = True logger.warning( "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. " "Trying to load the model with Tensorflow." ) try: model = model_class.from_pretrained(model, **kwargs) if hasattr(model, "eval"): model = model.eval() # Stop loading on the first successful load. break except (OSError, ValueError): continue if isinstance(model, str): raise ValueError(f"Could not load model {model} with any of the following classes: {class_tuple}.") framework = "tf" if model.__class__.__name__.startswith("TF") else "pt" return framework, model class AutoConfig: r""" This is a generic configuration class that will be instantiated as one of the configuration classes of the library when created with the [`~AutoConfig.from_pretrained`] class method. This class cannot be instantiated directly using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoConfig is designed to be instantiated " "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method." ) def for_model(cls, model_type: str, *args, **kwargs): if model_type in CONFIG_MAPPING: config_class = CONFIG_MAPPING[model_type] return config_class(*args, **kwargs) raise ValueError( f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}" ) def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate one of the configuration classes of the library from a pretrained model configuration. The configuration class to instantiate is selected based on the `model_type` property of the config object that is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing a configuration file saved using the [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method, e.g., `./my_model_directory/`. - A path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. 
return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final configuration object. If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of `kwargs` which has not been used to update `config` and is otherwise ignored. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs(additional keyword arguments, *optional*): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. Examples: ```python >>> from transformers import AutoConfig >>> # Download configuration from huggingface.co and cache. >>> config = AutoConfig.from_pretrained("bert-base-uncased") >>> # Download configuration from huggingface.co (user-uploaded) and cache. >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased") >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*). >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/") >>> # Load a specific configuration file. >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json") >>> # Change some config attributes when loading a pretrained config. >>> config = AutoConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) >>> config.output_attentions True >>> config, unused_kwargs = AutoConfig.from_pretrained( ... "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True ... ) >>> config.output_attentions True >>> unused_kwargs {'foo': False} ```""" kwargs["_from_auto"] = True kwargs["name_or_path"] = pretrained_model_name_or_path trust_remote_code = kwargs.pop("trust_remote_code", False) config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) if "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]: if not trust_remote_code: raise ValueError( f"Loading {pretrained_model_name_or_path} requires you to execute the configuration file in that" " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" " set the option `trust_remote_code=True` to remove this error." ) if kwargs.get("revision", None) is None: logger.warning( "Explicitly passing a `revision` is encouraged when loading a configuration with custom code to " "ensure no malicious code has been contributed in a newer revision." ) class_ref = config_dict["auto_map"]["AutoConfig"] module_file, class_name = class_ref.split(".") config_class = get_class_from_dynamic_module( pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs ) return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) elif "model_type" in config_dict: config_class = CONFIG_MAPPING[config_dict["model_type"]] return config_class.from_dict(config_dict, **unused_kwargs) else: # Fallback: use pattern matching on the string. 
# We go from longer names to shorter names to catch roberta before bert (for instance) for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True): if pattern in str(pretrained_model_name_or_path): return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs) raise ValueError( f"Unrecognized model in {pretrained_model_name_or_path}. " f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings " f"in its name: {', '.join(CONFIG_MAPPING.keys())}" ) def register(model_type, config): """ Register a new configuration for this class. Args: model_type (`str`): The model type like "bert" or "gpt". config ([`PretrainedConfig`]): The config to register. """ if issubclass(config, PretrainedConfig) and config.model_type != model_type: raise ValueError( "The config you are passing has a `model_type` attribute that is not consistent with the model type " f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they " "match!" ) CONFIG_MAPPING.register(model_type, config) The provided code snippet includes necessary dependencies for implementing the `infer_framework_from_model` function. Write a Python function `def infer_framework_from_model( model, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, )` to solve the following problem: Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. Here is the function: def infer_framework_from_model( model, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. 
model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs) else: config = model.config return infer_framework_load_model( model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs )
Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to infer the framework from. If `str`, a checkpoint name. model_classes (dictionary `str` to `type`, *optional*): A mapping of framework names (`"pt"`, `"tf"`) to model classes. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple (framework, model).
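The actual framework decision in the code above boils down to the class-name prefix of the loaded model: names starting with `TF` are TensorFlow, everything else is treated as PyTorch. A tiny sketch of just that rule, with dummy classes standing in for real `PreTrainedModel` / `TFPreTrainedModel` instances:

```python
# Illustrative stand-ins, not the real transformers classes.
class BertModel: ...
class TFBertModel: ...

def infer_framework(model) -> str:
    # Same rule as in infer_framework_load_model above.
    return "tf" if model.__class__.__name__.startswith("TF") else "pt"

assert infer_framework(BertModel()) == "pt"
assert infer_framework(TFBertModel()) == "tf"
```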
10,660
import collections import csv import importlib import json import os import pickle import sys import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from packaging import version from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available, logging if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TFAutoModel if is_torch_available(): import torch from torch.utils.data import DataLoader, Dataset from ..models.auto.modeling_auto import AutoModel # Re-export for backward compatibility from .pt_utils import KeyDataset else: Dataset = None KeyDataset = None if is_torch_available(): from transformers.pipelines.pt_utils import ( PipelineChunkIterator, PipelineDataset, PipelineIterator, PipelinePackIterator, ) class TFAutoModel(_BaseAutoModelClass): _model_mapping = TF_MODEL_MAPPING TFAutoModel = auto_class_update(TFAutoModel) class AutoModel(_BaseAutoModelClass): _model_mapping = MODEL_MAPPING AutoModel = auto_class_update(AutoModel) The provided code snippet includes necessary dependencies for implementing the `get_framework` function. Write a Python function `def get_framework(model, revision: Optional[str] = None)` to solve the following problem: Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch. Here is the function: def get_framework(model, revision: Optional[str] = None): """ Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch. """ warnings.warn( "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.", FutureWarning, ) if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) if isinstance(model, str): if is_torch_available() and not is_tf_available(): model = AutoModel.from_pretrained(model, revision=revision) elif is_tf_available() and not is_torch_available(): model = TFAutoModel.from_pretrained(model, revision=revision) else: try: model = AutoModel.from_pretrained(model, revision=revision) except OSError: model = TFAutoModel.from_pretrained(model, revision=revision) framework = "tf" if model.__class__.__name__.startswith("TF") else "pt" return framework
Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch.
10,661
import collections import csv import importlib import json import os import pickle import sys import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from packaging import version from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available, logging if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TFAutoModel if is_torch_available(): import torch from torch.utils.data import DataLoader, Dataset from ..models.auto.modeling_auto import AutoModel # Re-export for backward compatibility from .pt_utils import KeyDataset else: Dataset = None KeyDataset = None if is_torch_available(): from transformers.pipelines.pt_utils import ( PipelineChunkIterator, PipelineDataset, PipelineIterator, PipelinePackIterator, ) The provided code snippet includes necessary dependencies for implementing the `get_default_model_and_revision` function. Write a Python function `def get_default_model_and_revision( targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] ) -> Union[str, Tuple[str, str]]` to solve the following problem: Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict` ): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns `str` The model string representing the default model for this pipeline Here is the function: def get_default_model_and_revision( targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] ) -> Union[str, Tuple[str, str]]: """ Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict` ): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns `str` The model string representing the default model for this pipeline """ if is_torch_available() and not is_tf_available(): framework = "pt" elif is_tf_available() and not is_torch_available(): framework = "tf" defaults = targeted_task["default"] if task_options: if task_options not in defaults: raise ValueError(f"The task does not provide any default models for options {task_options}") default_models = defaults[task_options]["model"] elif "model" in defaults: default_models = targeted_task["default"]["model"] else: # XXX This error message needs to be updated to be more generic if more tasks are going to become # parametrized raise ValueError('The task defaults can\'t be correctly selected. 
You probably meant "translation_XX_to_YY"') if framework is None: framework = "pt" return default_models[framework]
Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict` ): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns `str` The model string representing the default model for this pipeline
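A hedged sketch of the lookup that `get_default_model_and_revision` performs. The `targeted_task` dict below is a made-up example of the expected shape (a `"default"` entry mapping framework names to model identifiers), not an actual task definition from the library, and the parametrized-task and error branches are omitted.

```python
from typing import Dict, Optional

def pick_default_model(targeted_task: Dict, framework: Optional[str]) -> str:
    # Simplified version of the logic above: resolve the "model" defaults,
    # then fall back to PyTorch when no framework was requested.
    default_models = targeted_task["default"]["model"]
    if framework is None:
        framework = "pt"
    return default_models[framework]

# Hypothetical task definition with placeholder model ids.
targeted_task = {"default": {"model": {"pt": "some-org/pt-checkpoint", "tf": "some-org/tf-checkpoint"}}}
print(pick_default_model(targeted_task, None))  # some-org/pt-checkpoint
```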
10,662
import inspect from collections import defaultdict from typing import TYPE_CHECKING, Dict, Optional, Union import numpy as np from ..utils import is_torch_available, logging from .audio_utils import ffmpeg_read from .base import ChunkPipeline The provided code snippet includes necessary dependencies for implementing the `rescale_stride` function. Write a Python function `def rescale_stride(tokens_or_logits, stride, ratio)` to solve the following problem: Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. Here is the function: def rescale_stride(tokens_or_logits, stride, ratio): """ Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. """ # Shape is [B, SEQ] for tokens # [B, SEQ, V] for logits new_strides = [] for input_n, left, right in stride: token_n = int(round(input_n * ratio)) left = int(round(left / input_n * token_n)) right = int(round(right / input_n * token_n)) new_stride = (token_n, left, right) new_strides.append(new_stride) return new_strides
Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.
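To make the `(160_000, 16_000, 16_000) -> (2000, 200, 200)` example concrete: with 160,000 audio samples mapped to 2,000 tokens the ratio is `2000 / 160_000`, and the left/right strides scale by the same factor. A minimal pure-Python check of that arithmetic (the tensor argument is dropped since only the stride tuples matter here):

```python
def rescale_stride(stride, ratio):
    new_strides = []
    for input_n, left, right in stride:
        token_n = int(round(input_n * ratio))
        new_strides.append((token_n,
                            int(round(left / input_n * token_n)),
                            int(round(right / input_n * token_n))))
    return new_strides

ratio = 2000 / 160_000
print(rescale_stride([(160_000, 16_000, 16_000)], ratio))  # [(2000, 200, 200)]
```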
10,663
import inspect from collections import defaultdict from typing import TYPE_CHECKING, Dict, Optional, Union import numpy as np from ..utils import is_torch_available, logging from .audio_utils import ffmpeg_read from .base import ChunkPipeline def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right): inputs_len = inputs.shape[0] step = chunk_len - stride_left - stride_right for i in range(0, inputs_len, step): # add start and end paddings to the chunk chunk = inputs[i : i + chunk_len] processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt") _stride_left = 0 if i == 0 else stride_left is_last = i + step + stride_left >= inputs_len _stride_right = 0 if is_last else stride_right if chunk.shape[0] > _stride_left: yield {"is_last": is_last, "stride": (chunk.shape[0], _stride_left, _stride_right), **processed}
null
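A small sketch of the chunking arithmetic in `chunk_iter` above, stripped of the feature extractor: with `step = chunk_len - stride_left - stride_right`, consecutive chunks overlap by the stride amounts, the first chunk drops the left stride, and the final chunks drop the right stride. The numbers are arbitrary illustrations.

```python
def chunk_bounds(inputs_len, chunk_len, stride_left, stride_right):
    step = chunk_len - stride_left - stride_right
    for i in range(0, inputs_len, step):
        is_last = i + step + stride_left >= inputs_len
        _stride_left = 0 if i == 0 else stride_left
        _stride_right = 0 if is_last else stride_right
        yield (i, min(i + chunk_len, inputs_len), _stride_left, _stride_right, is_last)

for bounds in chunk_bounds(inputs_len=10, chunk_len=6, stride_left=1, stride_right=1):
    print(bounds)
# (0, 6, 0, 1, False)
# (4, 10, 1, 1, False)
# (8, 10, 1, 0, True)
```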
10,664
import math import tensorflow as tf from packaging import version The provided code snippet includes necessary dependencies for implementing the `_gelu_new` function. Write a Python function `def _gelu_new(x)` to solve the following problem: Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation Returns: `x` with the GELU activation applied. Here is the function: def _gelu_new(x): """ Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation Returns: `x` with the GELU activation applied. """ x = tf.convert_to_tensor(x) pi = tf.cast(math.pi, x.dtype) coeff = tf.cast(0.044715, x.dtype) cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3)))) return x * cdf
Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation Returns: `x` with the GELU activation applied.
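A NumPy check (NumPy rather than TensorFlow, to stay dependency-light) that the tanh approximation used in `_gelu_new` tracks the exact erf-based GELU closely; the test points are arbitrary.

```python
import math
import numpy as np

def gelu_exact(x):
    # Exact GELU: x * Phi(x), with Phi the standard normal CDF expressed via erf.
    return x * 0.5 * (1.0 + np.vectorize(math.erf)(x / math.sqrt(2.0)))

def gelu_tanh(x):
    # The smoother tanh approximation from the snippet above.
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))

x = np.linspace(-4.0, 4.0, 9)
print(np.max(np.abs(gelu_exact(x) - gelu_tanh(x))))  # small everywhere (well below 1e-2)
```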
10,665
import math import tensorflow as tf from packaging import version def mish(x): x = tf.convert_to_tensor(x) return x * tf.tanh(tf.math.softplus(x))
null
10,666
import math import tensorflow as tf from packaging import version def gelu_fast(x): x = tf.convert_to_tensor(x) coeff1 = tf.cast(0.044715, x.dtype) coeff2 = tf.cast(0.7978845608, x.dtype) return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
null
10,667
import math import tensorflow as tf from packaging import version def quick_gelu(x): x = tf.convert_to_tensor(x) coeff = tf.cast(1.702, x.dtype) return x * tf.math.sigmoid(coeff * x)
null
10,668
import math import tensorflow as tf from packaging import version def _gelu(x): """ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ x = tf.convert_to_tensor(x) cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype))) return x * cdf The provided code snippet includes necessary dependencies for implementing the `gelu_10` function. Write a Python function `def gelu_10(x)` to solve the following problem: Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purpose, as it allows mapping 2 negatives values in the GeLU spectrum. For more information on this trick, please refer to https://arxiv.org/abs/2004.09602 Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 :param x: :return: Here is the function: def gelu_10(x): """ Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purpose, as it allows mapping 2 negatives values in the GeLU spectrum. For more information on this trick, please refer to https://arxiv.org/abs/2004.09602 Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 :param x: :return: """ return tf.clip_by_value(_gelu(x), -10, 10)
Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purposes, as it allows mapping two negative values in the GeLU spectrum. For more information on this trick, please refer to https://arxiv.org/abs/2004.09602 Gaussian Error Linear Unit. Original implementation of the gelu activation function in the Google BERT repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 :param x: :return:
10,669
import math import tensorflow as tf from packaging import version The provided code snippet includes necessary dependencies for implementing the `glu` function. Write a Python function `def glu(x, axis=-1)` to solve the following problem: Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B). Args: `x`: float Tensor to perform activation `axis`: dimension across which `x` be split in half Returns: `x` with the GLU activation applied (with its size halved across the dimension `axis`). Here is the function: def glu(x, axis=-1): """ Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B). Args: `x`: float Tensor to perform activation `axis`: dimension across which `x` be split in half Returns: `x` with the GLU activation applied (with its size halved across the dimension `axis`). """ a, b = tf.split(x, 2, axis=axis) return a * tf.math.sigmoid(b)
Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B). Args: `x`: float Tensor to perform activation `axis`: dimension across which `x` will be split in half Returns: `x` with the GLU activation applied (with its size halved across the dimension `axis`).
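A NumPy sketch of the same split-and-gate operation as `glu` above (TensorFlow swapped for NumPy, input values arbitrary): the input is split into two halves A and B along the chosen axis and the result is A * sigmoid(B), so the output is half the size along that axis.

```python
import numpy as np

def glu_np(x, axis=-1):
    a, b = np.split(x, 2, axis=axis)
    return a / (1.0 + np.exp(-b))  # a * sigmoid(b)

x = np.array([[1.0, 2.0, 0.0, 100.0]])
print(glu_np(x))        # [[0.5, 2.0]]  (sigmoid(0) = 0.5, sigmoid(100) ~ 1.0)
print(glu_np(x).shape)  # (1, 2): halved along the last axis
```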
10,670
import math import tensorflow as tf from packaging import version def approximate_gelu_wrap(x): return tf.keras.activations.gelu(x, approximate=True)
null
10,671
import math import tensorflow as tf from packaging import version ACT2FN = { "gelu": gelu, "gelu_10": gelu_10, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def get_tf_activation(activation_string): if activation_string in ACT2FN: return ACT2FN[activation_string] else: raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
null
10,672
import warnings from argparse import ArgumentParser from os import listdir, makedirs from pathlib import Path from typing import Dict, List, Optional, Tuple from packaging.version import Version, parse from transformers.pipelines import Pipeline, pipeline from transformers.tokenization_utils import BatchEncoding from transformers.utils import ModelOutput, is_tf_available, is_torch_available ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") The provided code snippet includes necessary dependencies for implementing the `check_onnxruntime_requirements` function. Write a Python function `def check_onnxruntime_requirements(minimum_version: Version)` to solve the following problem: Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found Here is the function: def check_onnxruntime_requirements(minimum_version: Version): """ Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found """ try: import onnxruntime # Parse the version of the installed onnxruntime ort_version = parse(onnxruntime.__version__) # We require 1.4.0 minimum if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError( f"We found an older version of onnxruntime ({onnxruntime.__version__}) " f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" "Please update onnxruntime by running `pip install --upgrade onnxruntime`" ) except ImportError: raise ImportError( "onnxruntime doesn't seem to be currently installed. " "Please install the onnxruntime by running `pip install onnxruntime`" " and relaunch the conversion." )
Check that onnxruntime is installed and that the installed version is recent enough. Raises: ImportError: If onnxruntime is not installed or the installed version is too old.
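The version gate above relies on `packaging.version.parse`, which compares version strings semantically rather than lexically. A small illustration (the version strings are arbitrary examples):

```python
from packaging.version import parse

ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")

print(parse("1.13.1") >= ORT_QUANTIZE_MINIMUM_VERSION)  # True
print(parse("1.10.0") > parse("1.9.0"))                 # True, although "1.10.0" < "1.9.0" as plain strings
print(parse("1.3.0") < ORT_QUANTIZE_MINIMUM_VERSION)    # True -> would trigger the ImportError above
```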
10,673
import warnings from argparse import ArgumentParser from os import listdir, makedirs from pathlib import Path from typing import Dict, List, Optional, Tuple from packaging.version import Version, parse from transformers.pipelines import Pipeline, pipeline from transformers.tokenization_utils import BatchEncoding from transformers.utils import ModelOutput, is_tf_available, is_torch_available def generate_identified_filename(filename: Path, identifier: str) -> Path: """ Append a string-identifier at the end (before the extension, if any) to the provided filepath Args: filename: pathlib.Path The actual path object we would like to add an identifier suffix identifier: The suffix to add Returns: String with concatenated identifier at the end of the filename """ return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix) The provided code snippet includes necessary dependencies for implementing the `optimize` function. Write a Python function `def optimize(onnx_model_path: Path) -> Path` to solve the following problem: Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the optimizations possible Args: onnx_model_path: filepath where the model binary description is stored Returns: Path where the optimized model binary description has been saved Here is the function: def optimize(onnx_model_path: Path) -> Path: """ Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the optimizations possible Args: onnx_model_path: filepath where the model binary description is stored Returns: Path where the optimized model binary description has been saved """ from onnxruntime import InferenceSession, SessionOptions # Generate model name with suffix "optimized" opt_model_path = generate_identified_filename(onnx_model_path, "-optimized") sess_option = SessionOptions() sess_option.optimized_model_filepath = opt_model_path.as_posix() _ = InferenceSession(onnx_model_path.as_posix(), sess_option) print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}") print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\") return opt_model_path
Load the model at the specified path and let onnxruntime apply graph-level transformations to enable all possible optimizations. Args: onnx_model_path: filepath where the model binary description is stored Returns: Path where the optimized model binary description has been saved
10,674
import warnings from argparse import ArgumentParser from os import listdir, makedirs from pathlib import Path from typing import Dict, List, Optional, Tuple from packaging.version import Version, parse from transformers.pipelines import Pipeline, pipeline from transformers.tokenization_utils import BatchEncoding from transformers.utils import ModelOutput, is_tf_available, is_torch_available def generate_identified_filename(filename: Path, identifier: str) -> Path: """ Append a string-identifier at the end (before the extension, if any) to the provided filepath Args: filename: pathlib.Path The actual path object we would like to add an identifier suffix identifier: The suffix to add Returns: String with concatenated identifier at the end of the filename """ return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix) The provided code snippet includes necessary dependencies for implementing the `quantize` function. Write a Python function `def quantize(onnx_model_path: Path) -> Path` to solve the following problem: Quantize the weights of the model from float32 to in8 to allow very efficient inference on modern CPU Args: onnx_model_path: Path to location the exported ONNX model is stored Returns: The Path generated for the quantized Here is the function: def quantize(onnx_model_path: Path) -> Path: """ Quantize the weights of the model from float32 to in8 to allow very efficient inference on modern CPU Args: onnx_model_path: Path to location the exported ONNX model is stored Returns: The Path generated for the quantized """ import onnx import onnxruntime from onnx.onnx_pb import ModelProto from onnxruntime.quantization import QuantizationMode from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer from onnxruntime.quantization.registry import IntegerOpsRegistry # Load the ONNX model onnx_model = onnx.load(onnx_model_path.as_posix()) if parse(onnx.__version__) < parse("1.5.0"): print( "Models larger than 2GB will fail to quantize due to protobuf constraint.\n" "Please upgrade to onnxruntime >= 1.5.0." ) # Copy it copy_model = ModelProto() copy_model.CopyFrom(onnx_model) # Construct quantizer # onnxruntime renamed input_qType to activation_qType in v1.13.1, so we # check the onnxruntime version to ensure backward compatibility. # See also: https://github.com/microsoft/onnxruntime/pull/12873 if parse(onnxruntime.__version__) < parse("1.13.1"): quantizer = ONNXQuantizer( model=copy_model, per_channel=False, reduce_range=False, mode=QuantizationMode.IntegerOps, static=False, weight_qType=True, input_qType=False, tensors_range=None, nodes_to_quantize=None, nodes_to_exclude=None, op_types_to_quantize=list(IntegerOpsRegistry), ) else: quantizer = ONNXQuantizer( model=copy_model, per_channel=False, reduce_range=False, mode=QuantizationMode.IntegerOps, static=False, weight_qType=True, activation_qType=False, tensors_range=None, nodes_to_quantize=None, nodes_to_exclude=None, op_types_to_quantize=list(IntegerOpsRegistry), ) # Quantize and export quantizer.quantize_model() # Append "-quantized" at the end of the model's name quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized") # Save model print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}") onnx.save_model(quantizer.model.model, quantized_model_path.as_posix()) return quantized_model_path
Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPUs. Args: onnx_model_path: Path to the location where the exported ONNX model is stored Returns: The Path generated for the quantized model
10,675
import warnings from argparse import ArgumentParser from os import listdir, makedirs from pathlib import Path from typing import Dict, List, Optional, Tuple from packaging.version import Version, parse from transformers.pipelines import Pipeline, pipeline from transformers.tokenization_utils import BatchEncoding from transformers.utils import ModelOutput, is_tf_available, is_torch_available def verify(path: Path): from onnxruntime import InferenceSession, SessionOptions from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException print(f"Checking ONNX model loading from: {path} ...") try: onnx_options = SessionOptions() _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"]) print(f"Model {path} correctly loaded: \N{heavy check mark}") except RuntimeException as re: print(f"Error while loading the model {re}: \N{heavy ballot x}")
null
10,676
import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from .generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .tf_utils import shape_list, stable_softmax from .utils import ModelOutput, logging def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: def _create_next_token_logits_penalties(input_ids, logits, repetition_penalty): # create logit penalties for already seen input_ids token_penalties = np.ones(shape_list(logits)) prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()] for i, prev_input_id in enumerate(prev_input_ids): logit_penalized = logits[i].numpy()[prev_input_id] logit_penalties = np.zeros(logit_penalized.shape) # if previous logit score is < 0 then multiply repetition penalty else divide logit_penalties[logit_penalized < 0] = repetition_penalty logit_penalties[logit_penalized > 0] = 1 / repetition_penalty np.put(token_penalties[i], prev_input_id, logit_penalties) return tf.convert_to_tensor(token_penalties, dtype=tf.float32)
null
10,677
import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from .generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .tf_utils import shape_list, stable_softmax from .utils import ModelOutput, logging def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len): # Copied from fairseq for no_repeat_ngram in beam_search if cur_len + 1 < no_repeat_ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].numpy().tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] def _get_generated_ngrams(hypo_idx): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - no_repeat_ngram_size ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) return generated_ngrams[hypo_idx].get(ngram_idx, []) banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] return banned_tokens
null
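A pure-Python sketch of the no-repeat-ngram idea implemented in `calc_banned_ngram_tokens` above: record every n-gram already generated, then, given the last n-1 tokens, ban any token that would complete a previously seen n-gram. Single sequence and toy token ids, without the batched bookkeeping of the original.

```python
def banned_ngram_tokens(generated, no_repeat_ngram_size):
    # Map each (n-1)-token prefix to the tokens that have already followed it.
    seen = {}
    n = no_repeat_ngram_size
    for i in range(len(generated) - n + 1):
        prefix, nxt = tuple(generated[i : i + n - 1]), generated[i + n - 1]
        seen.setdefault(prefix, []).append(nxt)
    # Ban tokens that would recreate an already-seen n-gram after the current prefix.
    return seen.get(tuple(generated[-(n - 1):]), [])

tokens = [5, 3, 9, 5, 3]  # the bigram "5 3" was already followed by 9 ...
print(banned_ngram_tokens(tokens, no_repeat_ngram_size=3))  # [9] -> 9 is banned as the next token
```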
10,678
import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from .generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .tf_utils import shape_list, stable_softmax from .utils import ModelOutput, logging def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids): banned_tokens = [] def _tokens_match(prev_tokens, tokens): if len(tokens) == 0: # if bad word tokens is just one token always ban it return True if len(tokens) > len(prev_tokens): # if bad word tokens are longer than prev tokens they can't be equal return False if prev_tokens[-len(tokens) :] == tokens: # if tokens match return True else: return False for prev_input_ids_slice in prev_input_ids: banned_tokens_slice = [] for banned_token_seq in bad_words_ids: assert ( len(banned_token_seq) > 0 ), f"Banned words token sequences { bad_words_ids} cannot have an empty list" if _tokens_match(prev_input_ids_slice.numpy().tolist(), banned_token_seq[:-1]) is False: # if tokens do not match continue continue banned_tokens_slice.append(banned_token_seq[-1]) banned_tokens.append(banned_tokens_slice) return banned_tokens
null
10,679
import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from .generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .tf_utils import shape_list, stable_softmax from .utils import ModelOutput, logging def scatter_values_on_batch_indices(values, batch_indices): shape = shape_list(batch_indices) # broadcast batch dim to shape broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of. Returns: `List[int]`: The shape of the tensor as a list. """ if isinstance(tensor, np.ndarray): return list(tensor.shape) dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor: """ Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will be removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that `softmax(x) = softmax(x + c)` (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html). Args: logits (`tf.Tensor`): Must be one of the following types: half, float32, float64. axis (`int`, *optional*): The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name (`str`, *optional*): A name for the operation. Returns: `tf.Tensor`: A Tensor. Has the same type and shape as logits. """ # TODO: When the issue linked above gets sorted, add a check on TF version here and use the original function if # it has the fix. After we drop the support for unfixed versions, remove this function. return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name) The provided code snippet includes necessary dependencies for implementing the `tf_top_k_top_p_filtering` function. 
Write a Python function `def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1)` to solve the following problem: Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 Here is the function: def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ logits_shape = shape_list(logits) if top_k > 0: top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None] logits = tf.where(indices_to_remove, filter_value, logits) if top_p < 1.0: sorted_indices = tf.argsort(logits, direction="DESCENDING") sorted_logits = tf.gather( logits, sorted_indices, axis=-1, batch_dims=1 ) # expects logits to be of dim (batch_size, vocab_size) cumulative_probs = tf.math.cumsum(stable_softmax(sorted_logits, axis=-1), axis=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove = tf.concat( [ tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]), sorted_indices_to_remove[:, min_tokens_to_keep:], ], -1, ) # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove = tf.concat( [tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, :-1]], -1, ) # scatter sorted tensors to original indexing indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices) logits = tf.where(indices_to_remove, filter_value, logits) return logits
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
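A minimal usage sketch, assuming the function above is in scope and TensorFlow is installed; the vocabulary size and thresholds are illustrative:

```python
import tensorflow as tf

# Hypothetical logits for a batch of 2 sequences over a 50-token vocabulary.
logits = tf.random.normal((2, 50))

# Keep the 5 most likely tokens, then restrict to the smallest set covering 90% cumulative probability.
filtered = tf_top_k_top_p_filtering(logits, top_k=5, top_p=0.9)

# Filtered positions hold -inf, so they receive zero probability when sampling.
next_tokens = tf.random.categorical(filtered, num_samples=1)  # shape (2, 1)
```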
10,680
import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from .generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .tf_utils import shape_list, stable_softmax from .utils import ModelOutput, logging def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of. Returns: `List[int]`: The shape of the tensor as a list. """ if isinstance(tensor, np.ndarray): return list(tensor.shape) dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] The provided code snippet includes necessary dependencies for implementing the `sample_without_replacement` function. Write a Python function `def sample_without_replacement(logits, num_samples)` to solve the following problem: categorical sampling without replacement is currently not implemented the gumbel-max trick will do for now see https://github.com/tensorflow/tensorflow/issues/9260 for more info Here is the function: def sample_without_replacement(logits, num_samples): """ categorical sampling without replacement is currently not implemented the gumbel-max trick will do for now see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)) _, indices = tf.nn.top_k(logits + z, num_samples) return indices
Categorical sampling without replacement is currently not implemented; the Gumbel-max trick will do for now. See https://github.com/tensorflow/tensorflow/issues/9260 for more info.
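A small sketch of the Gumbel-max trick in action, assuming the function above is in scope:

```python
import tensorflow as tf

# Unnormalized log-probabilities for one example over 4 classes.
logits = tf.math.log(tf.constant([[0.1, 0.2, 0.3, 0.4]]))

# Draw 3 distinct class indices; more probable classes are drawn more often.
indices = sample_without_replacement(logits, num_samples=3)
print(indices.shape)  # (1, 3)
```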
10,681
import contextlib import json import math import os import warnings from dataclasses import asdict, dataclass, field, fields from datetime import timedelta from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Union from packaging import version from .debug_utils import DebugOption from .trainer_utils import ( EvaluationStrategy, FSDPOption, HubStrategy, IntervalStrategy, SchedulerType, ShardedDDPOption, ) from .utils import ( ExplicitEnum, cached_property, ccl_version, get_full_repo_name, is_accelerate_available, is_psutil_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available, is_torch_tf32_available, is_torch_tpu_available, logging, requires_backends, torch_required, ) The provided code snippet includes necessary dependencies for implementing the `default_logdir` function. Write a Python function `def default_logdir() -> str` to solve the following problem: Same default as PyTorch Here is the function: def default_logdir() -> str: """ Same default as PyTorch """ import socket from datetime import datetime current_time = datetime.now().strftime("%b%d_%H-%M-%S") return os.path.join("runs", current_time + "_" + socket.gethostname())
Same default as PyTorch
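For illustration, calling the helper yields a run directory named after the current time and the host; the values below are placeholders:

```python
print(default_logdir())
# e.g. 'runs/Oct05_14-21-07_my-hostname'
```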
10,682
import contextlib import json import math import os import warnings from dataclasses import asdict, dataclass, field, fields from datetime import timedelta from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Union from packaging import version from .debug_utils import DebugOption from .trainer_utils import ( EvaluationStrategy, FSDPOption, HubStrategy, IntervalStrategy, SchedulerType, ShardedDDPOption, ) from .utils import ( ExplicitEnum, cached_property, ccl_version, get_full_repo_name, is_accelerate_available, is_psutil_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available, is_torch_tf32_available, is_torch_tpu_available, logging, requires_backends, torch_required, ) The provided code snippet includes necessary dependencies for implementing the `get_int_from_env` function. Write a Python function `def get_int_from_env(env_keys, default)` to solve the following problem: Returns the first positive env value found in the `env_keys` list or the default. Here is the function: def get_int_from_env(env_keys, default): """Returns the first positive env value found in the `env_keys` list or the default.""" for e in env_keys: val = int(os.environ.get(e, -1)) if val >= 0: return val return default
Returns the first positive env value found in the `env_keys` list or the default.
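A quick sketch with a made-up environment variable, assuming the other keys are unset:

```python
import os

os.environ["HYPOTHETICAL_WORLD_SIZE"] = "4"  # made-up variable for the example
print(get_int_from_env(["HYPOTHETICAL_WORLD_SIZE", "WORLD_SIZE"], default=1))  # 4
print(get_int_from_env(["HYPOTHETICAL_UNSET_KEY"], default=1))                 # 1
```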
10,683
import contextlib import json import math import os import warnings from dataclasses import asdict, dataclass, field, fields from datetime import timedelta from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Union from packaging import version from .debug_utils import DebugOption from .trainer_utils import ( EvaluationStrategy, FSDPOption, HubStrategy, IntervalStrategy, SchedulerType, ShardedDDPOption, ) from .utils import ( ExplicitEnum, cached_property, ccl_version, get_full_repo_name, is_accelerate_available, is_psutil_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available, is_torch_tf32_available, is_torch_tpu_available, logging, requires_backends, torch_required, ) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm The provided code snippet includes necessary dependencies for implementing the `get_xla_device_type` function. Write a Python function `def get_xla_device_type(device: "torch.device") -> Optional[str]` to solve the following problem: Returns the xla device type (CPU|GPU|TPU) or None if the device is a non-xla device. Here is the function: def get_xla_device_type(device: "torch.device") -> Optional[str]: """ Returns the xla device type (CPU|GPU|TPU) or None if the device is a non-xla device. """ if is_torch_tpu_available(): return xm.xla_real_devices([device])[0].split(":")[0] return None
Returns the xla device type (CPU|GPU|TPU) or None if the device is a non-xla device.
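A hedged usage sketch; without `torch_xla` installed the helper simply returns `None`, while under an XLA runtime it would report the underlying device type such as "TPU":

```python
import torch

print(get_xla_device_type(torch.device("cpu")))  # None on a plain CPU/GPU setup
```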
10,684
from ctypes import c_float, sizeof from enum import Enum from typing import TYPE_CHECKING, Optional, Union The provided code snippet includes necessary dependencies for implementing the `compute_effective_axis_dimension` function. Write a Python function `def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int` to solve the following problem: Args: dimension: fixed_dimension: num_token_to_add: Returns: Here is the function: def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int: """ Args: dimension: fixed_dimension: num_token_to_add: Returns: """ # < 0 is possible if using a dynamic axis if dimension <= 0: dimension = fixed_dimension dimension -= num_token_to_add return dimension
Args: dimension: The requested axis dimension; a value <= 0 marks a dynamic axis. fixed_dimension: The fixed dimension to fall back to when the axis is dynamic. num_token_to_add: Number of special tokens to subtract from the resulting dimension. Returns: The effective axis dimension to use when generating dummy inputs.
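Worked examples of the arithmetic, assuming the function above is in scope:

```python
# A dynamic axis (-1) falls back to the fixed dimension, minus the special tokens to add.
print(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2))  # 6

# A concrete dimension is kept as-is, again minus the tokens to add.
print(compute_effective_axis_dimension(16, fixed_dimension=8, num_token_to_add=2))  # 14
```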
10,685
from ctypes import c_float, sizeof from enum import Enum from typing import TYPE_CHECKING, Optional, Union class ParameterFormat(Enum): Float = c_float def size(self) -> int: """ Number of byte required for this data type Returns: Integer > 0 """ return sizeof(self.value) The provided code snippet includes necessary dependencies for implementing the `compute_serialized_parameters_size` function. Write a Python function `def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int` to solve the following problem: Compute the size taken by all the parameters in the given the storage format when serializing the model Args: num_parameters: Number of parameters to be saved dtype: The data format each parameter will be saved Returns: Size (in byte) taken to save all the parameters Here is the function: def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int: """ Compute the size taken by all the parameters in the given the storage format when serializing the model Args: num_parameters: Number of parameters to be saved dtype: The data format each parameter will be saved Returns: Size (in byte) taken to save all the parameters """ return num_parameters * dtype.size
Compute the size taken by all the parameters in the given storage format when serializing the model Args: num_parameters: Number of parameters to be saved dtype: The data format in which each parameter will be saved Returns: Size (in bytes) taken to save all the parameters
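A quick check of the arithmetic, assuming the `ParameterFormat` enum above: a model with 125 million float32 parameters serializes to roughly 500 MB.

```python
size_in_bytes = compute_serialized_parameters_size(125_000_000, ParameterFormat.Float)
print(size_in_bytes)          # 500000000 (125M parameters * 4 bytes each)
print(size_in_bytes / 2**30)  # ~0.47 GiB
```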
10,686
from ctypes import c_float, sizeof from enum import Enum from typing import TYPE_CHECKING, Optional, Union The provided code snippet includes necessary dependencies for implementing the `get_preprocessor` function. Write a Python function `def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]` to solve the following problem: Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor are loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found. Here is the function: def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]: """ Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor are loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found. """ # Avoid circular imports by only importing this here. from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore try: return AutoProcessor.from_pretrained(model_name) except (ValueError, OSError, KeyError): tokenizer, feature_extractor = None, None try: tokenizer = AutoTokenizer.from_pretrained(model_name) except (OSError, KeyError): pass try: feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) except (OSError, KeyError): pass if tokenizer is not None and feature_extractor is not None: raise ValueError( f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor." ) elif tokenizer is None and feature_extractor is None: return None elif tokenizer is not None: return tokenizer else: return feature_extractor
Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor is loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found.
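A hedged usage sketch; the checkpoint name is illustrative and resolving it downloads the preprocessor files from the Hub:

```python
preprocessor = get_preprocessor("bert-base-uncased")
if preprocessor is not None:
    # For a text-only checkpoint this is typically a (fast) tokenizer instance.
    print(type(preprocessor).__name__)
```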
10,687
import warnings from inspect import signature from itertools import chain from pathlib import Path from typing import TYPE_CHECKING, Iterable, List, Tuple, Union import numpy as np from packaging.version import Version, parse from ..tokenization_utils_base import PreTrainedTokenizerBase from ..utils import ( TensorType, is_tf_available, is_torch_available, is_torch_onnx_dict_inputs_support_available, logging, ) from .config import OnnxConfig ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") The provided code snippet includes necessary dependencies for implementing the `check_onnxruntime_requirements` function. Write a Python function `def check_onnxruntime_requirements(minimum_version: Version)` to solve the following problem: Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found Here is the function: def check_onnxruntime_requirements(minimum_version: Version): """ Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found """ try: import onnxruntime # Parse the version of the installed onnxruntime ort_version = parse(onnxruntime.__version__) # We require 1.4.0 minimum if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError( f"We found an older version of onnxruntime ({onnxruntime.__version__}) " f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" "Please update onnxruntime by running `pip install --upgrade onnxruntime`" ) except ImportError: raise ImportError( "onnxruntime doesn't seem to be currently installed. " "Please install the onnxruntime by running `pip install onnxruntime`" " and relaunch the conversion." )
Check that onnxruntime is installed and that the installed version is recent enough Raises: ImportError: If onnxruntime is not installed or the installed version is too old
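A minimal sketch of guarding an export path with the check; the minimum version passed here mirrors the module-level constant:

```python
from packaging.version import parse

try:
    check_onnxruntime_requirements(minimum_version=parse("1.4.0"))
except ImportError as err:
    print(f"Skipping ONNX validation: {err}")
```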
10,688
import warnings from inspect import signature from itertools import chain from pathlib import Path from typing import TYPE_CHECKING, Iterable, List, Tuple, Union import numpy as np from packaging.version import Version, parse from ..tokenization_utils_base import PreTrainedTokenizerBase from ..utils import ( TensorType, is_tf_available, is_torch_available, is_torch_onnx_dict_inputs_support_available, logging, ) from .config import OnnxConfig if is_torch_available(): from ..modeling_utils import PreTrainedModel from ..pytorch_utils import is_torch_less_than_1_11 logger = logging.get_logger(__name__) class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin): def __init__(self, **kwargs): def max_len_single_sentence(self) -> int: def max_len_sentences_pair(self) -> int: def max_len_single_sentence(self, value) -> int: def max_len_sentences_pair(self, value) -> int: def _set_processor_class(self, processor_class: str): def __repr__(self) -> str: def get_vocab(self) -> Dict[str, int]: def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): def _from_pretrained( cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, use_auth_token=None, cache_dir=None, local_files_only=False, _commit_hash=None, **kwargs ): def convert_added_tokens(obj: Union[AddedToken, Any]): def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): def save_pretrained( self, save_directory: Union[str, os.PathLike], legacy_format: Optional[bool] = None, filename_prefix: Optional[str] = None, push_to_hub: bool = False, **kwargs, ) -> Tuple[str]: def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True): def _save_pretrained( self, save_directory: Union[str, os.PathLike], file_names: Tuple[str], legacy_format: Optional[bool] = None, filename_prefix: Optional[str] = None, ) -> Tuple[str]: def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: def encode( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs ) -> List[int]: def num_special_tokens_to_add(self, pair: bool = False) -> int: def _get_padding_truncation_strategies( self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs ): def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] ] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = 
None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def _call_one( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def _is_valid_text_input(t): def encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs 
) -> BatchEncoding: def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def pad( self, encoded_inputs: Union[ BatchEncoding, List[BatchEncoding], Dict[str, EncodedInput], Dict[str, List[EncodedInput]], List[Dict[str, EncodedInput]], ], padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, verbose: bool = True, ) -> BatchEncoding: def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: def prepare_for_model( self, ids: List[int], pair_ids: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs ) -> BatchEncoding: def truncate_sequences( self, ids: List[int], pair_ids: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: def convert_tokens_to_string(self, tokens: List[str]) -> str: def batch_decode( self, sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True, **kwargs ) -> List[str]: def decode( self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True, **kwargs ) -> str: def _decode( self, token_ids: Union[int, List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True, **kwargs ) -> str: def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> 
List[int]: def clean_up_tokenization(out_string: str) -> str: def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool): def _switch_to_input_mode(self): def _switch_to_target_mode(self): def as_target_tokenizer(self): def register_for_auto_class(cls, auto_class="AutoTokenizer"): def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub) PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format( object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files" ) class OnnxConfig(ABC): def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None): def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig": def inputs(self) -> Mapping[str, Mapping[int, str]]: def outputs(self) -> Mapping[str, Mapping[int, str]]: def values_override(self) -> Optional[Mapping[str, Any]]: def default_batch_size(self) -> int: def default_sequence_length(self) -> int: def default_num_choices(self) -> int: def default_onnx_opset(self) -> int: def atol_for_validation(self) -> float: def is_torch_support_available(self) -> bool: def use_external_data_format(num_parameters: int) -> bool: def _generate_dummy_images( self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40 ): def _generate_dummy_audio( self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220 ): def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]: def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]: def patch_ops(self): def restore_ops(self): def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin): def dummy_inputs(self) -> Dict[str, torch.Tensor]: def framework(self) -> str: def __init__(self, config: PretrainedConfig, *inputs, **kwargs): def post_init(self): def _backward_compatibility_gradient_checkpointing(self): def _from_config(cls, config, **kwargs): def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: def base_model(self) -> nn.Module: def get_input_embeddings(self) -> nn.Module: def set_input_embeddings(self, value: nn.Module): def get_output_embeddings(self) -> nn.Module: def _init_weights(self, module): def tie_weights(self): def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str): def tie_encoder_to_decoder_recursively( decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, uninitialized_encoder_weights: List[str], depth=0, ): def _tie_or_clone_weights(self, output_embeddings, input_embeddings): def resize_token_embeddings(self, 
new_num_tokens: Optional[int] = None) -> nn.Embedding: def _resize_token_embeddings(self, new_num_tokens): def _get_resized_embeddings( self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None ) -> nn.Embedding: def _get_resized_lm_head( self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False ) -> nn.Linear: def resize_position_embeddings(self, new_num_position_embeddings: int): def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: def init_weights(self): def prune_heads(self, heads_to_prune: Dict[int, List[int]]): def gradient_checkpointing_enable(self): def gradient_checkpointing_disable(self): def is_gradient_checkpointing(self) -> bool: def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, state_dict: Optional[dict] = None, save_function: Callable = torch.save, push_to_hub: bool = False, max_shard_size: Union[int, str] = "10GB", safe_serialization: bool = False, **kwargs, ): def get_memory_footprint(self, return_buffers=True): def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): def _load_pretrained_model( cls, model, state_dict, loaded_keys, resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=False, sharded_metadata=None, _fast_init=True, low_cpu_mem_usage=False, device_map=None, offload_folder=None, offload_state_dict=None, dtype=None, load_in_8bit=False, ): def _fix_key(key): def _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes, ): def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): def _load_pretrained_model_low_mem(model, loaded_state_dict_keys, resolved_archive_file, start_prefix=""): def register_for_auto_class(cls, auto_class="AutoModel"): PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object="model", object_class="AutoModel", object_files="model file" ) def validate_model_outputs( config: OnnxConfig, preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], reference_model: Union["PreTrainedModel", "TFPreTrainedModel"], onnx_model: Path, onnx_named_outputs: List[str], atol: float, tokenizer: "PreTrainedTokenizer" = None, ): from onnxruntime import InferenceSession, SessionOptions logger.info("Validating ONNX model...") if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to validate the model outputs.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer # generate inputs with a different batch_size and seq_len that was used for conversion to properly test # dynamic input shapes. 
if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): reference_model_inputs = config.generate_dummy_inputs( preprocessor, batch_size=config.default_fixed_batch + 1, seq_length=config.default_fixed_sequence + 1, framework=TensorType.PYTORCH, ) else: reference_model_inputs = config.generate_dummy_inputs( preprocessor, batch_size=config.default_fixed_batch + 1, seq_length=config.default_fixed_sequence + 1, framework=TensorType.TENSORFLOW, ) # Create ONNX Runtime session options = SessionOptions() session = InferenceSession(onnx_model.as_posix(), options, providers=["CPUExecutionProvider"]) # Compute outputs from the reference model if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): reference_model.to("cpu") ref_outputs = reference_model(**reference_model_inputs) ref_outputs_dict = {} # We flatten potential collection of outputs (i.e. past_keys) to a flat structure for name, value in ref_outputs.items(): # Overwriting the output name as "present" since it is the name used for the ONNX outputs # ("past_key_values" being taken for the ONNX inputs) if name == "past_key_values": name = "present" if isinstance(value, (list, tuple)): value = config.flatten_output_collection_property(name, value) ref_outputs_dict.update(value) else: ref_outputs_dict[name] = value # Create onnxruntime inputs from the reference model inputs reference_model_inputs_onnxruntime = config.generate_dummy_inputs_onnxruntime(reference_model_inputs) # We flatten potential collection of inputs (i.e. past_keys) onnx_inputs = {} for name, value in reference_model_inputs_onnxruntime.items(): if isinstance(value, (list, tuple)): value = config.flatten_output_collection_property(name, value) onnx_inputs.update({tensor_name: pt_tensor.numpy() for tensor_name, pt_tensor in value.items()}) else: onnx_inputs[name] = value.numpy() # Compute outputs from the ONNX model onnx_outputs = session.run(onnx_named_outputs, onnx_inputs) # Check we have a subset of the keys into onnx_outputs against ref_outputs ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_named_outputs) if not onnx_outputs_set.issubset(ref_outputs_set): logger.info( f"\t-[x] ONNX model output names {onnx_outputs_set} do not match reference model {ref_outputs_set}" ) raise ValueError( "Outputs doesn't match between reference model and ONNX exported model: " f"{onnx_outputs_set.difference(ref_outputs_set)}" ) else: logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_outputs_set})") # Check the shape and values match for name, ort_value in zip(onnx_named_outputs, onnx_outputs): if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): ref_value = ref_outputs_dict[name].detach().numpy() else: ref_value = ref_outputs_dict[name].numpy() logger.info(f'\t- Validating ONNX Model output "{name}":') # Shape if not ort_value.shape == ref_value.shape: logger.info(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}") raise ValueError( "Outputs shape doesn't match between reference model and ONNX exported model: " f"Got {ref_value.shape} (reference) and {ort_value.shape} (ONNX)" ) else: logger.info(f"\t\t-[✓] {ort_value.shape} matches {ref_value.shape}") # Values if not np.allclose(ref_value, ort_value, atol=atol): bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol)) logger.info(f"\t\t-[x] values not close enough (atol: {atol})") raise ValueError( "Outputs values doesn't match between reference model and ONNX exported model: " f"Got 
max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for " f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}" ) else: logger.info(f"\t\t-[✓] all values close (atol: {atol})")
null
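A hedged sketch of how the validation helper is typically wired up after an export. It assumes `onnx`, `onnxruntime`, and network access are available, uses the public `transformers.onnx.export` helper, and the checkpoint name and output path are illustrative:

```python
from pathlib import Path

from transformers import AutoModel, AutoTokenizer
from transformers.models.bert import BertOnnxConfig
from transformers.onnx import export

model_name = "bert-base-uncased"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
onnx_config = BertOnnxConfig(model.config)

onnx_path = Path("model.onnx")
onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path)

# Raises ValueError if the ONNX outputs drift from the reference PyTorch outputs.
validate_model_outputs(onnx_config, tokenizer, model, onnx_path, onnx_outputs, onnx_config.atol_for_validation)
```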
10,689
import os from functools import partial, reduce from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union import transformers from .. import PretrainedConfig, is_tf_available, is_torch_available from ..utils import TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging from .config import OnnxConfig class OnnxConfig(ABC): """ Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format. """ default_fixed_batch = 2 default_fixed_sequence = 8 default_fixed_num_choices = 4 torch_onnx_minimum_version = version.parse("1.8") _tasks_to_common_outputs = { "causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}), "image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "image-segmentation": OrderedDict( { "logits": {0: "batch", 1: "sequence"}, "pred_boxes": {0: "batch", 1: "sequence"}, "pred_masks": {0: "batch", 1: "sequence"}, } ), "masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "multiple-choice": OrderedDict({"logits": {0: "batch"}}), "object-detection": OrderedDict( { "logits": {0: "batch", 1: "sequence"}, "pred_boxes": {0: "batch", 1: "sequence"}, } ), "question-answering": OrderedDict( { "start_logits": {0: "batch", 1: "sequence"}, "end_logits": {0: "batch", 1: "sequence"}, } ), "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}), "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), "sequence-classification": OrderedDict({"logits": {0: "batch"}}), "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), } def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None): self._config = config if task not in self._tasks_to_common_outputs: raise ValueError( f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}" ) self.task = task self._patching_specs = [] for spec in patching_specs if patching_specs is not None else []: final_spec = spec if spec.orig_op is None: final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name)) self._patching_specs.append(final_spec) def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig": """ Instantiate a OnnxConfig for a specific model Args: config: The model's configuration to use when exporting to ONNX Returns: OnnxConfig for this model """ return cls(config, task=task) def inputs(self) -> Mapping[str, Mapping[int, str]]: """ Mapping containing the axis definition of the input tensors to provide to the model Returns: For each input: its name associated to the axes symbolic name and the axis position within the tensor """ raise NotImplementedError() def outputs(self) -> Mapping[str, Mapping[int, str]]: """ Mapping containing the axis definition of the output tensors to provide to the model Returns: For each output: its name associated to the axes symbolic name and the axis position within the tensor """ common_outputs = self._tasks_to_common_outputs[self.task] return copy.deepcopy(common_outputs) def values_override(self) -> Optional[Mapping[str, Any]]: """ Dictionary of keys to override in the model's config before exporting Returns: Dictionary with the 
keys (and their corresponding values) to override """ if hasattr(self._config, "use_cache"): return {"use_cache": False} return None def default_batch_size(self) -> int: """ The default batch size to use if no other indication Returns: Integer > 0 """ # Using 2 avoid ONNX making assumption about single sample batch return OnnxConfig.default_fixed_batch def default_sequence_length(self) -> int: """ The default sequence length to use if no other indication Returns: Integer > 0 """ return OnnxConfig.default_fixed_sequence def default_num_choices(self) -> int: """ The default number of choices to use if no other indication Returns: Integer > 0 """ return OnnxConfig.default_fixed_num_choices def default_onnx_opset(self) -> int: """ Which onnx opset to use when exporting the model Returns: Integer ONNX Opset version """ return DEFAULT_ONNX_OPSET def atol_for_validation(self) -> float: """ What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value. """ return 1e-5 def is_torch_support_available(self) -> bool: """ The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model. """ if is_torch_available(): from transformers.utils import torch_version return torch_version >= self.torch_onnx_minimum_version else: return False def use_external_data_format(num_parameters: int) -> bool: """ Flag indicating if the model requires using external data format Args: num_parameters: Number of parameter on the model Returns: True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise """ return ( compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT ) def _generate_dummy_images( self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40 ): images = [] for _ in range(batch_size): data = np.random.rand(image_height, image_width, num_channels) * 255 images.append(Image.fromarray(data.astype("uint8")).convert("RGB")) return images def _generate_dummy_audio( self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220 ): audio_data = [] for _ in range(batch_size): # time variable t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False) # generate pure sine wave at `frequency` Hz audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t)) return audio_data def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]: """ Generate inputs to provide to the ONNX exporter for the specific framework Args: preprocessor: ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]): The preprocessor associated with this model configuration. batch_size (`int`, *optional*, defaults to -1): The batch size to export the model for (-1 means dynamic axis). num_choices (`int`, *optional*, defaults to -1): The number of candidate answers provided for multiple choice task (-1 means dynamic axis). seq_length (`int`, *optional*, defaults to -1): The sequence length to export the model for (-1 means dynamic axis). 
is_pair (`bool`, *optional*, defaults to `False`): Indicate if the input is a pair (sentence 1, sentence 2) framework (`TensorType`, *optional*, defaults to `None`): The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for. num_channels (`int`, *optional*, defaults to 3): The number of channels of the generated images. image_width (`int`, *optional*, defaults to 40): The width of the generated images. image_height (`int`, *optional*, defaults to 40): The height of the generated images. sampling_rate (`int`, *optional* defaults to 22050) The sampling rate for audio data generation. time_duration (`float`, *optional* defaults to 5.0) Total seconds of sampling for audio data generation. frequency (`int`, *optional* defaults to 220) The desired natural frequency of generated audio. Returns: Mapping[str, Tensor] holding the kwargs to provide to the model's forward function """ from ..feature_extraction_utils import FeatureExtractionMixin from ..tokenization_utils_base import PreTrainedTokenizerBase if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = preprocessor.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence input_token = ( preprocessor.unk_token if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0) else "0" ) dummy_input = [" ".join([input_token]) * seq_length] * batch_size if self.task == "multiple-choice": # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations # made by ONNX num_choices = compute_effective_axis_dimension( num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0 ) dummy_input = dummy_input * num_choices # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length] tokenized_input = preprocessor(dummy_input, text_pair=dummy_input) # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length] for k, v in tokenized_input.items(): tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)] return dict(tokenized_input.convert_to_tensors(tensor_type=framework)) return dict(preprocessor(dummy_input, return_tensors=framework)) elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) 
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif ( isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features" ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency) return dict(preprocessor(dummy_input, return_tensors=framework)) else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." ) def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]: """ Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq models which have the encoder and decoder exported as separate ONNX files. Args: reference_model_inputs ([`Mapping[str, Tensor]`): Reference inputs for the model. Returns: `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function """ return reference_model_inputs def patch_ops(self): for spec in self._patching_specs: custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op) setattr(spec.o, spec.name, custom_op) def restore_ops(self): for spec in self._patching_specs: orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op) setattr(spec.o, spec.name, orig_op) def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: """ Flatten any potential nested structure expanding the name of the field with the index of the element within the structure. Args: name: The name of the nested structure field: The structure to, potentially, be flattened Returns: (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure. """ from itertools import chain return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))} The provided code snippet includes necessary dependencies for implementing the `supported_features_mapping` function. Write a Python function `def supported_features_mapping( *supported_features: str, onnx_config_cls: str = None ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]` to solve the following problem: Generate the mapping between supported the features and their corresponding OnnxConfig for a given model. Args: *supported_features: The names of the supported features. onnx_config_cls: The OnnxConfig full name corresponding to the model. Returns: The dictionary mapping a feature to an OnnxConfig constructor. Here is the function: def supported_features_mapping( *supported_features: str, onnx_config_cls: str = None ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: """ Generate the mapping between supported the features and their corresponding OnnxConfig for a given model. Args: *supported_features: The names of the supported features. onnx_config_cls: The OnnxConfig full name corresponding to the model. Returns: The dictionary mapping a feature to an OnnxConfig constructor. 
""" if onnx_config_cls is None: raise ValueError("A OnnxConfig class must be provided") config_cls = transformers for attr_name in onnx_config_cls.split("."): config_cls = getattr(config_cls, attr_name) mapping = {} for feature in supported_features: if "-with-past" in feature: task = feature.replace("-with-past", "") mapping[feature] = partial(config_cls.with_past, task=task) else: mapping[feature] = partial(config_cls.from_model_config, task=feature) return mapping
Generate the mapping between the supported features and their corresponding OnnxConfig for a given model. Args: *supported_features: The names of the supported features. onnx_config_cls: The OnnxConfig full name corresponding to the model. Returns: The dictionary mapping a feature to an OnnxConfig constructor.
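A hedged usage sketch; the `onnx_config_cls` string mirrors how the features manager in transformers points at a per-model OnnxConfig class, and the class path shown is only an example:

```python
mapping = supported_features_mapping(
    "default",
    "sequence-classification",
    onnx_config_cls="models.bert.BertOnnxConfig",
)
print(sorted(mapping))  # ['default', 'sequence-classification'] -> OnnxConfig constructors
```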
10,690
import copy import json import os import re import warnings from collections import OrderedDict, UserDict from collections.abc import Mapping from contextlib import contextmanager from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union import numpy as np from packaging import version from . import __version__ from .dynamic_module_utils import custom_object_save from .utils import ( ExplicitEnum, PaddingStrategy, PushToHubMixin, TensorType, add_end_docstrings, cached_file, copy_func, download_url, extract_commit_hash, is_flax_available, is_jax_tensor, is_numpy_array, is_offline_mode, is_remote_url, is_tf_available, is_tf_tensor, is_tokenizers_available, is_torch_available, is_torch_device, is_torch_tensor, logging, to_py_obj, torch_required, ) FULL_TOKENIZER_FILE = "tokenizer.json" _re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json") The provided code snippet includes necessary dependencies for implementing the `get_fast_tokenizer_file` function. Write a Python function `def get_fast_tokenizer_file(tokenization_files: List[str]) -> str` to solve the following problem: Get the tokenization file to use for this version of transformers. Args: tokenization_files (`List[str]`): The list of available configuration files. Returns: `str`: The tokenization file to use. Here is the function: def get_fast_tokenizer_file(tokenization_files: List[str]) -> str: """ Get the tokenization file to use for this version of transformers. Args: tokenization_files (`List[str]`): The list of available configuration files. Returns: `str`: The tokenization file to use. """ tokenizer_files_map = {} for file_name in tokenization_files: search = _re_tokenizer_file.search(file_name) if search is not None: v = search.groups()[0] tokenizer_files_map[v] = file_name available_versions = sorted(tokenizer_files_map.keys()) # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions. tokenizer_file = FULL_TOKENIZER_FILE transformers_version = version.parse(__version__) for v in available_versions: if version.parse(v) <= transformers_version: tokenizer_file = tokenizer_files_map[v] else: # No point going further since the versions are sorted. break return tokenizer_file
Get the tokenization file to use for this version of transformers. Args: tokenization_files (`List[str]`): The list of available tokenizer files. Returns: `str`: The tokenization file to use.
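A small self-contained example with made-up file names; which file is picked depends on the installed transformers version (assumed here to be >= 4.0.0 and < 99.0.0):

```python
files = ["tokenizer.json", "tokenizer.4.0.0.json", "tokenizer.99.0.0.json"]
print(get_fast_tokenizer_file(files))  # 'tokenizer.4.0.0.json'
```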
10,691
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core deps = { "Pillow": "Pillow", "accelerate": "accelerate>=0.10.0", "black": "black==22.3", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "deepspeed": "deepspeed>=0.6.5", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flake8": "flake8>=3.8.3", "flax": "flax>=0.4.1", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.10.0,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.3.6", "jaxlib": "jaxlib>=0.1.65,<=0.3.6", "jieba": "jieba", "kenlm": "kenlm", "nltk": "nltk", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "optuna": "optuna", "optax": "optax>=0.0.8", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf<=3.20.2", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.7.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.2.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "librosa": "librosa", "starlette": "starlette", "tensorflow-cpu": "tensorflow-cpu>=2.3", "tensorflow": "tensorflow>=2.4", "tensorflow-text": "tensorflow-text", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.7,!=1.12.0", "torchaudio": "torchaudio", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "uvicorn": "uvicorn", "beautifulsoup4": "beautifulsoup4", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "pyknp": "pyknp>=0.6.1", } def require_version(requirement: str, hint: Optional[str] = None) -> None: """ Perform a runtime check of the dependency versions, using the exact same syntax used by pip. The installed module version comes from the *site-packages* dir via *importlib_metadata*. 
Args: requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" hint (`str`, *optional*): what suggestion to print in case of requirements not being met Example: ```python require_version("pandas>1.1.2") require_version("numpy>1.18.5", "this is important to have for whatever reason") ```""" hint = f"\n{hint}" if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$", requirement): pkg, op, want_ver = requirement, None, None else: match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f" got {requirement}" ) pkg, want_full = match[0] want_range = want_full.split(",") # there could be multiple requirements wanted = {} for w in want_range: match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f" but got {requirement}" ) op, want_ver = match[0] wanted[op] = want_ver if op not in ops: raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") # special case if pkg == "python": got_ver = ".".join([str(x) for x in sys.version_info[:3]]) for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) return # check if any version is installed try: got_ver = importlib_metadata.version(pkg) except importlib_metadata.PackageNotFoundError: raise importlib_metadata.PackageNotFoundError( f"The '{requirement}' distribution was not found and is required by this application. {hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) def dep_version_check(pkg, hint=None): require_version(deps[pkg], hint)
null
10,692
import argparse from . import ( BertConfig, BertGenerationConfig, BertGenerationDecoder, BertGenerationEncoder, load_tf_weights_in_bert_generation, logging, ) def convert_tf_checkpoint_to_pytorch(tf_hub_path, pytorch_dump_path, is_encoder_named_decoder, vocab_size, is_encoder): # Initialise PyTorch model bert_config = BertConfig.from_pretrained( "bert-large-cased", vocab_size=vocab_size, max_position_embeddings=512, is_decoder=True, add_cross_attention=True, ) bert_config_dict = bert_config.to_dict() del bert_config_dict["type_vocab_size"] config = BertGenerationConfig(**bert_config_dict) if is_encoder: model = BertGenerationEncoder(config) else: model = BertGenerationDecoder(config) print(f"Building PyTorch model from configuration: {config}") # Load weights from tf checkpoint load_tf_weights_in_bert_generation( model, tf_hub_path, model_class="bert", is_encoder_named_decoder=is_encoder_named_decoder, is_encoder=is_encoder, ) # Save pytorch-model print(f"Save PyTorch model and config to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path)
null
10,693
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) def set_seed(seed: int): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if is_tf_available(): tf.random.set_seed(seed) The provided code snippet includes necessary dependencies for implementing the `seed_worker` function. Write a Python function `def seed_worker(_)` to solve the following problem: Helper function to set worker seed during Dataloader initialization. Here is the function: def seed_worker(_): """ Helper function to set worker seed during Dataloader initialization. """ worker_seed = torch.initial_seed() % 2**32 set_seed(worker_seed)
Helper function to set worker seed during `DataLoader` initialization.
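A minimal usage sketch (an illustration, not part of the original record): `seed_worker` is meant to be passed as the `worker_init_fn` of a PyTorch `DataLoader` so that each worker process reseeds itself from `torch.initial_seed()`. The dataset below is a hypothetical stand-in.

```python
# Hedged sketch: seed_worker is the helper defined in the snippet above.
import torch
from torch.utils.data import DataLoader, TensorDataset

train_dataset = TensorDataset(torch.arange(100, dtype=torch.float32))  # stand-in dataset
loader = DataLoader(
    train_dataset,
    batch_size=8,
    num_workers=2,               # each worker calls seed_worker once at startup
    worker_init_fn=seed_worker,
)
```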
10,694
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf def set_seed(seed: int): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if is_tf_available(): tf.random.set_seed(seed) The provided code snippet includes necessary dependencies for implementing the `enable_full_determinism` function. Write a Python function `def enable_full_determinism(seed: int)` to solve the following problem: Helper function for reproducible behavior during distributed training. See - https://pytorch.org/docs/stable/notes/randomness.html for pytorch - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow Here is the function: def enable_full_determinism(seed: int): """ Helper function for reproducible behavior during distributed training. See - https://pytorch.org/docs/stable/notes/randomness.html for pytorch - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow """ # set seed first set_seed(seed) if is_torch_available(): #  Enable PyTorch deterministic mode. This potentially requires either the environment #  variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, # depending on the CUDA version, so we set them both here os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.use_deterministic_algorithms(True) # Enable CUDNN deterministic mode torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if is_tf_available(): tf.config.experimental.enable_op_determinism()
Helper function for reproducible behavior during distributed training. See - https://pytorch.org/docs/stable/notes/randomness.html for pytorch - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow
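A short, hedged usage sketch: the helper is intended to be called once at the top of a training script, before models or dataloaders are built, so every installed framework is seeded and switched into deterministic mode.

```python
# 42 is an arbitrary seed chosen for illustration.
enable_full_determinism(seed=42)
```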
10,695
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
null
10,696
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) def speed_metrics(split, start_time, num_samples=None, num_steps=None): """ Measure and return speed performance metrics. This function requires a time snapshot `start_time` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - split: name to prefix metric (like train, eval, test...) - start_time: operation start time - num_samples: number of samples processed """ runtime = time.time() - start_time result = {f"{split}_runtime": round(runtime, 4)} if num_samples is not None: samples_per_second = num_samples / runtime result[f"{split}_samples_per_second"] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f"{split}_steps_per_second"] = round(steps_per_second, 3) return result The provided code snippet includes necessary dependencies for implementing the `default_compute_objective` function. Write a Python function `def default_compute_objective(metrics: Dict[str, float]) -> float` to solve the following problem: The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. Return: `float`: The objective to minimize or maximize Here is the function: def default_compute_objective(metrics: Dict[str, float]) -> float: """ The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. Return: `float`: The objective to minimize or maximize """ metrics = copy.deepcopy(metrics) loss = metrics.pop("eval_loss", None) _ = metrics.pop("epoch", None) # Remove speed metrics speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second")] for sm in speed_metrics: _ = metrics.pop(sm, None) return loss if len(metrics) == 0 else sum(metrics.values())
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. Return: `float`: The objective to minimize or maximize
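A worked example with illustrative values, showing both branches: when only the loss plus epoch/speed metrics are present, the objective is the evaluation loss; when task metrics remain, their sum is returned instead.

```python
# Only eval_loss survives after dropping epoch and speed metrics -> the loss is the objective.
metrics = {"eval_loss": 0.35, "epoch": 2.0, "eval_runtime": 12.5, "eval_samples_per_second": 80.0}
assert default_compute_objective(metrics) == 0.35

# Task metrics remain -> their sum is the objective.
assert default_compute_objective({"eval_accuracy": 0.91, "eval_f1": 0.88}) == 0.91 + 0.88
```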
10,697
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) def is_optuna_available(): return importlib.util.find_spec("optuna") is not None def default_hp_space_optuna(trial) -> Dict[str, float]: from .integrations import is_optuna_available assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" return { "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), "seed": trial.suggest_int("seed", 1, 40), "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), }
null
10,698
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) def is_ray_tune_available(): if not is_ray_available(): return False return importlib.util.find_spec("ray.tune") is not None def default_hp_space_ray(trial) -> Dict[str, float]: from .integrations import is_ray_tune_available assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`" from ray import tune return { "learning_rate": tune.loguniform(1e-6, 1e-4), "num_train_epochs": tune.choice(list(range(1, 6))), "seed": tune.uniform(1, 40), "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), }
null
10,699
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) def default_hp_space_sigopt(trial): return [ {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"}, {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"}, {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"}, { "categorical_values": ["4", "8", "16", "32", "64"], "name": "per_device_train_batch_size", "type": "categorical", }, ]
null
10,700
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) def is_wandb_available(): # any value of WANDB_DISABLED disables wandb if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES: logger.warning( "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the " "--report_to flag to control the integrations used for logging result (for instance --report_to none)." ) return False return importlib.util.find_spec("wandb") is not None def default_hp_space_wandb(trial) -> Dict[str, float]: from .integrations import is_wandb_available if not is_wandb_available(): raise ImportError("This function needs wandb installed: `pip install wandb`") return { "method": "random", "metric": {"name": "objective", "goal": "minimize"}, "parameters": { "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6}, "seed": {"distribution": "int_uniform", "min": 1, "max": 40}, "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]}, }, }
null
10,701
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) The provided code snippet includes necessary dependencies for implementing the `is_main_process` function. Write a Python function `def is_main_process(local_rank)` to solve the following problem: Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`. Here is the function: def is_main_process(local_rank): """ Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`. """ if is_torch_tpu_available(check_device=True): import torch_xla.core.xla_model as xm return xm.get_ordinal() == 0 return local_rank in [-1, 0]
Whether or not the current process is the main process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`.
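A small illustrative check, assuming no TPU device is attached so only `local_rank` is consulted.

```python
# local_rank is -1 when no distributed launcher is used, 0 for the first process.
assert is_main_process(local_rank=-1)
assert is_main_process(local_rank=0)
assert not is_main_process(local_rank=1)
```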
10,702
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) if is_torch_available(): import torch The provided code snippet includes necessary dependencies for implementing the `total_processes_number` function. Write a Python function `def total_processes_number(local_rank)` to solve the following problem: Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. Here is the function: def total_processes_number(local_rank): """ Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. """ if is_torch_tpu_available(check_device=True): import torch_xla.core.xla_model as xm return xm.xrt_world_size() elif local_rank != -1 and is_torch_available(): import torch return torch.distributed.get_world_size() return 1
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
10,703
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) The provided code snippet includes necessary dependencies for implementing the `has_length` function. Write a Python function `def has_length(dataset)` to solve the following problem: Checks if the dataset implements __len__() and it doesn't raise an error Here is the function: def has_length(dataset): """ Checks if the dataset implements __len__() and it doesn't raise an error """ try: return len(dataset) is not None except TypeError: # TypeError: len() of unsized object return False
Checks whether the dataset implements `__len__()` and that calling it doesn't raise an error.
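A quick illustration of both outcomes: sized containers report a length, while a bare generator makes `len()` raise `TypeError`, which the helper turns into `False`.

```python
assert has_length([1, 2, 3])                 # lists implement __len__
assert not has_length(x for x in range(3))  # generators do not
```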
10,704
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) if is_torch_available(): import torch The provided code snippet includes necessary dependencies for implementing the `denumpify_detensorize` function. Write a Python function `def denumpify_detensorize(metrics)` to solve the following problem: Recursively calls `.item()` on the element of the dictionary passed Here is the function: def denumpify_detensorize(metrics): """ Recursively calls `.item()` on the element of the dictionary passed """ if isinstance(metrics, (list, tuple)): return type(metrics)(denumpify_detensorize(m) for m in metrics) elif isinstance(metrics, dict): return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) elif isinstance(metrics, np.generic): return metrics.item() elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: return metrics.item() return metrics
Recursively calls `.item()` on the elements of the dictionary, list, or tuple passed.
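A worked example with illustrative values (`np` is the numpy import from the snippet above): numpy scalars are converted to plain Python numbers while nested containers keep their types.

```python
metrics = {"eval_loss": np.float64(0.25), "counts": [np.int64(3), 7]}
assert denumpify_detensorize(metrics) == {"eval_loss": 0.25, "counts": [3, 7]}
```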
10,705
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) The provided code snippet includes necessary dependencies for implementing the `number_of_arguments` function. Write a Python function `def number_of_arguments(func)` to solve the following problem: Return the number of arguments of the passed function, even if it's a partial function. Here is the function: def number_of_arguments(func): """ Return the number of arguments of the passed function, even if it's a partial function. """ if isinstance(func, functools.partial): total_args = len(inspect.signature(func.func).parameters) return total_args - len(func.args) - len(func.keywords) return len(inspect.signature(func).parameters)
Return the number of arguments of the passed function, even if it's a partial function.
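An illustrative check with a hypothetical function `scale`: arguments already bound through `functools.partial` are subtracted from the count.

```python
def scale(x, factor, offset=0.0):
    return x * factor + offset

assert number_of_arguments(scale) == 3
assert number_of_arguments(functools.partial(scale, factor=2.0)) == 2
```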
10,706
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_tpu_available, requires_backends, ) The provided code snippet includes necessary dependencies for implementing the `find_executable_batch_size` function. Write a Python function `def find_executable_batch_size( function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False )` to solve the following problem: Args: A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. function (`callable`, *optional*) A function to wrap starting_batch_size (`int`, *optional*) The batch size to try and fit into memory auto_find_batch_size (`bool`, *optional*) If False, will just execute `function` Here is the function: def find_executable_batch_size( function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False ): """ Args: A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. function (`callable`, *optional*) A function to wrap starting_batch_size (`int`, *optional*) The batch size to try and fit into memory auto_find_batch_size (`bool`, *optional*) If False, will just execute `function` """ if function is None: return functools.partial( find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size, ) if auto_find_batch_size: requires_backends(find_executable_batch_size, "accelerate") import accelerate.memory_utils as mem_utils return mem_utils.find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) return functools.partial(function, batch_size=starting_batch_size)
A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap. starting_batch_size (`int`, *optional*): The batch size to try and fit into memory. auto_find_batch_size (`bool`, *optional*): If False, will just execute `function`.
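A hedged sketch of the decorator form with `auto_find_batch_size` left at its default of `False`, in which case the wrapped function is simply called with `batch_size=starting_batch_size`; `inner_training_loop` is a hypothetical stand-in.

```python
@find_executable_batch_size(starting_batch_size=32)
def inner_training_loop(batch_size):
    # A real implementation would build dataloaders and train here;
    # this stand-in only reports the batch size it was given.
    return batch_size

assert inner_training_loop() == 32
```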
10,707
import os import re import numpy from .utils import ExplicitEnum, expand_dims, is_numpy_array, is_torch_tensor, logging, reshape, squeeze, tensor_size from .utils import transpose as transpose_func logger = logging.get_logger(__name__) def load_pytorch_weights_in_tf2_model( tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False, output_loading_info=False ): """Load pytorch state_dict in a TF 2.0 model.""" try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} return load_pytorch_state_dict_in_tf2_model( tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info, ) The provided code snippet includes necessary dependencies for implementing the `load_pytorch_checkpoint_in_tf2_model` function. Write a Python function `def load_pytorch_checkpoint_in_tf2_model( tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False )` to solve the following problem: Load pytorch checkpoints in a TF 2.0 model Here is the function: def load_pytorch_checkpoint_in_tf2_model( tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False ): """Load pytorch checkpoints in a TF 2.0 model""" try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise # Treats a single file as a collection of shards with 1 shard. if isinstance(pytorch_checkpoint_path, str): pytorch_checkpoint_path = [pytorch_checkpoint_path] # Loads all shards into a single state dictionary pt_state_dict = {} for path in pytorch_checkpoint_path: pt_path = os.path.abspath(path) logger.info(f"Loading PyTorch weights from {pt_path}") pt_state_dict.update(torch.load(pt_path, map_location="cpu")) logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters") return load_pytorch_weights_in_tf2_model( tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info, )
Load pytorch checkpoints in a TF 2.0 model
10,708
import os import re import numpy from .utils import ExplicitEnum, expand_dims, is_numpy_array, is_torch_tensor, logging, reshape, squeeze, tensor_size from .utils import transpose as transpose_func def load_pytorch_weights_in_tf2_model( tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False, output_loading_info=False ): """Load pytorch state_dict in a TF 2.0 model.""" try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} return load_pytorch_state_dict_in_tf2_model( tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info, ) The provided code snippet includes necessary dependencies for implementing the `load_pytorch_model_in_tf2_model` function. Write a Python function `def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False)` to solve the following problem: Load pytorch checkpoints in a TF 2.0 model Here is the function: def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False): """Load pytorch checkpoints in a TF 2.0 model""" pt_state_dict = pt_model.state_dict() return load_pytorch_weights_in_tf2_model( tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys )
Load pytorch checkpoints in a TF 2.0 model
10,709
import os import re import numpy from .utils import ExplicitEnum, expand_dims, is_numpy_array, is_torch_tensor, logging, reshape, squeeze, tensor_size from .utils import transpose as transpose_func logger = logging.get_logger(__name__) def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False, output_loading_info=False): """Load TF 2.0 model in a pytorch model""" weights = tf_model.weights return load_tf2_weights_in_pytorch_model( pt_model, weights, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info ) def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): """ Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. Args: model (`tf.keras.models.Model`): The model to load the weights into. resolved_archive_file (`str`): The location of the H5 file. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to ignore weights with shapes that don't match between the checkpoint of the model. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. """ if resolved_archive_file.endswith(".safetensors"): load_function = load_tf_weights_from_safetensors else: load_function = load_tf_weights_from_h5 return load_function( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix ) The provided code snippet includes necessary dependencies for implementing the `load_tf2_checkpoint_in_pytorch_model` function. Write a Python function `def load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False )` to solve the following problem: Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). Here is the function: def load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False ): """ Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). """ try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise import transformers from .modeling_tf_utils import load_tf_weights logger.info(f"Loading TensorFlow weights from {tf_checkpoint_path}") # Instantiate and load the associated TF 2.0 model tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning tf_model_class = getattr(transformers, tf_model_class_name) tf_model = tf_model_class(pt_model.config) if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if tf_inputs is not None: tf_model(tf_inputs, training=False) # Make sure model is built load_tf_weights(tf_model, tf_checkpoint_path) return load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info )
Load TF 2.0 HDF5 checkpoint in a PyTorch model. We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
10,710
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from transformers.training_args import TrainingArguments from transformers.utils import cached_property, is_sagemaker_dp_enabled, logging def is_sagemaker_model_parallel_available(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None
null
10,711
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPT2Config, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, T5Config, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPT2LMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFT5ForConditionalGeneration, TFTransfoXLLMHeadModel, TFWav2Vec2Model, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, Wav2Vec2Config, Wav2Vec2Model, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tf2_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging MODEL_CLASSES = { "bart": ( BartConfig, TFBartForConditionalGeneration, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), "bert": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-cased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-base-cased-finetuned-mrpc": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "dpr": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), "gpt2": ( GPT2Config, TFGPT2LMHeadModel, GPT2LMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlnet": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm-roberta": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "transfo-xl": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, 
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "openai-gpt": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "layoutlm": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), "roberta-large-mnli": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "camembert": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "flaubert": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert-base-distilled-squad": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert-visual-feature-encoder": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "ctrl": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "albert": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "t5": ( T5Config, TFT5ForConditionalGeneration, T5ForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "electra": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "wav2vec2": ( Wav2Vec2Config, TFWav2Vec2Model, Wav2Vec2Model, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def convert_pt_checkpoint_to_tf( model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True ): if model_type not in MODEL_CLASSES: raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.") config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models) config = config_class.from_json_file(config_file) config.output_hidden_states = True config.output_attentions = True print(f"Building TensorFlow model from configuration: {config}") tf_model = model_class(config) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): pytorch_checkpoint_path = cached_file( pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path) if compare_with_pt_model: tfo = tf_model(tf_model.dummy_inputs, training=False) # build the network state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu") pt_model = pt_model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) with torch.no_grad(): pto = pt_model(**pt_model.dummy_inputs) np_pt = pto[0].numpy() np_tf = tfo[0].numpy() diff = np.amax(np.abs(np_pt - np_tf)) print(f"Max absolute difference between models outputs {diff}") assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}" # Save 
pytorch-model print(f"Save TensorFlow model to {tf_dump_path}") tf_model.save_weights(tf_dump_path, save_format="h5") WEIGHTS_NAME = "pytorch_model.bin" CONFIG_NAME = "config.json" def convert_all_pt_checkpoints_to_tf( args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False, ): if args_model_type is None: model_types = list(MODEL_CLASSES.keys()) else: model_types = [args_model_type] for j, model_type in enumerate(model_types, start=1): print("=" * 100) print(f" Converting model type {j}/{len(model_types)}: {model_type}") print("=" * 100) if model_type not in MODEL_CLASSES: raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.") config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: model_shortcut_names_or_path = list(aws_model_maps.keys()) if config_shortcut_names_or_path is None: config_shortcut_names_or_path = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1 ): print("-" * 100) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f" Skipping finetuned checkpoint {model_shortcut_name}") continue model_type = model_shortcut_name elif only_convert_finetuned_models: print(f" Skipping not finetuned checkpoint {model_shortcut_name}") continue print( f" Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}" ) print("-" * 100) if config_shortcut_name in aws_config_map: config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models) else: config_file = config_shortcut_name if model_shortcut_name in aws_model_maps: model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models) else: model_file = model_shortcut_name if os.path.isfile(model_shortcut_name): model_shortcut_name = "converted_model" convert_pt_checkpoint_to_tf( model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), compare_with_pt_model=compare_with_pt_model, ) if remove_cached_files: os.remove(config_file) os.remove(model_file)
null
10,712
import warnings from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np from transformers.image_utils import PILImageResampling from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]: """ Returns the (height, width) dimensions of the image. Args: image (`np.ndarray`): The image to get the dimensions of. channel_dim (`ChannelDimension`, *optional*): Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image. Returns: A tuple of the image's height and width. """ if channel_dim is None: channel_dim = infer_channel_dimension_format(image) if channel_dim == ChannelDimension.FIRST: return image.shape[-2], image.shape[-1] elif channel_dim == ChannelDimension.LAST: return image.shape[-3], image.shape[-2] else: raise ValueError(f"Unsupported data format: {channel_dim}") The provided code snippet includes necessary dependencies for implementing the `get_resize_output_image_size` function. Write a Python function `def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], default_to_square: bool = True, max_size: Optional[int] = None, ) -> tuple` to solve the following problem: Find the target (height, width) dimension of the output image after resizing given the input image and the desired size. Args: input_image (`np.ndarray`): The image to resize. size (`int` or `Tuple[int, int]` or List[int] or Tuple[int]): The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this number. i.e, if height > width, then image will be rescaled to (size * height / width, size). default_to_square (`bool`, *optional*, defaults to `True`): How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square (`size`,`size`). If set to `False`, will replicate [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) with support for resizing only the smallest edge and providing an optional `max_size`. max_size (`int`, *optional*): The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than `max_size` after being resized according to `size`, then the image is resized again so that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter than `size`. Only used if `default_to_square` is `False`. Returns: `tuple`: The target (height, width) dimension of the output image after resizing. Here is the function: def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], default_to_square: bool = True, max_size: Optional[int] = None, ) -> tuple: """ Find the target (height, width) dimension of the output image after resizing given the input image and the desired size. Args: input_image (`np.ndarray`): The image to resize. size (`int` or `Tuple[int, int]` or List[int] or Tuple[int]): The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to this. 
If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this number. i.e, if height > width, then image will be rescaled to (size * height / width, size). default_to_square (`bool`, *optional*, defaults to `True`): How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square (`size`,`size`). If set to `False`, will replicate [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) with support for resizing only the smallest edge and providing an optional `max_size`. max_size (`int`, *optional*): The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than `max_size` after being resized according to `size`, then the image is resized again so that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter than `size`. Only used if `default_to_square` is `False`. Returns: `tuple`: The target (height, width) dimension of the output image after resizing. """ if isinstance(size, (tuple, list)): if len(size) == 2: return tuple(size) elif len(size) == 1: # Perform same logic as if size was an int size = size[0] else: raise ValueError("size must have 1 or 2 elements if it is a list or tuple") if default_to_square: return (size, size) height, width = get_image_size(input_image) short, long = (width, height) if width <= height else (height, width) requested_new_short = size if short == requested_new_short: return (height, width) new_short, new_long = requested_new_short, int(requested_new_short * long / short) if max_size is not None: if max_size <= requested_new_short: raise ValueError( f"max_size = {max_size} must be strictly greater than the requested " f"size for the smaller edge size = {size}" ) if new_long > max_size: new_short, new_long = int(max_size * new_short / new_long), max_size return (new_long, new_short) if width <= height else (new_short, new_long)
Find the target (height, width) dimension of the output image after resizing given the input image and the desired size. Args: input_image (`np.ndarray`): The image to resize. size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If `size` is an int and `default_to_square` is `False`, then the smaller edge of the image will be matched to this number. i.e., if height > width, then image will be rescaled to (size * height / width, size). default_to_square (`bool`, *optional*, defaults to `True`): How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square (`size`,`size`). If set to `False`, will replicate [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) with support for resizing only the smallest edge and providing an optional `max_size`. max_size (`int`, *optional*): The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than `max_size` after being resized according to `size`, then the image is resized again so that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e., the smaller edge may be shorter than `size`. Only used if `default_to_square` is `False`. Returns: `tuple`: The target (height, width) dimension of the output image after resizing.
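A worked example with illustrative numbers: for a channels-first image of height 480 and width 640, resizing the shorter edge to 256 preserves the aspect ratio, while an int size with the default `default_to_square=True` always yields a square.

```python
image = np.zeros((3, 480, 640))  # (channels, height, width)

# Shorter edge (the height, 480) becomes 256; the width scales to int(256 * 640 / 480) = 341.
assert get_resize_output_image_size(image, size=256, default_to_square=False) == (256, 341)

# With default_to_square=True (the default), an int size gives a square output.
assert get_resize_output_image_size(image, size=224) == (224, 224)
```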
10,713
import warnings from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np from transformers.image_utils import PILImageResampling from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str]) -> np.ndarray: """ Converts `image` to the channel dimension format specified by `channel_dim`. Args: image (`numpy.ndarray`): The image to have its channel dimension set. channel_dim (`ChannelDimension`): The channel dimension format to use. Returns: `np.ndarray`: The image with the channel dimension set to `channel_dim`. """ if not isinstance(image, np.ndarray): raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") current_channel_dim = infer_channel_dimension_format(image) target_channel_dim = ChannelDimension(channel_dim) if current_channel_dim == target_channel_dim: return image if target_channel_dim == ChannelDimension.FIRST: image = image.transpose((2, 0, 1)) elif target_channel_dim == ChannelDimension.LAST: image = image.transpose((1, 2, 0)) else: raise ValueError("Unsupported channel dimension format: {}".format(channel_dim)) return image def to_pil_image( image: Union[np.ndarray, PIL.Image.Image, "torch.Tensor", "tf.Tensor", "jnp.Tensor"], do_rescale: Optional[bool] = None, ) -> PIL.Image.Image: """ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed. Args: image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`): The image to convert to the `PIL.Image` format. do_rescale (`bool`, *optional*): Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to `True` if the image type is a floating type, `False` otherwise. Returns: `PIL.Image.Image`: The converted image. """ if isinstance(image, PIL.Image.Image): return image # Convert all tensors to numpy arrays before converting to PIL image if is_torch_tensor(image) or is_tf_tensor(image): image = image.numpy() elif is_jax_tensor(image): image = np.array(image) elif not isinstance(image, np.ndarray): raise ValueError("Input image type not supported: {}".format(type(image))) # If the channel as been moved to first dim, we put it back at the end. image = to_channel_dimension_format(image, ChannelDimension.LAST) # PIL.Image can only store uint8 values, so we rescale the image to be between 0 and 255 if needed. do_rescale = isinstance(image.flat[0], float) if do_rescale is None else do_rescale if do_rescale: image = rescale(image, 255) image = image.astype(np.uint8) return PIL.Image.fromarray(image) class ChannelDimension(ExplicitEnum): FIRST = "channels_first" LAST = "channels_last" def infer_channel_dimension_format(image: np.ndarray) -> ChannelDimension: """ Infers the channel dimension format of `image`. Args: image (`np.ndarray`): The image to infer the channel dimension of. Returns: The channel dimension of the image. """ if image.ndim == 3: first_dim, last_dim = 0, 2 elif image.ndim == 4: first_dim, last_dim = 1, 3 else: raise ValueError(f"Unsupported number of image dimensions: {image.ndim}") if image.shape[first_dim] in (1, 3): return ChannelDimension.FIRST elif image.shape[last_dim] in (1, 3): return ChannelDimension.LAST raise ValueError("Unable to infer channel dimension format") The provided code snippet includes necessary dependencies for implementing the `resize` function. 
Write a Python function `def resize( image, size: Tuple[int, int], resample=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, return_numpy: bool = True, ) -> np.ndarray` to solve the following problem: Resizes `image` to (h, w) specified by `size` using the PIL library. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to resize. size (`Tuple[int, int]`): The size to use for resizing the image. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): The filter to user for resampling. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If `None`, will use the inferred format from the input. return_numpy (`bool`, *optional*, defaults to `True`): Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is returned. Returns: `np.ndarray`: The resized image. Here is the function: def resize( image, size: Tuple[int, int], resample=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, return_numpy: bool = True, ) -> np.ndarray: """ Resizes `image` to (h, w) specified by `size` using the PIL library. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to resize. size (`Tuple[int, int]`): The size to use for resizing the image. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): The filter to user for resampling. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If `None`, will use the inferred format from the input. return_numpy (`bool`, *optional*, defaults to `True`): Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is returned. Returns: `np.ndarray`: The resized image. """ if not len(size) == 2: raise ValueError("size must have 2 elements") # For all transformations, we want to keep the same data format as the input image unless otherwise specified. # The resized image from PIL will always have channels last, so find the input format first. data_format = infer_channel_dimension_format(image) if data_format is None else data_format # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use # the pillow library to resize the image and then convert back to numpy if not isinstance(image, PIL.Image.Image): # PIL expects image to have channels last image = to_channel_dimension_format(image, ChannelDimension.LAST) image = to_pil_image(image) height, width = size # PIL images are in the format (width, height) resized_image = image.resize((width, height), resample=resample) if return_numpy: resized_image = np.array(resized_image) resized_image = to_channel_dimension_format(resized_image, data_format) return resized_image
Resizes `image` to (h, w) specified by `size` using the PIL library. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to resize. size (`Tuple[int, int]`): The size to use for resizing the image. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): The filter to use for resampling. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If `None`, will use the inferred format from the input. return_numpy (`bool`, *optional*, defaults to `True`): Whether or not to return the resized image as a numpy array. If `False`, a `PIL.Image.Image` object is returned. Returns: `np.ndarray`: The resized image.
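A minimal hedged sketch, assuming Pillow and numpy are available: resizing a random channels-first `uint8` image to 224x224. The output keeps the inferred channels-first layout because `data_format` is left as `None`.

```python
image = np.random.randint(0, 256, size=(3, 480, 640), dtype=np.uint8)
resized = resize(image, size=(224, 224))
assert resized.shape == (3, 224, 224)
```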
10,714
import warnings from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np from transformers.image_utils import PILImageResampling from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str]) -> np.ndarray: """ Converts `image` to the channel dimension format specified by `channel_dim`. Args: image (`numpy.ndarray`): The image to have its channel dimension set. channel_dim (`ChannelDimension`): The channel dimension format to use. Returns: `np.ndarray`: The image with the channel dimension set to `channel_dim`. """ if not isinstance(image, np.ndarray): raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") current_channel_dim = infer_channel_dimension_format(image) target_channel_dim = ChannelDimension(channel_dim) if current_channel_dim == target_channel_dim: return image if target_channel_dim == ChannelDimension.FIRST: image = image.transpose((2, 0, 1)) elif target_channel_dim == ChannelDimension.LAST: image = image.transpose((1, 2, 0)) else: raise ValueError("Unsupported channel dimension format: {}".format(channel_dim)) return image def rescale( image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, dtype=np.float32 ) -> np.ndarray: """ Rescales `image` by `scale`. Args: image (`np.ndarray`): The image to rescale. scale (`float`): The scale to use for rescaling the image. data_format (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. dtype (`np.dtype`, *optional*, defaults to `np.float32`): The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature extractors. Returns: `np.ndarray`: The rescaled image. """ if not isinstance(image, np.ndarray): raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") rescaled_image = image * scale if data_format is not None: rescaled_image = to_channel_dimension_format(rescaled_image, data_format) rescaled_image = rescaled_image.astype(dtype) return rescaled_image class ChannelDimension(ExplicitEnum): FIRST = "channels_first" LAST = "channels_last" def to_numpy_array(img) -> np.ndarray: if isinstance(img, PIL.Image.Image): return np.array(img) return to_numpy(img) def infer_channel_dimension_format(image: np.ndarray) -> ChannelDimension: """ Infers the channel dimension format of `image`. Args: image (`np.ndarray`): The image to infer the channel dimension of. Returns: The channel dimension of the image. """ if image.ndim == 3: first_dim, last_dim = 0, 2 elif image.ndim == 4: first_dim, last_dim = 1, 3 else: raise ValueError(f"Unsupported number of image dimensions: {image.ndim}") if image.shape[first_dim] in (1, 3): return ChannelDimension.FIRST elif image.shape[last_dim] in (1, 3): return ChannelDimension.LAST raise ValueError("Unable to infer channel dimension format") def get_channel_dimension_axis(image: np.ndarray) -> int: """ Returns the channel dimension axis of the image. Args: image (`np.ndarray`): The image to get the channel dimension axis of. Returns: The channel dimension axis of the image. 
""" channel_dim = infer_channel_dimension_format(image) if channel_dim == ChannelDimension.FIRST: return image.ndim - 3 elif channel_dim == ChannelDimension.LAST: return image.ndim - 1 raise ValueError(f"Unsupported data format: {channel_dim}") The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize( image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray` to solve the following problem: Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image - mean) / std Args: image (`np.ndarray`): The image to normalize. mean (`float` or `Iterable[float]`): The mean to use for normalization. std (`float` or `Iterable[float]`): The standard deviation to use for normalization. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If `None`, will use the inferred format from the input. Here is the function: def normalize( image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image - mean) / std Args: image (`np.ndarray`): The image to normalize. mean (`float` or `Iterable[float]`): The mean to use for normalization. std (`float` or `Iterable[float]`): The standard deviation to use for normalization. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If `None`, will use the inferred format from the input. """ if isinstance(image, PIL.Image.Image): warnings.warn( "PIL.Image.Image inputs are deprecated and will be removed in v4.26.0. Please use numpy arrays instead.", FutureWarning, ) # Convert PIL image to numpy array with the same logic as in the previous feature extractor normalize - # casting to numpy array and dividing by 255. image = to_numpy_array(image) image = rescale(image, scale=1 / 255) input_data_format = infer_channel_dimension_format(image) channel_axis = get_channel_dimension_axis(image) num_channels = image.shape[channel_axis] if isinstance(mean, Iterable): if len(mean) != num_channels: raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}") else: mean = [mean] * num_channels if isinstance(std, Iterable): if len(std) != num_channels: raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}") else: std = [std] * num_channels if input_data_format == ChannelDimension.LAST: image = (image - mean) / std else: image = ((image.T - mean) / std).T image = to_channel_dimension_format(image, data_format) if data_format is not None else image return image
Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image - mean) / std Args: image (`np.ndarray`): The image to normalize. mean (`float` or `Iterable[float]`): The mean to use for normalization. std (`float` or `Iterable[float]`): The standard deviation to use for normalization. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If `None`, will use the inferred format from the input.
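A minimal sketch of calling `normalize` as defined above on a NumPy image; the ImageNet mean/std values and the image shape are illustrative assumptions, not mandated by the function:

```python
import numpy as np

# Hypothetical 3-channel image in channels-last format with values in [0, 1].
image = np.random.rand(224, 224, 3).astype(np.float32)

# Per-channel ImageNet statistics, a common (but not required) choice for mean and std.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

normalized = normalize(image, mean=mean, std=std)
print(normalized.shape)  # (224, 224, 3) -- input layout is kept when data_format is None

# Request channels-first output instead.
normalized_cf = normalize(image, mean=mean, std=std, data_format=ChannelDimension.FIRST)
print(normalized_cf.shape)  # (3, 224, 224)
```

Passing `data_format` only changes the layout of the returned array; the statistics are always applied along the inferred channel axis of the input.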
10,715
import warnings from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np from transformers.image_utils import PILImageResampling from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str]) -> np.ndarray: """ Converts `image` to the channel dimension format specified by `channel_dim`. Args: image (`numpy.ndarray`): The image to have its channel dimension set. channel_dim (`ChannelDimension`): The channel dimension format to use. Returns: `np.ndarray`: The image with the channel dimension set to `channel_dim`. """ if not isinstance(image, np.ndarray): raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") current_channel_dim = infer_channel_dimension_format(image) target_channel_dim = ChannelDimension(channel_dim) if current_channel_dim == target_channel_dim: return image if target_channel_dim == ChannelDimension.FIRST: image = image.transpose((2, 0, 1)) elif target_channel_dim == ChannelDimension.LAST: image = image.transpose((1, 2, 0)) else: raise ValueError("Unsupported channel dimension format: {}".format(channel_dim)) return image def to_pil_image( image: Union[np.ndarray, PIL.Image.Image, "torch.Tensor", "tf.Tensor", "jnp.Tensor"], do_rescale: Optional[bool] = None, ) -> PIL.Image.Image: """ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed. Args: image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`): The image to convert to the `PIL.Image` format. do_rescale (`bool`, *optional*): Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to `True` if the image type is a floating type, `False` otherwise. Returns: `PIL.Image.Image`: The converted image. """ if isinstance(image, PIL.Image.Image): return image # Convert all tensors to numpy arrays before converting to PIL image if is_torch_tensor(image) or is_tf_tensor(image): image = image.numpy() elif is_jax_tensor(image): image = np.array(image) elif not isinstance(image, np.ndarray): raise ValueError("Input image type not supported: {}".format(type(image))) # If the channel as been moved to first dim, we put it back at the end. image = to_channel_dimension_format(image, ChannelDimension.LAST) # PIL.Image can only store uint8 values, so we rescale the image to be between 0 and 255 if needed. do_rescale = isinstance(image.flat[0], float) if do_rescale is None else do_rescale if do_rescale: image = rescale(image, 255) image = image.astype(np.uint8) return PIL.Image.fromarray(image) class ChannelDimension(ExplicitEnum): FIRST = "channels_first" LAST = "channels_last" def to_numpy_array(img) -> np.ndarray: if isinstance(img, PIL.Image.Image): return np.array(img) return to_numpy(img) def infer_channel_dimension_format(image: np.ndarray) -> ChannelDimension: """ Infers the channel dimension format of `image`. Args: image (`np.ndarray`): The image to infer the channel dimension of. Returns: The channel dimension of the image. 
""" if image.ndim == 3: first_dim, last_dim = 0, 2 elif image.ndim == 4: first_dim, last_dim = 1, 3 else: raise ValueError(f"Unsupported number of image dimensions: {image.ndim}") if image.shape[first_dim] in (1, 3): return ChannelDimension.FIRST elif image.shape[last_dim] in (1, 3): return ChannelDimension.LAST raise ValueError("Unable to infer channel dimension format") def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]: """ Returns the (height, width) dimensions of the image. Args: image (`np.ndarray`): The image to get the dimensions of. channel_dim (`ChannelDimension`, *optional*): Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image. Returns: A tuple of the image's height and width. """ if channel_dim is None: channel_dim = infer_channel_dimension_format(image) if channel_dim == ChannelDimension.FIRST: return image.shape[-2], image.shape[-1] elif channel_dim == ChannelDimension.LAST: return image.shape[-3], image.shape[-2] else: raise ValueError(f"Unsupported data format: {channel_dim}") The provided code snippet includes necessary dependencies for implementing the `center_crop` function. Write a Python function `def center_crop( image: np.ndarray, size: Tuple[int, int], data_format: Optional[Union[str, ChannelDimension]] = None, return_numpy: Optional[bool] = None, ) -> np.ndarray` to solve the following problem: Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result will always be of size `size`). Args: image (`np.ndarray`): The image to crop. size (`Tuple[int, int]`): The target size for the cropped image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. return_numpy (`bool`, *optional*): Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the previous ImageFeatureExtractionMixin method. - Unset: will return the same type as the input image. - `True`: will return a numpy array. - `False`: will return a `PIL.Image.Image` object. Returns: `np.ndarray`: The cropped image. Here is the function: def center_crop( image: np.ndarray, size: Tuple[int, int], data_format: Optional[Union[str, ChannelDimension]] = None, return_numpy: Optional[bool] = None, ) -> np.ndarray: """ Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result will always be of size `size`). Args: image (`np.ndarray`): The image to crop. size (`Tuple[int, int]`): The target size for the cropped image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. return_numpy (`bool`, *optional*): Whether or not to return the cropped image as a numpy array. 
Used for backwards compatibility with the previous ImageFeatureExtractionMixin method. - Unset: will return the same type as the input image. - `True`: will return a numpy array. - `False`: will return a `PIL.Image.Image` object. Returns: `np.ndarray`: The cropped image. """ if isinstance(image, PIL.Image.Image): warnings.warn( "PIL.Image.Image inputs are deprecated and will be removed in v4.26.0. Please use numpy arrays instead.", FutureWarning, ) image = to_numpy_array(image) return_numpy = False if return_numpy is None else return_numpy else: return_numpy = True if return_numpy is None else return_numpy if not isinstance(image, np.ndarray): raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") if not isinstance(size, Iterable) or len(size) != 2: raise ValueError("size must have 2 elements representing the height and width of the output image") input_data_format = infer_channel_dimension_format(image) output_data_format = data_format if data_format is not None else input_data_format # We perform the crop in (C, H, W) format and then convert to the output format image = to_channel_dimension_format(image, ChannelDimension.FIRST) orig_height, orig_width = get_image_size(image) crop_height, crop_width = size # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result. top = (orig_height - crop_height) // 2 bottom = top + crop_height # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result. left = (orig_width - crop_width) // 2 right = left + crop_width # Check if cropped area is within image boundaries if top >= 0 and bottom <= orig_height and left >= 0 and right <= orig_width: image = image[..., top:bottom, left:right] image = to_channel_dimension_format(image, output_data_format) return image # Otherwise, we may need to pad if the image is too small. Oh joy... new_height = max(crop_height, orig_height) new_width = max(crop_width, orig_width) new_shape = image.shape[:-2] + (new_height, new_width) new_image = np.zeros_like(image, shape=new_shape) # If the image is too small, pad it with zeros top_pad = (new_height - orig_height) // 2 bottom_pad = top_pad + orig_height left_pad = (new_width - orig_width) // 2 right_pad = left_pad + orig_width new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image top += top_pad bottom += top_pad left += left_pad right += left_pad new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)] new_image = to_channel_dimension_format(new_image, output_data_format) if not return_numpy: new_image = to_pil_image(new_image) return new_image
Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result will always be of size `size`). Args: image (`np.ndarray`): The image to crop. size (`Tuple[int, int]`): The target size for the cropped image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. return_numpy (`bool`, *optional*): Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the previous ImageFeatureExtractionMixin method. - Unset: will return the same type as the input image. - `True`: will return a numpy array. - `False`: will return a `PIL.Image.Image` object. Returns: `np.ndarray`: The cropped image.
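A short usage sketch for `center_crop`, assuming a channels-first NumPy image; the second call illustrates the zero-padding behaviour when the requested size exceeds the input:

```python
import numpy as np

# Hypothetical channels-first image (C, H, W).
image = np.random.randint(0, 256, size=(3, 480, 640), dtype=np.uint8)

cropped = center_crop(image, size=(224, 224))
print(cropped.shape)  # (3, 224, 224)

# Asking for a size larger than the input pads with zeros rather than failing.
padded = center_crop(image, size=(600, 800))
print(padded.shape)  # (3, 600, 800)
```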
10,716
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata)
null
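The `list_field` helper above exists because dataclasses reject plain mutable defaults; a hedged sketch of how it might be used inside a hypothetical arguments dataclass (the field names and defaults are illustrative):

```python
from dataclasses import dataclass
from typing import List


@dataclass
class ExampleArguments:
    # Using list_field avoids the "mutable default" ValueError dataclasses raise for bare lists.
    batch_sizes: List[int] = list_field(default=[8], metadata={"help": "Batch sizes to benchmark."})
    sequence_lengths: List[int] = list_field(default=[32, 128], metadata={"help": "Sequence lengths to benchmark."})


args = ExampleArguments()
print(args.batch_sizes, args.sequence_lengths)  # [8] [32, 128]
```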
10,717
import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments logger = logging.get_logger(__name__) The provided code snippet includes necessary dependencies for implementing the `separate_process_wrapper_fn` function. Write a Python function `def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]` to solve the following problem: This function wraps another function into its own separated process. In order to ensure accurate memory measurements it is important that the function is executed in a separate process Args: - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process - `do_multi_processing`: (`bool`) Whether to run function on separate process or not Here is the function: def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]: """ This function wraps another function into its own separated process. In order to ensure accurate memory measurements it is important that the function is executed in a separate process Args: - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process - `do_multi_processing`: (`bool`) Whether to run function on separate process or not """ def multi_process_func(*args, **kwargs): # run function in an individual # process to get correct memory def wrapper_func(queue: Queue, *args): try: result = func(*args) except Exception as e: logger.error(e) print(e) result = "N/A" queue.put(result) queue = Queue() p = Process(target=wrapper_func, args=[queue] + list(args)) p.start() result = queue.get() p.join() return result if do_multi_processing: logger.info(f"Function {func} is executed in its own process...") return multi_process_func else: return func
This function wraps another function into its own separated process. In order to ensure accurate memory measurements it is important that the function is executed in a separate process Args: - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
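A minimal sketch of wrapping a workload with `separate_process_wrapper_fn`; the workload function is hypothetical, and on platforms that spawn rather than fork the call should sit under a `__main__` guard as shown:

```python
def allocate_buffer():
    # Hypothetical workload whose allocations should not leak into the parent's memory stats.
    data = [0] * 10_000_000
    return len(data)


if __name__ == "__main__":
    wrapped = separate_process_wrapper_fn(allocate_buffer, do_multi_processing=True)
    print(wrapped())  # 10000000, or "N/A" if the child process raised an exception
```

With `do_multi_processing=False` the original function is returned unchanged, which is convenient for debugging.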
10,718
import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments _is_memory_tracing_enabled = False def is_memory_tracing_enabled(): global _is_memory_tracing_enabled return _is_memory_tracing_enabled
null
10,719
import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments if is_psutil_available(): import psutil logger = logging.get_logger(__name__) The provided code snippet includes necessary dependencies for implementing the `measure_peak_memory_cpu` function. Write a Python function `def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int` to solve the following problem: measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239 Args: - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure the peak memory - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage Returns: - `max_memory`: (`int`) consumed memory peak in Bytes Here is the function: def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int: """ measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239 Args: - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure the peak memory - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage Returns: - `max_memory`: (`int`) consumed memory peak in Bytes """ def get_cpu_memory(process_id: int) -> int: """ measures current cpu memory usage of a given `process_id` Args: - `process_id`: (`int`) process_id for which to measure memory Returns - `memory`: (`int`) consumed memory in Bytes """ process = psutil.Process(process_id) try: meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info" memory = getattr(process, meminfo_attr)()[0] except psutil.AccessDenied: raise ValueError("Error with Psutil.") return memory if not is_psutil_available(): logger.warning( "Psutil not installed, we won't log CPU memory usage. " "Install Psutil (pip install psutil) to use CPU memory tracing." ) max_memory = "N/A" else: class MemoryMeasureProcess(Process): """ `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. 
Used to measure the memory usage of a process """ def __init__(self, process_id: int, child_connection: Connection, interval: float): super().__init__() self.process_id = process_id self.interval = interval self.connection = child_connection self.num_measurements = 1 self.mem_usage = get_cpu_memory(self.process_id) def run(self): self.connection.send(0) stop = False while True: self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id)) self.num_measurements += 1 if stop: break stop = self.connection.poll(self.interval) # send results to parent pipe self.connection.send(self.mem_usage) self.connection.send(self.num_measurements) while True: # create child, parent connection child_connection, parent_connection = Pipe() # instantiate process mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval) mem_process.start() # wait until we get memory parent_connection.recv() try: # execute function function() # start parent connection parent_connection.send(0) # receive memory and num measurements max_memory = parent_connection.recv() num_measurements = parent_connection.recv() except Exception: # kill process in a clean way parent = psutil.Process(os.getpid()) for child in parent.children(recursive=True): os.kill(child.pid, SIGKILL) mem_process.join(0) raise RuntimeError("Process killed. Error in Process") # run process at least 20 * interval or until it finishes mem_process.join(20 * interval) if (num_measurements > 4) or (interval < 1e-6): break # reduce interval interval /= 10 return max_memory
measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239 Args: - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure the peak memory - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage Returns: - `max_memory`: (`int`) consumed memory peak in Bytes
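A sketch of measuring the peak RSS of a hypothetical NumPy workload with `measure_peak_memory_cpu`; psutil must be installed, otherwise the function returns the string "N/A":

```python
import numpy as np


def inference_step():
    # Hypothetical workload: allocate a ~32 MB array and reduce it.
    data = np.ones((2_000, 2_000), dtype=np.float64)
    return float(data.sum())


# Poll the current process roughly twice per second (the default) and report peak RSS in bytes.
peak_bytes = measure_peak_memory_cpu(inference_step, interval=0.5)
print(f"peak memory: {peak_bytes >> 20} MB")  # assumes psutil is available, so peak_bytes is an int
```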
10,720
import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): from torch.cuda import empty_cache as torch_empty_cache if is_tf_available(): from tensorflow.python.eager import context as tf_context if is_psutil_available(): import psutil if is_py3nvml_available(): import py3nvml.py3nvml as nvml logger = logging.get_logger(__name__) _is_memory_tracing_enabled = False class Frame(NamedTuple): """ `Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script """ filename: str module: str line_number: int event: str line_text: str class UsedMemoryState(NamedTuple): """ `UsedMemoryState` are named tuples with the following fields: - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided) """ frame: Frame cpu_memory: int gpu_memory: int MemoryTrace = List[UsedMemoryState] The provided code snippet includes necessary dependencies for implementing the `start_memory_tracing` function. Write a Python function `def start_memory_tracing( modules_to_trace: Optional[Union[str, Iterable[str]]] = None, modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None, events_to_trace: str = "line", gpus_to_trace: Optional[List[int]] = None, ) -> MemoryTrace` to solve the following problem: Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident Set Size” (the non-swapped physical memory the process is using). See https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info Args: - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.models.gpt2.modeling_gpt2') - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch') - `events_to_trace`: string or list of string of events to be recorded (see official python doc for `sys.settrace` for the list of events) default to line - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. 
Default to tracing all GPUs Return: - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script). - `UsedMemoryState` are named tuples with the following fields: - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided) `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script Here is the function: def start_memory_tracing( modules_to_trace: Optional[Union[str, Iterable[str]]] = None, modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None, events_to_trace: str = "line", gpus_to_trace: Optional[List[int]] = None, ) -> MemoryTrace: """ Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident Set Size” (the non-swapped physical memory the process is using). See https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info Args: - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.models.gpt2.modeling_gpt2') - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch') - `events_to_trace`: string or list of string of events to be recorded (see official python doc for `sys.settrace` for the list of events) default to line - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs Return: - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script). - `UsedMemoryState` are named tuples with the following fields: - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided) `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script """ if is_psutil_available(): process = psutil.Process(os.getpid()) else: logger.warning( "Psutil not installed, we won't log CPU memory usage. " "Install psutil (pip install psutil) to use CPU memory tracing." 
) process = None if is_py3nvml_available(): try: nvml.nvmlInit() devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace nvml.nvmlShutdown() except (OSError, nvml.NVMLError): logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.") log_gpu = False else: log_gpu = is_torch_available() or is_tf_available() else: logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to use GPU memory tracing." ) log_gpu = False memory_trace = [] def traceit(frame, event, args): """ Tracing method executed before running each line in a module or sub-module Record memory allocated in a list with debugging information """ global _is_memory_tracing_enabled if not _is_memory_tracing_enabled: return traceit # Filter events if events_to_trace is not None: if isinstance(events_to_trace, str) and event != events_to_trace: return traceit elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace: return traceit if "__name__" not in frame.f_globals: return traceit # Filter modules name = frame.f_globals["__name__"] if not isinstance(name, str): return traceit else: # Filter whitelist of modules to trace if modules_to_trace is not None: if isinstance(modules_to_trace, str) and modules_to_trace not in name: return traceit elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace): return traceit # Filter blacklist of modules not to trace if modules_not_to_trace is not None: if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name: return traceit elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace): return traceit # Record current tracing state (file, location in file...) lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] line = linecache.getline(filename, lineno).rstrip() traced_state = Frame(filename, name, lineno, event, line) # Record current memory state (rss memory) and compute difference with previous memory state cpu_mem = 0 if process is not None: mem = process.memory_info() cpu_mem = mem.rss gpu_mem = 0 if log_gpu: # Clear GPU caches if is_torch_available(): torch_empty_cache() if is_tf_available(): tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802 # Sum used memory for all GPUs nvml.nvmlInit() for i in devices: handle = nvml.nvmlDeviceGetHandleByIndex(i) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) gpu_mem += meminfo.used nvml.nvmlShutdown() mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem) memory_trace.append(mem_state) return traceit sys.settrace(traceit) global _is_memory_tracing_enabled _is_memory_tracing_enabled = True return memory_trace
Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident Set Size” (the non-swapped physical memory the process is using). See https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info Args: - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.models.gpt2.modeling_gpt2') - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch') - `events_to_trace`: string or list of string of events to be recorded (see official python doc for `sys.settrace` for the list of events) default to line - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs Return: - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script). - `UsedMemoryState` are named tuples with the following fields: - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided) `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script
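A sketch of a typical trace, assuming the caller wants to restrict recording to frames whose module name contains "transformers" and pairs the call with `stop_memory_tracing` (defined in the next record):

```python
# Only record events coming from modules whose name contains "transformers".
memory_trace = start_memory_tracing("transformers")

# ... run the code to profile here, e.g. a model forward pass ...

summary = stop_memory_tracing(memory_trace)
if summary is not None:
    print(f"total memory increase: {summary.total}")
    for state in summary.cumulative[:5]:  # five lines with the largest cumulative CPU+GPU increase
        print(state.frame.module, state.frame.line_number, state.cpu_gpu)
```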
10,721
import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments _is_memory_tracing_enabled = False class Memory(NamedTuple): """ `Memory` NamedTuple have a single field `bytes` and you can get a human readable str of the number of mega bytes by calling `__repr__` - `byte` (integer): number of bytes, """ bytes: int def __repr__(self) -> str: return str(bytes_to_mega_bytes(self.bytes)) class MemoryState(NamedTuple): """ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple """ frame: Frame cpu: Memory gpu: Memory cpu_gpu: Memory class MemorySummary(NamedTuple): """ `MemorySummary` namedtuple otherwise with the fields: - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing said line. - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released) - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default). """ sequential: List[MemoryState] cumulative: List[MemoryState] current: List[MemoryState] total: Memory MemoryTrace = List[UsedMemoryState] The provided code snippet includes necessary dependencies for implementing the `stop_memory_tracing` function. Write a Python function `def stop_memory_tracing( memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True ) -> Optional[MemorySummary]` to solve the following problem: Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary `ignore_released_memory` (boolean, default: None): if True we only sum memory increase to compute total memory Return: - None if `memory_trace` is None - `MemorySummary` namedtuple otherwise with the fields: - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing said line. - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line obtained by summing repeated memory increase for a line if it's executed several times. 
The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released) - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default). `Memory` named tuple have fields - `byte` (integer): number of bytes, - `string` (string): same as human readable string (ex: "3.5MB") `Frame` are namedtuple used to list the current frame state and have the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple Here is the function: def stop_memory_tracing( memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True ) -> Optional[MemorySummary]: """ Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary `ignore_released_memory` (boolean, default: None): if True we only sum memory increase to compute total memory Return: - None if `memory_trace` is None - `MemorySummary` namedtuple otherwise with the fields: - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing said line. - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released) - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default). 
`Memory` named tuple have fields - `byte` (integer): number of bytes, - `string` (string): same as human readable string (ex: "3.5MB") `Frame` are namedtuple used to list the current frame state and have the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple """ global _is_memory_tracing_enabled _is_memory_tracing_enabled = False if memory_trace is not None and len(memory_trace) > 1: memory_diff_trace = [] memory_curr_trace = [] cumulative_memory_dict = defaultdict(lambda: [0, 0, 0]) for ( (frame, cpu_mem, gpu_mem), (next_frame, next_cpu_mem, next_gpu_mem), ) in zip(memory_trace[:-1], memory_trace[1:]): cpu_mem_inc = next_cpu_mem - cpu_mem gpu_mem_inc = next_gpu_mem - gpu_mem cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc memory_diff_trace.append( MemoryState( frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc), ) ) memory_curr_trace.append( MemoryState( frame=frame, cpu=Memory(next_cpu_mem), gpu=Memory(next_gpu_mem), cpu_gpu=Memory(next_gpu_mem + next_cpu_mem), ) ) cumulative_memory_dict[frame][0] += cpu_mem_inc cumulative_memory_dict[frame][1] += gpu_mem_inc cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc cumulative_memory = sorted( list(cumulative_memory_dict.items()), key=lambda x: x[1][2], reverse=True ) # order by the total CPU + GPU memory increase cumulative_memory = list( MemoryState( frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc), ) for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory ) memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True) if ignore_released_memory: total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace) else: total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace) total_memory = Memory(total_memory) return MemorySummary( sequential=memory_diff_trace, cumulative=cumulative_memory, current=memory_curr_trace, total=total_memory, ) return None
Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary `ignore_released_memory` (boolean, default: None): if True we only sum memory increase to compute total memory Return: - None if `memory_trace` is None - `MemorySummary` namedtuple otherwise with the fields: - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing said line. - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released) - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default). `Memory` named tuple have fields - `byte` (integer): number of bytes, - `string` (string): same as human readable string (ex: "3.5MB") `Frame` are namedtuple used to list the current frame state and have the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple
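Continuing the sketch above, a hedged example of reading the returned `MemorySummary`, assuming `memory_trace` was produced by an earlier `start_memory_tracing` call:

```python
summary = stop_memory_tracing(memory_trace)
if summary is not None:
    # `sequential` follows execution order; `cumulative` is sorted by total CPU+GPU increase.
    worst = summary.cumulative[0]
    print(f"largest increase at {worst.frame.filename}:{worst.frame.line_number}")
    print(f"cpu={worst.cpu} gpu={worst.gpu} cpu+gpu={worst.cpu_gpu} total={summary.total}")
```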
10,722
import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments The provided code snippet includes necessary dependencies for implementing the `bytes_to_mega_bytes` function. Write a Python function `def bytes_to_mega_bytes(memory_amount: int) -> int` to solve the following problem: Utility to convert a number of bytes (int) into a number of mega bytes (int) Here is the function: def bytes_to_mega_bytes(memory_amount: int) -> int: """Utility to convert a number of bytes (int) into a number of mega bytes (int)""" return memory_amount >> 20
Utility to convert a number of bytes (int) into a number of mega bytes (int)
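The conversion is a plain 20-bit right shift, i.e. integer division by 2^20; for example:

```python
print(bytes_to_mega_bytes(1_073_741_824))  # 1024 (1 GiB expressed in MiB)
print(bytes_to_mega_bytes(5 * 2**20))      # 5
```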
10,723
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_py3nvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool): def run_func(func): @wraps(func) def run_in_eager_mode(*args, **kwargs): return func(*args, **kwargs) @wraps(func) @tf.function(experimental_compile=use_xla) def run_in_graph_mode(*args, **kwargs): return func(*args, **kwargs) if do_eager_mode is True: assert ( use_xla is False ), "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." return run_in_eager_mode else: return run_in_graph_mode return run_func
null
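A hedged sketch of decorating a toy computation with `run_with_tf_optimizations`, assuming TensorFlow is installed and a version that still accepts the `experimental_compile` argument used internally; the wrapped function is purely illustrative:

```python
import tensorflow as tf


@run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
def forward_pass(inputs):
    # In the graph branch this body is traced by tf.function.
    return tf.reduce_sum(inputs * 2.0)


print(forward_pass(tf.ones((4, 8))).numpy())  # 64.0
```

Setting `do_eager_mode=True` together with `use_xla=True` raises an assertion, mirroring the check in the decorator.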
10,724
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_py3nvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]: rng = random.Random() values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)] return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
null
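A one-line sketch of building a dummy batch with `random_input_ids`; the batch size, sequence length and vocabulary size below are illustrative:

```python
input_ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=100)
print(input_ids.shape, input_ids.dtype)  # (2, 8) <dtype: 'int32'>
```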
10,725
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core _init_weights = True The provided code snippet includes necessary dependencies for implementing the `no_init_weights` function. Write a Python function `def no_init_weights(_enable=True)` to solve the following problem: Context manager to globally disable weight initialization to speed up loading large models. TODO(Patrick): Delete safety argument `_enable=True` at next major version. . Here is the function: def no_init_weights(_enable=True): """ Context manager to globally disable weight initialization to speed up loading large models. TODO(Patrick): Delete safety argument `_enable=True` at next major version. . """ global _init_weights old_init_weights = _init_weights if _enable: _init_weights = False try: yield finally: _init_weights = old_init_weights
Context manager to globally disable weight initialization to speed up loading large models. TODO(Patrick): Delete safety argument `_enable=True` at next major version.
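A sketch of the intended usage, assuming the function is registered as a context manager (the decorator is not visible in the excerpt above, but `contextlib.contextmanager` is imported for this purpose) and that `build_model` is a hypothetical constructor whose initialization logic checks the module-level `_init_weights` flag:

```python
with no_init_weights(_enable=True):
    # Weight-initialization passes are skipped inside this block, which speeds up
    # building a large model whose parameters will be overwritten by a checkpoint.
    model = build_model()  # hypothetical constructor
```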
10,726
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: class GenerationMixin: """ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`]. The class exposes [`~generation_utils.GenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation_utils.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False`. - *contrastive search* by calling [`~generation_utils.GenerationMixin.contrastive_search`] if `penalty_alpha>0` and `top_k>1` - *multinomial sampling* by calling [`~generation_utils.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True`. - *beam-search decoding* by calling [`~generation_utils.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False`. - *beam-search multinomial sampling* by calling [`~generation_utils.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True`. - *diverse beam-search decoding* by calling [`~generation_utils.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1`. - *constrained beam-search decoding* by calling [`~generation_utils.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None`. """ def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # 1. retrieve all kwargs that are non-None or non-model input related. # some encoder-decoder models have different names for model and encoder if ( self.config.is_encoder_decoder and hasattr(self, "encoder") and self.encoder.main_input_name != self.main_input_name ): input_name = self.encoder.main_input_name else: input_name = self.main_input_name model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} # 2. 
check whether model_input_name is passed as kwarg # if yes and `inputs` is None use kwarg inputs inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs}` were passed alongside " f"{input_name} which is not allowed." f"Make sure to either pass {inputs} or {input_name}=..." ) elif inputs_kwarg is not None: inputs = inputs_kwarg # 3. models with `input_ids` can also make use of `inputs_embeds` if self._can_retrieve_inputs_from_name(inputs, "inputs_embeds", model_kwargs): inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" # 4. Only encoder-decoder models can have non `input_ids` input format if not self.config.is_encoder_decoder and input_name != "input_ids": raise ValueError( f"If {input_name} is passed as model-specific keyword " "input then model has to be an encoder-decoder and not a " f"{self.__class__.__name__}." ) # 5. if `inputs` is still None, try to create `input_ids` from BOS token if inputs is None: inputs = self._prepare_input_ids_for_generation(bos_token_id, model_kwargs.get("encoder_outputs")) return inputs, input_name, model_kwargs def _can_retrieve_inputs_from_name( self, inputs: Optional[torch.Tensor], name: str, model_kwargs: Dict[str, torch.Tensor] ) -> torch.Tensor: """ If `inputs` is None and `name` is in both forward function and keyword arguments, then inputs can be retrieved from name """ can_retrieve_inputs = model_kwargs.get(name, None) is not None and name in set( inspect.signature(self.forward).parameters.keys() ) if can_retrieve_inputs and inputs is not None: raise ValueError(f"Cannot only pass one of {name} and {self.main_input_name}") return can_retrieve_inputs def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. """ return logits def _prepare_input_ids_for_generation( self, bos_token_id: Optional[int], encoder_outputs: Optional[ModelOutput] ) -> torch.LongTensor: if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, inputs: torch.Tensor, pad_token_id: Optional[int], eos_token_id: Optional[int], ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() else: return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: # 1. get encoder encoder = self.get_encoder() # 2. 
prepare encoder args and encoder kwargs from model kwargs irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } # 3. make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.main_input_name encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, decoder_start_token_id: int = None, bos_token_id: int = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, device: torch.device = None, ) -> torch.LongTensor: if model_kwargs is not None and "decoder_input_ids" in model_kwargs: return model_kwargs.pop("decoder_input_ids") else: decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is None: device = self.device return torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." 
) def _expand_inputs_for_generation( input_ids: torch.LongTensor, expand_size: int = 1, is_encoder_decoder: bool = False, attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[ModelOutput] = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if is_encoder_decoder: if encoder_outputs is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs def _update_model_kwargs_for_generation( outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past if "past_key_values" in outputs: model_kwargs["past"] = outputs.past_key_values elif "mems" in outputs: model_kwargs["past"] = outputs.mems elif "past_buckets_states" in outputs: model_kwargs["past"] = outputs.past_buckets_states else: model_kwargs["past"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return model_kwargs def _reorder_cache(self, past, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to" f" enable beam search for {self.__class__}" ) def _get_logits_warper( self, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, temperature: Optional[float] = None, num_beams: Optional[int] = None, renormalize_logits: Optional[bool] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances used for multinomial sampling. 
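        Example (an illustrative sketch only -- the toy tensors and the particular warper settings below are
        placeholders, not values this method derives from the model config):

        ```python
        >>> import torch
        >>> from transformers import LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper

        >>> # fake next-token logits for a batch of 1 and a vocabulary of 5 tokens
        >>> input_ids = torch.tensor([[0, 1]])
        >>> next_token_logits = torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0]])

        >>> # the same kind of list this helper assembles from `temperature`, `top_k` and `top_p`
        >>> warpers = LogitsProcessorList(
        ...     [TemperatureLogitsWarper(0.7), TopKLogitsWarper(top_k=3), TopPLogitsWarper(top_p=0.95)]
        ... )
        >>> warped = warpers(input_ids, next_token_logits)
        >>> warped.shape
        torch.Size([1, 5])
        ```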
""" # init warp parameters top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p typical_p = typical_p if typical_p is not None else self.config.typical_p temperature = temperature if temperature is not None else self.config.temperature # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if temperature is not None and temperature != 1.0: warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if typical_p is not None and typical_p < 1.0: warpers.append(TypicalLogitsWarper(mass=typical_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: warpers.append(LogitNormalization()) return warpers def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, encoder_no_repeat_ngram_size: int, input_ids_seq_length: int, encoder_input_ids: torch.LongTensor, bad_words_ids: List[List[int]], min_length: int, max_length: int, eos_token_id: int, forced_bos_token_id: int, forced_eos_token_id: int, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int, num_beam_groups: int, diversity_penalty: float, remove_invalid_values: bool, exponential_decay_length_penalty: Tuple, logits_processor: Optional[LogitsProcessorList], renormalize_logits: Optional[bool], suppress_tokens: Optional[List[int]] = None, begin_suppress_tokens: Optional[List[int]] = None, forced_decoder_ids: Optional[List[List[int]]] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] instances used to modify the scores of the language model head. 
""" processors = LogitsProcessorList() # init warp parameters repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) encoder_no_repeat_ngram_size = ( encoder_no_repeat_ngram_size if encoder_no_repeat_ngram_size is not None else self.config.encoder_no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty forced_bos_token_id = ( forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id ) forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) remove_invalid_values = ( remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values ) exponential_decay_length_penalty = ( exponential_decay_length_penalty if exponential_decay_length_penalty is not None else self.config.exponential_decay_length_penalty ) suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens begin_suppress_tokens = ( begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens ) if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"): forced_decoder_ids = self.config.forced_decoder_ids # instantiate processors list # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if diversity_penalty is not None and diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups ) ) if repetition_penalty is not None and repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size)) if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0: if self.config.is_encoder_decoder: processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids)) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if bad_words_ids is not None: processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id)) if min_length is not None and eos_token_id is not None and min_length > 0: processors.append(MinLengthLogitsProcessor(min_length, eos_token_id)) if prefix_allowed_tokens_fn is not None: processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams // num_beam_groups)) if forced_bos_token_id is not None: processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id)) if forced_eos_token_id is not None: processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)) if remove_invalid_values is True: processors.append(InfNanRemoveLogitsProcessor()) if exponential_decay_length_penalty is not None: processors.append( ExponentialDecayLengthPenalty(exponential_decay_length_penalty, eos_token_id, input_ids_seq_length) ) if suppress_tokens is not None: 
processors.append(SuppressTokensLogitsProcessor(suppress_tokens)) if begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1 if forced_decoder_ids is not None: begin_index += forced_decoder_ids[-1][0] # generation starts after the last token that is forced processors.append(SuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index)) if forced_decoder_ids is not None: processors.append(ForceTokensLogitsProcessor(forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: processors.append(LogitNormalization()) return processors def _get_stopping_criteria( self, max_length: Optional[int], max_time: Optional[float], stopping_criteria: Optional[StoppingCriteriaList] ) -> StoppingCriteriaList: criteria = StoppingCriteriaList() if max_length is not None: criteria.append(MaxLengthCriteria(max_length=max_length)) if max_time is not None: criteria.append(MaxTimeCriteria(max_time=max_time)) criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) return criteria def _merge_criteria_processor_list( self, default_list: Union[LogitsProcessorList, StoppingCriteriaList], custom_list: Union[LogitsProcessorList, StoppingCriteriaList], ) -> Union[LogitsProcessorList, StoppingCriteriaList]: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def compute_transition_beam_scores( self, sequences: torch.Tensor, scores: Tuple[torch.Tensor], beam_indices: torch.Tensor, eos_token_id: int = None, ): """compute the transition probabilities of sequences given generation scores and beam indices""" # 1. reshape scores as [vocab_size * batch_size, # generation steps] # with batch_size being 2 * vocab_size and # generation steps being # seq_len - input_length scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1) # 2. cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max() beam_indices = beam_indices[:, :max_beam_length] beam_indices_mask = beam_indices_mask[:, :max_beam_length] # 3. Set indices of beams that finished early to 0 # such indices will be masked correctly afterwards beam_indices[beam_indices_mask] = 0 # 4. multiply beam_indices with vocab size to gather correctly from scores beam_sequence_indices = beam_indices * self.config.vocab_size # 5. Define which indices contributed to scores cut_idx = sequences.shape[-1] - max_beam_length indices = sequences[:, cut_idx:] + beam_sequence_indices # 6. Compute scores transition_scores = scores.gather(0, indices) # 7. 
Mask out transition_scores of beams that stopped early transition_scores[beam_indices_mask] = 0 return transition_scores def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not hasattr(self, "prepare_inputs_for_generation"): generate_compatible_mappings = [ MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function if self.config.is_encoder_decoder: for key in ["decoder_input_ids"]: model_kwargs.pop(key, None) unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs` if often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept `kwargs`, then a stricter check can be made ;) if "kwargs" in model_args: model_args |= set(inspect.signature(self.forward).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, inputs: Optional[torch.Tensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, penalty_alpha: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, force_words_ids: Optional[Union[Iterable[int], Iterable[Iterable[int]]]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, encoder_no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, max_time: Optional[float] = None, max_new_tokens: Optional[int] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, num_beam_groups: Optional[int] = None, diversity_penalty: Optional[float] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, logits_processor: Optional[LogitsProcessorList] = None, renormalize_logits: Optional[bool] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, constraints: Optional[List[Constraint]] = None, 
output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, forced_bos_token_id: Optional[int] = None, forced_eos_token_id: Optional[int] = None, remove_invalid_values: Optional[bool] = None, synced_gpus: Optional[bool] = False, exponential_decay_length_penalty: Optional[Tuple[int, float]] = None, suppress_tokens: Optional[List[int]] = None, begin_suppress_tokens: Optional[List[int]] = None, forced_decoder_ids: Optional[List[List[int]]] = None, **model_kwargs, ) -> Union[GenerateOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head. The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models: - *greedy decoding* by calling [`~generation_utils.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False`. - *contrastive search* by calling [`~generation_utils.GenerationMixin.contrastive_search`] if `penalty_alpha>0.` and `top_k>1` - *multinomial sampling* by calling [`~generation_utils.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True`. - *beam-search decoding* by calling [`~generation_utils.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False`. - *beam-search multinomial sampling* by calling [`~generation_utils.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True`. - *diverse beam-search decoding* by calling [`~generation_utils.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1`. - *constrained beam-search decoding* by calling [`~generation_utils.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None`. <Tip warning={true}> Apart from `inputs`, all the arguments below will default to the value of the attribute of the same name as defined in the model's config (`config.json`) which in turn defaults to the [`~modeling_utils.PretrainedConfig`] of the model. </Tip> Most of these parameters are explained in more detail in [this blog post](https://huggingface.co/blog/how-to-generate). Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. max_length (`int`, *optional*, defaults to `model.config.max_length`): The maximum length the generated tokens can have. Corresponds to the length of the input prompt + `max_new_tokens`. In general, prefer the use of `max_new_tokens`, which ignores the number of tokens in the prompt. max_new_tokens (`int`, *optional*): The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. min_length (`int`, *optional*, defaults to `model.config.min_length` or 10 if the config does not set any value): The minimum length of the sequence to be generated. do_sample (`bool`, *optional*, defaults to `model.config.do_sample` or `False` if the config does not set any value): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. 
num_beams (`int`, *optional*, defaults to `model.config.num_beams` or 1 if the config does not set any value): Number of beams for beam search. 1 means no beam search. temperature (`float`, *optional*, defaults to `model.config.temperature` or 1.0 if the config does not set any value): The value used to module the next token probabilities. penalty_alpha (`float`, *optional*, defaults to `model.config.penalty_alpha` or None if the config does not set any value): The values balance the model confidence and the degeneration penalty in contrastive search decoding. top_k (`int`, *optional*, defaults to `model.config.top_k` or 50 if the config does not set any value): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to `model.config.top_p` or 1.0 if the config does not set any value): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. typical_p (`float`, *optional*, defaults to `model.config.typical_p` or 1.0 if the config does not set any value): The amount of probability mass from the original distribution to be considered in typical decoding. If set to 1.0 it takes no effect. See [this paper](https://arxiv.org/pdf/2202.00666.pdf) for more details. repetition_penalty (`float`, *optional*, defaults to `model.config.repetition_penalty` or 1.0 if the config does not set any value): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. pad_token_id (`int`, *optional*, defaults to `model.config.pad_token_id`): The id of the *padding* token. bos_token_id (`int`, *optional*, defaults to `model.config.bos_token_id`): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*, defaults to `model.config.eos_token_id`): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to `model.config.length_penalty` or 1.0 if the config does not set any value): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.encoder_no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. bad_words_ids(`List[List[int]]`, *optional*, defaults to `model.config.bad_words_ids`): List of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids`. force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*): List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of words that must be included, the opposite to `bad_words_ids`. 
If given `List[List[List[int]]]`, this triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one can allow different forms of each word. num_return_sequences(`int`, *optional*, defaults to `model.config.num_return_sequences` or 1 if the config does not set any value): The number of independently computed returned sequences for each element in the batch. max_time(`float`, *optional*): The maximum amount of time you allow the computation to run for in seconds. generation will still finish the current pass after allocated time has been passed. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values are in `[0, 1]`, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as `input_ids` that masks the pad token. [What are attention masks?](../glossary#attention-mask) decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. num_beam_groups (`int`, *optional*, defaults to `model.config.num_beam_groups` or 1 if the config does not set any value): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. diversity_penalty (`float`, *optional*, defaults to `model.config.diversity_penalty` or 0.0 if the config does not set any value): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config an error is thrown. This feature is intended for advanced users. renormalize_logits (`bool`, *optional*, defaults to `False`): Whether to renormalize the logits after applying all the logits processors or warpers (including the custom ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the score logits are normalized but some logit processors or warpers break the normalization. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criteria is passed that is already created with the arguments or a model's config an error is thrown. 
This feature is intended for advanced users. constraints (`List[Constraint]`, *optional*): Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by `Constraint` objects, in the most sensible way possible. output_attentions (`bool`, *optional*, defaults to `model.config.output_attentions` or `False` if the config does not set any value): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `model.config.output_hidden_states` or `False` if the config does not set any value): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `model.config.output_scores` or `False` if the config does not set any value): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `model.config.return_dict_in_generate` or `False` if the config does not set any value): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`): The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target language token. forced_eos_token_id (`int`, *optional*, defaults to `model.config.forced_eos_token_id`): The id of the token to force as the last generated token when `max_length` is reached. remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`): Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method to crash. Note that using `remove_invalid_values` can slow down generation. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) exponential_decay_length_penalty (`tuple(int, float)`, *optional*, defaults to `model.config.exponential_decay_length_penalty`): This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where penalty starts and `decay_factor` represents the factor of exponential decay suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`): A list of tokens that will be supressed at generation. The `SupressTokens` logit processor will set their log probs to `-inf` so that they are not sampled. begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`): A list of tokens that will be supressed at the begining of the generation. The `SupressBeginTokens` logit processor will set their log probs to `-inf` so that they are not sampled. forced_decoder_ids (`List[List[int]]`, *optional*, defaults to `model.config.forced_decoder_ids`): A list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token of index 123. model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. 
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchDecoderOnlyOutput`], - [`~generation_utils.SampleDecoderOnlyOutput`], - [`~generation_utils.BeamSearchDecoderOnlyOutput`], - [`~generation_utils.BeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchEncoderDecoderOutput`], - [`~generation_utils.SampleEncoderDecoderOutput`], - [`~generation_utils.BeamSearchEncoderDecoderOutput`], - [`~generation_utils.BeamSampleEncoderDecoderOutput`] Examples: Greedy Decoding: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> prompt = "Today I believe we can finally" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> # generate up to 30 tokens >>> outputs = model.generate(input_ids, do_sample=False, max_length=30) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\n'] ``` Multinomial Sampling: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> prompt = "Today I believe we can finally" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> # sample up to 30 tokens >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.generate(input_ids, do_sample=True, max_length=30) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today I believe we can finally get rid of discrimination," said Rep. Mark Pocan (D-Wis.).\n\n"Just look at the'] ``` Beam-search decoding: ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> sentence = "Paris is one of the densest populated areas in Europe." >>> input_ids = tokenizer(sentence, return_tensors="pt").input_ids >>> outputs = model.generate(input_ids, num_beams=5) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Paris ist eines der dichtesten besiedelten Gebiete Europas.'] ```""" # 0. Validate the `.generate()` call self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) # 1. 
Set generation parameters if not already defined bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id num_beams = num_beams if num_beams is not None else self.config.num_beams length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups do_sample = do_sample if do_sample is not None else self.config.do_sample num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id if eos_token_id is None and hasattr(self.config, "decoder"): eos_token_id = self.config.decoder.eos_token_id if pad_token_id is None and eos_token_id is not None: if model_kwargs.get("attention_mask", None) is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." ) logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # 2. Define model inputs # inputs_tensor has to be defined # model_input_name is defined if model-specific keyword input is passed # otherwise model_input_name is None # all model-specific keyword inputs are removed from `model_kwargs` inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] # 3. Define other model kwargs model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states model_kwargs["use_cache"] = use_cache accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys()) requires_attention_mask = "encoder_outputs" not in model_kwargs if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, pad_token_id, eos_token_id ) # decoder-only models should use left-padding for generation if not self.config.is_encoder_decoder: if pad_token_id is not None and torch.sum(inputs_tensor[:, -1] == pad_token_id) > 0: logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." 
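                    # (Right-padding puts pad tokens in the final positions, and autoregressive generation always
                    # continues from the last position, so the next token would be predicted from a pad slot rather
                    # than from the prompt's final token; left-padding keeps the prompt token in that slot.)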
) if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 4. Prepare `input_ids` which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids = self._prepare_decoder_input_ids_for_generation( batch_size, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, model_kwargs=model_kwargs, device=inputs_tensor.device, ) else: # if decoder-only then inputs_tensor has to be `input_ids` input_ids = inputs_tensor # 5. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = input_ids.shape[-1] if max_length is None and max_new_tokens is None: warnings.warn( "Neither `max_length` nor `max_new_tokens` has been set, `max_length` will default to " f"{self.config.max_length} (`self.config.max_length`). Controlling `max_length` via the config is " "deprecated and `max_length` will be removed from the config in v5 of Transformers -- we recommend " "using `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif max_length is None and max_new_tokens is not None: max_length = max_new_tokens + input_ids_seq_length elif max_length is not None and max_new_tokens is not None: raise ValueError( "Both `max_new_tokens` and `max_length` have been set but they serve the same purpose -- setting a" " limit to the generated output length. Remove one of those arguments. Please refer to the" " documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) # default to config if still None max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length if min_length is not None and min_length > max_length: raise ValueError( f"Unfeasible length constraints: the minimum length ({min_length}) is larger than the maximum " f"length ({max_length})" ) if input_ids_seq_length >= max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {max_length}. This can lead to unexpected behavior. You should consider increasing " "`max_new_tokens`." ) # 6. 
determine generation mode is_constraint_gen_mode = constraints is not None or force_words_ids is not None is_contrastive_search_gen_mode = ( top_k is not None and top_k > 1 and do_sample is False and penalty_alpha is not None and penalty_alpha > 0 ) is_greedy_gen_mode = ( (num_beams == 1) and (num_beam_groups == 1) and do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_sample_gen_mode = ( (num_beams == 1) and (num_beam_groups == 1) and do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_gen_mode = ( (num_beams > 1) and (num_beam_groups == 1) and do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_sample_gen_mode = ( (num_beams > 1) and (num_beam_groups == 1) and do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_group_beam_gen_mode = ( (num_beams > 1) and (num_beam_groups > 1) and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) if num_beam_groups > num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." ) if self.device.type != input_ids.device.type: warnings.warn( "You are calling .generate() with the `input_ids` being on a device type different" f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model" f" is on {self.device.type}. You may experience unexpected behaviors or slower generation." " Please make sure that you have put `input_ids` to the" f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before" " running `.generate()`.", UserWarning, ) # 7. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=inputs_tensor, bad_words_ids=bad_words_ids, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, num_beams=num_beams, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty, remove_invalid_values=remove_invalid_values, exponential_decay_length_penalty=exponential_decay_length_penalty, logits_processor=logits_processor, renormalize_logits=renormalize_logits, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, forced_decoder_ids=forced_decoder_ids, ) # 8. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria ) # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search." ) # 10. 
run greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_contrastive_search_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing contrastive search." ) return self.contrastive_search( input_ids, top_k=top_k, penalty_alpha=penalty_alpha, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, typical_p=typical_p, temperature=temperature, num_beams=num_beams, renormalize_logits=renormalize_logits, ) # 11. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 12. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, typical_p=typical_p, temperature=temperature, num_beams=num_beams, renormalize_logits=renormalize_logits, ) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size * num_return_sequences, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams * num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. 
run beam sample return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_group_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if num_beams % num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if typical_p is not None: raise ValueError("Decoder argument `typical_p` is not supported with beam groups.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, max_length=stopping_criteria.max_length, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_constraint_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if num_beams <= 1: raise ValueError("`num_beams` needs to be greater than 1 for constrained generation.") if do_sample: raise ValueError("`do_sample` needs to be false for constrained generation.") if num_beam_groups is not None and num_beam_groups > 1: raise ValueError("`num_beam_groups` not supported yet for constrained generation.") final_constraints = [] if constraints is not None: final_constraints = constraints if force_words_ids is not None: def typeerror(): raise ValueError( "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`" f"of positive integers, but is {force_words_ids}." ) if not isinstance(force_words_ids, list) or len(force_words_ids) == 0: typeerror() for word_ids in force_words_ids: if isinstance(word_ids[0], list): if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any(not isinstance(token_ids, list) for token_ids in word_ids): typeerror() if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in word_ids ): typeerror() constraint = DisjunctiveConstraint(word_ids) else: if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids): typeerror() constraint = PhrasalConstraint(word_ids) final_constraints.append(constraint) # 10. 
prepare beam search scorer constrained_beam_scorer = ConstrainedBeamSearchScorer( constraints=final_constraints, batch_size=batch_size, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.constrained_beam_search( input_ids, constrained_beam_scorer=constrained_beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) def contrastive_search( self, input_ids: torch.LongTensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[ContrastiveSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. 
return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.ContrastiveSearchDecoderOnlyOutput`], [`~generation_utils.ContrastiveSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.ContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.ContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="pt") >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)]) >>> outputs = model.contrastive_search( ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare inputs model_kwargs["use_cache"] = True model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # if the first step in the loop, encode all the prefix and obtain three parameters: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past") is None: # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) # past_key_values is required for fast decoding if "past_key_values" not in outputs: raise ValueError( f"{self.__class__.__name__} cannot return `past_key_values` and can therefore **not** be used " "for contrastive search." ) past_key_values = outputs.past_key_values # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :] model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty bsz, seqlen, embed_dim = last_hidden_states.size() # logits processor logit_for_next_step = logits_processor(input_ids, logit_for_next_step) logit_for_next_step = logits_warper(input_ids, logit_for_next_step) next_probs = nn.functional.softmax(logit_for_next_step, dim=-1) _, top_k_ids = torch.topk(logit_for_next_step, dim=-1, k=top_k) top_k_probs = torch.gather(next_probs, dim=1, index=top_k_ids) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logit_for_next_step,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # enlarge the past_key_values new_key_values = [] for layer in past_key_values: items = [] # item is either the key or the value matrix for item in layer: bsz, num_head, seq_len, esz = item.size() item = ( item.unsqueeze(1) .expand(-1, top_k, -1, -1, -1) .reshape(bsz * top_k, num_head, seq_len, esz) .contiguous() ) # [bsz*beam, num_head, seq_len, esz] items.append(item) new_key_values.append(items) past_key_values = new_key_values # build next attention mask if "attention_mask" in model_inputs: attention_mask = model_kwargs["attention_mask"] # [B, S] attention_mask = attention_mask.unsqueeze(1).expand(-1, top_k, -1).reshape(-1, attention_mask.size(-1)) else: attention_mask = None # encoder-decoder model also contains the `encoder_outputs` if self.config.is_encoder_decoder and "encoder_outputs" in model_inputs: encoder_outputs = model_inputs["encoder_outputs"] else: encoder_outputs = None 
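# At this point the cache, the attention mask and (for encoder-decoder models) the encoder outputs have been
# tiled to `batch_size * top_k` rows, so all `top_k` candidate tokens per batch element can be scored with a
# single batched forward pass below; candidate `k` of batch element `b` occupies row `b * top_k + k`.
# `_ranking_fast` then re-ranks the candidates by trading off model confidence against the degeneration
# penalty, roughly `(1 - penalty_alpha) * p(candidate) - penalty_alpha * max_cosine_similarity` with respect
# to the previously generated hidden states.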
next_model_inputs = self.prepare_inputs_for_generation( top_k_ids.view(-1, 1), past=past_key_values, attention_mask=attention_mask, use_cache=True, encoder_outputs=encoder_outputs, ) # compute the candidate tokens by the language model and collects their hidden_states outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) past_key_values = outputs.past_key_values logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = ( last_hidden_states.unsqueeze(1).expand(-1, top_k, -1, -1).reshape(bsz * top_k, seqlen, embed_dim) ) # compute the degeneratin penalty and re-rank the candidates based on the degeneration penalty and the # model confidence selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx] next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k)) next_hidden = next_hidden[range(bsz), selected_idx, :] last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1) decoder_hidden_states = [] for layer in full_hidden_states: layer = torch.stack(torch.split(layer.squeeze(dim=1), top_k)) layer = layer[range(bsz), selected_idx, :] decoder_hidden_states.append(layer) # select the past_key_value new_key_values = [] for layer in past_key_values: items = [] # item is either the key or the value matrix for item in layer: bsz_and_beam, num_head, seq_len, esz = item.size() bsz = int(bsz_and_beam // top_k) item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz] item = item[range(bsz), selected_idx, :, :, :] # [B, num_head, seq_len, esz] items.append(item) new_key_values.append(items) past_key_values = new_key_values logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(bsz), selected_idx, :] # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: outputs = Seq2SeqLMOutput( past_key_values=past_key_values, decoder_hidden_states=decoder_hidden_states, ) else: outputs = CausalLMOutputWithPast( past_key_values=past_key_values, hidden_states=decoder_hidden_states, attentions=model_kwargs["attention_mask"], ) # contrastive_search main logic end if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in 
one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return ContrastiveSearchEncoderDecoderOutput( sequences=input_ids, scores=scores, decoder_hidden_states=decoder_hidden_states, ) else: return ContrastiveSearchDecoderOnlyOutput( sequences=input_ids, scores=scores, hidden_states=decoder_hidden_states, ) else: return input_ids def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. 
Return: [`~generation_utils.GreedySearchDecoderOnlyOutput`], [`~generation_utils.GreedySearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "It might be possible to" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> outputs = model.greedy_search( ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["It might be possible to get a better understanding of the nature of the problem, but it's not"] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) 
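# `unfinished_sequences` holds a 1 for every sequence that is still being generated; once a sequence emits
# `eos_token_id` its entry is set to 0 and all of its subsequent positions are filled with `pad_token_id`.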
this_peer_finished = False # used by synced_gpus only sync_func = torch.cuda.synchronize while True: timers("generate-forward").start(sync_func) if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_tokens_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: timers("generate-forward").stop(sync_func) break else: this_peer_finished = True timers("generate-forward").stop(sync_func) if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, 
pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.SampleDecoderOnlyOutput`], [`~generation_utils.SampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.SampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... StoppingCriteriaList, ... MaxLengthCriteria, ... 
) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> # instantiate logits warpers >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.sample( ... input_ids, ... logits_processor=logits_processor, ... logits_warper=logits_warper, ... stopping_criteria=stopping_criteria, ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: 
Optional[bool] = False, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # let's run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You have not defined any stopping_criteria; this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam with 0 and the rest with -1e9.
This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores_processed,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + 
(beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. 
eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSampleDecoderOnlyOutput`], [`~generation_utils.BeamSampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> outputs = model.beam_sample( ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
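# (`adjust_logits_during_generation` is a no-op for most models; Marian-style models override it, e.g. to
# push the pad token's logit to -inf, hence the hack mentioned above.)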
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logits_warper(input_ids, next_token_scores_processed),) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSampleEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSampleDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) 
else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ): r""" Generates sequences of token ids for models with a language modeling head using **diverse beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs that will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ...
AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... HammingDiversityLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run diverse beam search using 6 beams >>> num_beams = 6 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... num_beam_groups=3, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.group_beam_search( ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape if return_dict_in_generate and output_scores: beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)] else: beam_indices = None if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." 
) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in # the same group don't produce same tokens everytime. beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
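# Note on the scoring step that follows: the selected group's logits are converted to
# log-probabilities and then run through the logits processors. For group beam search the
# relevant processor is `HammingDiversityLogitsProcessor` (see the doctest above), which,
# roughly, subtracts `diversity_penalty` times the number of times a candidate token has
# already been chosen by earlier groups at this step (tracked in `current_tokens`), pushing
# later groups away from the earlier groups' picks. The running `beam_scores` are then added
# and the top `2 * group_size` candidates per sentence are handed to `beam_scorer.process`.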
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores_processed = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1) next_token_scores = next_token_scores.expand_as(next_token_scores_processed) if output_scores: processed_score[batch_group_indices] = next_token_scores_processed # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=process_beam_indices, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] if return_dict_in_generate and output_scores: beam_indices[beam_group_idx] = tuple( beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0])) ) input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * torch_int_div(beam_idx, group_size) + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=final_beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if 
self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def constrained_beam_search( self, input_ids: torch.LongTensor, constrained_beam_scorer: ConstrainedBeamSearchScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **constrained beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. constrained_beam_scorer (`ConstrainedBeamSearchScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. For more information, the documentation of [`ConstrainedBeamSearchScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. 
output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... ConstrainedBeamSearchScorer, ... PhrasalConstraint, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # let's run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> constraint_str = "Sie" >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] >>> # instantiate beam scorer >>> beam_scorer = ConstrainedBeamSearchScorer( ... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.constrained_beam_search( ... input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ...
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt sind Sie?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(constrained_beam_scorer._beam_hyps) num_beams = constrained_beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) scores_for_all_vocab = next_token_scores.clone() # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = (next_tokens / vocab_size).long() next_tokens = next_tokens % vocab_size # stateless beam_outputs = constrained_beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, scores_for_all_vocab, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if constrained_beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = constrained_beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, 
) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): try: return next(parameter.parameters()).device except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device
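# The `group_beam_search` and `constrained_beam_search` loops above are normally not called
# directly; `generate()` dispatches to them based on its arguments (`num_beam_groups > 1`
# routes to group beam search, and `constraints` / `force_words_ids` route to constrained
# beam search), as documented further down in this file. A minimal usage sketch through the
# public API -- the "t5-small" checkpoint and the specific argument values are illustrative
# choices, not part of the original source:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids

# Diverse (group) beam search: `num_beams` must be divisible by `num_beam_groups`.
diverse_ids = model.generate(
    input_ids, num_beams=6, num_beam_groups=3, diversity_penalty=1.0, max_new_tokens=20
)

# Constrained beam search: force the formal pronoun "Sie" to appear in the output.
force_words_ids = tokenizer(["Sie"], add_special_tokens=False).input_ids
constrained_ids = model.generate(
    input_ids, num_beams=4, force_words_ids=force_words_ids, max_new_tokens=20
)

print(tokenizer.batch_decode(diverse_ids, skip_special_tokens=True))
print(tokenizer.batch_decode(constrained_ids, skip_special_tokens=True))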
null
10,727
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: class GenerationMixin: """ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`]. The class exposes [`~generation_utils.GenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation_utils.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False`. - *contrastive search* by calling [`~generation_utils.GenerationMixin.contrastive_search`] if `penalty_alpha>0` and `top_k>1` - *multinomial sampling* by calling [`~generation_utils.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True`. - *beam-search decoding* by calling [`~generation_utils.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False`. - *beam-search multinomial sampling* by calling [`~generation_utils.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True`. - *diverse beam-search decoding* by calling [`~generation_utils.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1`. - *constrained beam-search decoding* by calling [`~generation_utils.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None`. """ def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # 1. retrieve all kwargs that are non-None or non-model input related. # some encoder-decoder models have different names for model and encoder if ( self.config.is_encoder_decoder and hasattr(self, "encoder") and self.encoder.main_input_name != self.main_input_name ): input_name = self.encoder.main_input_name else: input_name = self.main_input_name model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} # 2. 
check whether model_input_name is passed as kwarg # if yes and `inputs` is None use kwarg inputs inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs}` were passed alongside " f"{input_name} which is not allowed." f"Make sure to either pass {inputs} or {input_name}=..." ) elif inputs_kwarg is not None: inputs = inputs_kwarg # 3. models with `input_ids` can also make use of `inputs_embeds` if self._can_retrieve_inputs_from_name(inputs, "inputs_embeds", model_kwargs): inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" # 4. Only encoder-decoder models can have non `input_ids` input format if not self.config.is_encoder_decoder and input_name != "input_ids": raise ValueError( f"If {input_name} is passed as model-specific keyword " "input then model has to be an encoder-decoder and not a " f"{self.__class__.__name__}." ) # 5. if `inputs` is still None, try to create `input_ids` from BOS token if inputs is None: inputs = self._prepare_input_ids_for_generation(bos_token_id, model_kwargs.get("encoder_outputs")) return inputs, input_name, model_kwargs def _can_retrieve_inputs_from_name( self, inputs: Optional[torch.Tensor], name: str, model_kwargs: Dict[str, torch.Tensor] ) -> torch.Tensor: """ If `inputs` is None and `name` is in both forward function and keyword arguments, then inputs can be retrieved from name """ can_retrieve_inputs = model_kwargs.get(name, None) is not None and name in set( inspect.signature(self.forward).parameters.keys() ) if can_retrieve_inputs and inputs is not None: raise ValueError(f"Cannot only pass one of {name} and {self.main_input_name}") return can_retrieve_inputs def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. """ return logits def _prepare_input_ids_for_generation( self, bos_token_id: Optional[int], encoder_outputs: Optional[ModelOutput] ) -> torch.LongTensor: if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, inputs: torch.Tensor, pad_token_id: Optional[int], eos_token_id: Optional[int], ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() else: return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: # 1. get encoder encoder = self.get_encoder() # 2. 
prepare encoder args and encoder kwargs from model kwargs irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } # 3. make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.main_input_name encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, decoder_start_token_id: int = None, bos_token_id: int = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, device: torch.device = None, ) -> torch.LongTensor: if model_kwargs is not None and "decoder_input_ids" in model_kwargs: return model_kwargs.pop("decoder_input_ids") else: decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is None: device = self.device return torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." 
) def _expand_inputs_for_generation( input_ids: torch.LongTensor, expand_size: int = 1, is_encoder_decoder: bool = False, attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[ModelOutput] = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if is_encoder_decoder: if encoder_outputs is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs def _update_model_kwargs_for_generation( outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past if "past_key_values" in outputs: model_kwargs["past"] = outputs.past_key_values elif "mems" in outputs: model_kwargs["past"] = outputs.mems elif "past_buckets_states" in outputs: model_kwargs["past"] = outputs.past_buckets_states else: model_kwargs["past"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return model_kwargs def _reorder_cache(self, past, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to" f" enable beam search for {self.__class__}" ) def _get_logits_warper( self, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, temperature: Optional[float] = None, num_beams: Optional[int] = None, renormalize_logits: Optional[bool] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances used for multinomial sampling. 
""" # init warp parameters top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p typical_p = typical_p if typical_p is not None else self.config.typical_p temperature = temperature if temperature is not None else self.config.temperature # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if temperature is not None and temperature != 1.0: warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if typical_p is not None and typical_p < 1.0: warpers.append(TypicalLogitsWarper(mass=typical_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: warpers.append(LogitNormalization()) return warpers def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, encoder_no_repeat_ngram_size: int, input_ids_seq_length: int, encoder_input_ids: torch.LongTensor, bad_words_ids: List[List[int]], min_length: int, max_length: int, eos_token_id: int, forced_bos_token_id: int, forced_eos_token_id: int, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int, num_beam_groups: int, diversity_penalty: float, remove_invalid_values: bool, exponential_decay_length_penalty: Tuple, logits_processor: Optional[LogitsProcessorList], renormalize_logits: Optional[bool], suppress_tokens: Optional[List[int]] = None, begin_suppress_tokens: Optional[List[int]] = None, forced_decoder_ids: Optional[List[List[int]]] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] instances used to modify the scores of the language model head. 
""" processors = LogitsProcessorList() # init warp parameters repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) encoder_no_repeat_ngram_size = ( encoder_no_repeat_ngram_size if encoder_no_repeat_ngram_size is not None else self.config.encoder_no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty forced_bos_token_id = ( forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id ) forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) remove_invalid_values = ( remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values ) exponential_decay_length_penalty = ( exponential_decay_length_penalty if exponential_decay_length_penalty is not None else self.config.exponential_decay_length_penalty ) suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens begin_suppress_tokens = ( begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens ) if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"): forced_decoder_ids = self.config.forced_decoder_ids # instantiate processors list # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if diversity_penalty is not None and diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups ) ) if repetition_penalty is not None and repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size)) if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0: if self.config.is_encoder_decoder: processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids)) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if bad_words_ids is not None: processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id)) if min_length is not None and eos_token_id is not None and min_length > 0: processors.append(MinLengthLogitsProcessor(min_length, eos_token_id)) if prefix_allowed_tokens_fn is not None: processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams // num_beam_groups)) if forced_bos_token_id is not None: processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id)) if forced_eos_token_id is not None: processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)) if remove_invalid_values is True: processors.append(InfNanRemoveLogitsProcessor()) if exponential_decay_length_penalty is not None: processors.append( ExponentialDecayLengthPenalty(exponential_decay_length_penalty, eos_token_id, input_ids_seq_length) ) if suppress_tokens is not None: 
processors.append(SuppressTokensLogitsProcessor(suppress_tokens)) if begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1 if forced_decoder_ids is not None: begin_index += forced_decoder_ids[-1][0] # generation starts after the last token that is forced processors.append(SuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index)) if forced_decoder_ids is not None: processors.append(ForceTokensLogitsProcessor(forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: processors.append(LogitNormalization()) return processors def _get_stopping_criteria( self, max_length: Optional[int], max_time: Optional[float], stopping_criteria: Optional[StoppingCriteriaList] ) -> StoppingCriteriaList: criteria = StoppingCriteriaList() if max_length is not None: criteria.append(MaxLengthCriteria(max_length=max_length)) if max_time is not None: criteria.append(MaxTimeCriteria(max_time=max_time)) criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) return criteria def _merge_criteria_processor_list( self, default_list: Union[LogitsProcessorList, StoppingCriteriaList], custom_list: Union[LogitsProcessorList, StoppingCriteriaList], ) -> Union[LogitsProcessorList, StoppingCriteriaList]: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def compute_transition_beam_scores( self, sequences: torch.Tensor, scores: Tuple[torch.Tensor], beam_indices: torch.Tensor, eos_token_id: int = None, ): """compute the transition probabilities of sequences given generation scores and beam indices""" # 1. reshape scores as [vocab_size * batch_size, # generation steps] # with batch_size being 2 * vocab_size and # generation steps being # seq_len - input_length scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1) # 2. cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max() beam_indices = beam_indices[:, :max_beam_length] beam_indices_mask = beam_indices_mask[:, :max_beam_length] # 3. Set indices of beams that finished early to 0 # such indices will be masked correctly afterwards beam_indices[beam_indices_mask] = 0 # 4. multiply beam_indices with vocab size to gather correctly from scores beam_sequence_indices = beam_indices * self.config.vocab_size # 5. Define which indices contributed to scores cut_idx = sequences.shape[-1] - max_beam_length indices = sequences[:, cut_idx:] + beam_sequence_indices # 6. Compute scores transition_scores = scores.gather(0, indices) # 7. 
Mask out transition_scores of beams that stopped early transition_scores[beam_indices_mask] = 0 return transition_scores def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not hasattr(self, "prepare_inputs_for_generation"): generate_compatible_mappings = [ MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function if self.config.is_encoder_decoder: for key in ["decoder_input_ids"]: model_kwargs.pop(key, None) unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs` if often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept `kwargs`, then a stricter check can be made ;) if "kwargs" in model_args: model_args |= set(inspect.signature(self.forward).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, inputs: Optional[torch.Tensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, penalty_alpha: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, force_words_ids: Optional[Union[Iterable[int], Iterable[Iterable[int]]]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, encoder_no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, max_time: Optional[float] = None, max_new_tokens: Optional[int] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, num_beam_groups: Optional[int] = None, diversity_penalty: Optional[float] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, logits_processor: Optional[LogitsProcessorList] = None, renormalize_logits: Optional[bool] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, constraints: Optional[List[Constraint]] = None, 
output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, forced_bos_token_id: Optional[int] = None, forced_eos_token_id: Optional[int] = None, remove_invalid_values: Optional[bool] = None, synced_gpus: Optional[bool] = False, exponential_decay_length_penalty: Optional[Tuple[int, float]] = None, suppress_tokens: Optional[List[int]] = None, begin_suppress_tokens: Optional[List[int]] = None, forced_decoder_ids: Optional[List[List[int]]] = None, **model_kwargs, ) -> Union[GenerateOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head. The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models: - *greedy decoding* by calling [`~generation_utils.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False`. - *contrastive search* by calling [`~generation_utils.GenerationMixin.contrastive_search`] if `penalty_alpha>0.` and `top_k>1` - *multinomial sampling* by calling [`~generation_utils.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True`. - *beam-search decoding* by calling [`~generation_utils.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False`. - *beam-search multinomial sampling* by calling [`~generation_utils.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True`. - *diverse beam-search decoding* by calling [`~generation_utils.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1`. - *constrained beam-search decoding* by calling [`~generation_utils.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None`. <Tip warning={true}> Apart from `inputs`, all the arguments below will default to the value of the attribute of the same name as defined in the model's config (`config.json`) which in turn defaults to the [`~modeling_utils.PretrainedConfig`] of the model. </Tip> Most of these parameters are explained in more detail in [this blog post](https://huggingface.co/blog/how-to-generate). Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. max_length (`int`, *optional*, defaults to `model.config.max_length`): The maximum length the generated tokens can have. Corresponds to the length of the input prompt + `max_new_tokens`. In general, prefer the use of `max_new_tokens`, which ignores the number of tokens in the prompt. max_new_tokens (`int`, *optional*): The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. min_length (`int`, *optional*, defaults to `model.config.min_length` or 10 if the config does not set any value): The minimum length of the sequence to be generated. do_sample (`bool`, *optional*, defaults to `model.config.do_sample` or `False` if the config does not set any value): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. 
num_beams (`int`, *optional*, defaults to `model.config.num_beams` or 1 if the config does not set any value): Number of beams for beam search. 1 means no beam search. temperature (`float`, *optional*, defaults to `model.config.temperature` or 1.0 if the config does not set any value): The value used to module the next token probabilities. penalty_alpha (`float`, *optional*, defaults to `model.config.penalty_alpha` or None if the config does not set any value): The values balance the model confidence and the degeneration penalty in contrastive search decoding. top_k (`int`, *optional*, defaults to `model.config.top_k` or 50 if the config does not set any value): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to `model.config.top_p` or 1.0 if the config does not set any value): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. typical_p (`float`, *optional*, defaults to `model.config.typical_p` or 1.0 if the config does not set any value): The amount of probability mass from the original distribution to be considered in typical decoding. If set to 1.0 it takes no effect. See [this paper](https://arxiv.org/pdf/2202.00666.pdf) for more details. repetition_penalty (`float`, *optional*, defaults to `model.config.repetition_penalty` or 1.0 if the config does not set any value): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. pad_token_id (`int`, *optional*, defaults to `model.config.pad_token_id`): The id of the *padding* token. bos_token_id (`int`, *optional*, defaults to `model.config.bos_token_id`): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*, defaults to `model.config.eos_token_id`): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to `model.config.length_penalty` or 1.0 if the config does not set any value): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.encoder_no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. bad_words_ids(`List[List[int]]`, *optional*, defaults to `model.config.bad_words_ids`): List of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids`. force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*): List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of words that must be included, the opposite to `bad_words_ids`. 
If given `List[List[List[int]]]`, this triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one can allow different forms of each word. num_return_sequences(`int`, *optional*, defaults to `model.config.num_return_sequences` or 1 if the config does not set any value): The number of independently computed returned sequences for each element in the batch. max_time(`float`, *optional*): The maximum amount of time you allow the computation to run for in seconds. Generation will still finish the current pass after the allocated time has passed. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values are in `[0, 1]`, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as `input_ids` that masks the pad token. [What are attention masks?](../glossary#attention-mask) decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past key/values attentions (if applicable to the model) to speed up decoding. num_beam_groups (`int`, *optional*, defaults to `model.config.num_beam_groups` or 1 if the config does not set any value): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. diversity_penalty (`float`, *optional*, defaults to `model.config.diversity_penalty` or 0.0 if the config does not set any value): This value is subtracted from a beam's score if it generates the same token as any beam from another group at a particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constrains the beam search to allowed tokens only at each step. If not provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config an error is thrown. This feature is intended for advanced users. renormalize_logits (`bool`, *optional*, defaults to `False`): Whether to renormalize the logits after applying all the logits processors or warpers (including the custom ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the score logits are normalized but some logit processors or warpers break the normalization. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criterion is passed that is already created with the arguments or a model's config an error is thrown.
This feature is intended for advanced users. constraints (`List[Constraint]`, *optional*): Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by `Constraint` objects, in the most sensible way possible. output_attentions (`bool`, *optional*, defaults to `model.config.output_attentions` or `False` if the config does not set any value): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `model.config.output_hidden_states` or `False` if the config does not set any value): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `model.config.output_scores` or `False` if the config does not set any value): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `model.config.return_dict_in_generate` or `False` if the config does not set any value): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`): The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target language token. forced_eos_token_id (`int`, *optional*, defaults to `model.config.forced_eos_token_id`): The id of the token to force as the last generated token when `max_length` is reached. remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`): Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing. Note that using `remove_invalid_values` can slow down generation. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) exponential_decay_length_penalty (`tuple(int, float)`, *optional*, defaults to `model.config.exponential_decay_length_penalty`): This tuple adds an exponentially increasing length penalty after a certain number of tokens have been generated. The tuple shall consist of `(start_index, decay_factor)`, where `start_index` indicates where the penalty starts and `decay_factor` represents the factor of exponential decay. suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`): A list of tokens that will be suppressed during generation. The `SuppressTokensLogitsProcessor` will set their log probs to `-inf` so that they are not sampled. begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`): A list of tokens that will be suppressed at the beginning of generation. The `SuppressTokensAtBeginLogitsProcessor` will set their log probs to `-inf` so that they are not sampled. forced_decoder_ids (`List[List[int]]`, *optional*, defaults to `model.config.forced_decoder_ids`): A list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token of index 123. model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model.
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchDecoderOnlyOutput`], - [`~generation_utils.SampleDecoderOnlyOutput`], - [`~generation_utils.BeamSearchDecoderOnlyOutput`], - [`~generation_utils.BeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchEncoderDecoderOutput`], - [`~generation_utils.SampleEncoderDecoderOutput`], - [`~generation_utils.BeamSearchEncoderDecoderOutput`], - [`~generation_utils.BeamSampleEncoderDecoderOutput`] Examples: Greedy Decoding: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> prompt = "Today I believe we can finally" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> # generate up to 30 tokens >>> outputs = model.generate(input_ids, do_sample=False, max_length=30) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\n'] ``` Multinomial Sampling: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> prompt = "Today I believe we can finally" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> # sample up to 30 tokens >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.generate(input_ids, do_sample=True, max_length=30) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today I believe we can finally get rid of discrimination," said Rep. Mark Pocan (D-Wis.).\n\n"Just look at the'] ``` Beam-search decoding: ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> sentence = "Paris is one of the densest populated areas in Europe." >>> input_ids = tokenizer(sentence, return_tensors="pt").input_ids >>> outputs = model.generate(input_ids, num_beams=5) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Paris ist eines der dichtesten besiedelten Gebiete Europas.'] ```""" # 0. Validate the `.generate()` call self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) # 1. 
Set generation parameters if not already defined bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id num_beams = num_beams if num_beams is not None else self.config.num_beams length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups do_sample = do_sample if do_sample is not None else self.config.do_sample num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id if eos_token_id is None and hasattr(self.config, "decoder"): eos_token_id = self.config.decoder.eos_token_id if pad_token_id is None and eos_token_id is not None: if model_kwargs.get("attention_mask", None) is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." ) logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # 2. Define model inputs # inputs_tensor has to be defined # model_input_name is defined if model-specific keyword input is passed # otherwise model_input_name is None # all model-specific keyword inputs are removed from `model_kwargs` inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] # 3. Define other model kwargs model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states model_kwargs["use_cache"] = use_cache accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys()) requires_attention_mask = "encoder_outputs" not in model_kwargs if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, pad_token_id, eos_token_id ) # decoder-only models should use left-padding for generation if not self.config.is_encoder_decoder: if pad_token_id is not None and torch.sum(inputs_tensor[:, -1] == pad_token_id) > 0: logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." 
) if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 4. Prepare `input_ids` which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids = self._prepare_decoder_input_ids_for_generation( batch_size, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, model_kwargs=model_kwargs, device=inputs_tensor.device, ) else: # if decoder-only then inputs_tensor has to be `input_ids` input_ids = inputs_tensor # 5. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = input_ids.shape[-1] if max_length is None and max_new_tokens is None: warnings.warn( "Neither `max_length` nor `max_new_tokens` has been set, `max_length` will default to " f"{self.config.max_length} (`self.config.max_length`). Controlling `max_length` via the config is " "deprecated and `max_length` will be removed from the config in v5 of Transformers -- we recommend " "using `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif max_length is None and max_new_tokens is not None: max_length = max_new_tokens + input_ids_seq_length elif max_length is not None and max_new_tokens is not None: raise ValueError( "Both `max_new_tokens` and `max_length` have been set but they serve the same purpose -- setting a" " limit to the generated output length. Remove one of those arguments. Please refer to the" " documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) # default to config if still None max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length if min_length is not None and min_length > max_length: raise ValueError( f"Unfeasible length constraints: the minimum length ({min_length}) is larger than the maximum " f"length ({max_length})" ) if input_ids_seq_length >= max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {max_length}. This can lead to unexpected behavior. You should consider increasing " "`max_new_tokens`." ) # 6. 
determine generation mode is_constraint_gen_mode = constraints is not None or force_words_ids is not None is_contrastive_search_gen_mode = ( top_k is not None and top_k > 1 and do_sample is False and penalty_alpha is not None and penalty_alpha > 0 ) is_greedy_gen_mode = ( (num_beams == 1) and (num_beam_groups == 1) and do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_sample_gen_mode = ( (num_beams == 1) and (num_beam_groups == 1) and do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_gen_mode = ( (num_beams > 1) and (num_beam_groups == 1) and do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_sample_gen_mode = ( (num_beams > 1) and (num_beam_groups == 1) and do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_group_beam_gen_mode = ( (num_beams > 1) and (num_beam_groups > 1) and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) if num_beam_groups > num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." ) if self.device.type != input_ids.device.type: warnings.warn( "You are calling .generate() with the `input_ids` being on a device type different" f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model" f" is on {self.device.type}. You may experience unexpected behaviors or slower generation." " Please make sure that you have put `input_ids` to the" f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before" " running `.generate()`.", UserWarning, ) # 7. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=inputs_tensor, bad_words_ids=bad_words_ids, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, num_beams=num_beams, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty, remove_invalid_values=remove_invalid_values, exponential_decay_length_penalty=exponential_decay_length_penalty, logits_processor=logits_processor, renormalize_logits=renormalize_logits, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, forced_decoder_ids=forced_decoder_ids, ) # 8. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria ) # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search." ) # 10. 
run greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_contrastive_search_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing contrastive search." ) return self.contrastive_search( input_ids, top_k=top_k, penalty_alpha=penalty_alpha, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, typical_p=typical_p, temperature=temperature, num_beams=num_beams, renormalize_logits=renormalize_logits, ) # 11. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 12. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, typical_p=typical_p, temperature=temperature, num_beams=num_beams, renormalize_logits=renormalize_logits, ) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size * num_return_sequences, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams * num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. 
run beam sample return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_group_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if num_beams % num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if typical_p is not None: raise ValueError("Decoder argument `typical_p` is not supported with beam groups.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, max_length=stopping_criteria.max_length, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_constraint_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if num_beams <= 1: raise ValueError("`num_beams` needs to be greater than 1 for constrained generation.") if do_sample: raise ValueError("`do_sample` needs to be false for constrained generation.") if num_beam_groups is not None and num_beam_groups > 1: raise ValueError("`num_beam_groups` not supported yet for constrained generation.") final_constraints = [] if constraints is not None: final_constraints = constraints if force_words_ids is not None: def typeerror(): raise ValueError( "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`" f"of positive integers, but is {force_words_ids}." ) if not isinstance(force_words_ids, list) or len(force_words_ids) == 0: typeerror() for word_ids in force_words_ids: if isinstance(word_ids[0], list): if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any(not isinstance(token_ids, list) for token_ids in word_ids): typeerror() if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in word_ids ): typeerror() constraint = DisjunctiveConstraint(word_ids) else: if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids): typeerror() constraint = PhrasalConstraint(word_ids) final_constraints.append(constraint) # 10. 
prepare beam search scorer constrained_beam_scorer = ConstrainedBeamSearchScorer( constraints=final_constraints, batch_size=batch_size, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.constrained_beam_search( input_ids, constrained_beam_scorer=constrained_beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) def contrastive_search( self, input_ids: torch.LongTensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[ContrastiveSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. 
return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.ContrastiveSearchDecoderOnlyOutput`], [`~generation_utils.ContrastiveSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.ContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.ContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="pt") >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)]) >>> outputs = model.contrastive_search( ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare inputs model_kwargs["use_cache"] = True model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # if the first step in the loop, encode all the prefix and obtain three parameters: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past") is None: # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) # past_key_values is required for fast decoding if "past_key_values" not in outputs: raise ValueError( f"{self.__class__.__name__} cannot return `past_key_values` and can therefore **not** be used " "for contrastive search." ) past_key_values = outputs.past_key_values # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :] model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty bsz, seqlen, embed_dim = last_hidden_states.size() # logits processor logit_for_next_step = logits_processor(input_ids, logit_for_next_step) logit_for_next_step = logits_warper(input_ids, logit_for_next_step) next_probs = nn.functional.softmax(logit_for_next_step, dim=-1) _, top_k_ids = torch.topk(logit_for_next_step, dim=-1, k=top_k) top_k_probs = torch.gather(next_probs, dim=1, index=top_k_ids) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logit_for_next_step,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # enlarge the past_key_values new_key_values = [] for layer in past_key_values: items = [] # item is either the key or the value matrix for item in layer: bsz, num_head, seq_len, esz = item.size() item = ( item.unsqueeze(1) .expand(-1, top_k, -1, -1, -1) .reshape(bsz * top_k, num_head, seq_len, esz) .contiguous() ) # [bsz*beam, num_head, seq_len, esz] items.append(item) new_key_values.append(items) past_key_values = new_key_values # build next attention mask if "attention_mask" in model_inputs: attention_mask = model_kwargs["attention_mask"] # [B, S] attention_mask = attention_mask.unsqueeze(1).expand(-1, top_k, -1).reshape(-1, attention_mask.size(-1)) else: attention_mask = None # encoder-decoder model also contains the `encoder_outputs` if self.config.is_encoder_decoder and "encoder_outputs" in model_inputs: encoder_outputs = model_inputs["encoder_outputs"] else: encoder_outputs = None 
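# The block below expands the model inputs to the `top_k` candidate tokens, runs a single forward pass over
# all of them, and then re-ranks the candidates with `_ranking_fast`. Roughly, each candidate token v is
# scored with the contrastive objective
#     score(v) = (1 - penalty_alpha) * p(v | context) - penalty_alpha * max_j cos_sim(h_v, h_j)
# where p(v | context) is the model confidence (`top_k_probs`) and the max runs over the hidden states of the
# previously generated context (`last_hidden_states`); the highest-scoring candidate is kept for each batch row.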
next_model_inputs = self.prepare_inputs_for_generation( top_k_ids.view(-1, 1), past=past_key_values, attention_mask=attention_mask, use_cache=True, encoder_outputs=encoder_outputs, ) # compute the candidate tokens by the language model and collects their hidden_states outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) past_key_values = outputs.past_key_values logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = ( last_hidden_states.unsqueeze(1).expand(-1, top_k, -1, -1).reshape(bsz * top_k, seqlen, embed_dim) ) # compute the degeneratin penalty and re-rank the candidates based on the degeneration penalty and the # model confidence selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx] next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k)) next_hidden = next_hidden[range(bsz), selected_idx, :] last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1) decoder_hidden_states = [] for layer in full_hidden_states: layer = torch.stack(torch.split(layer.squeeze(dim=1), top_k)) layer = layer[range(bsz), selected_idx, :] decoder_hidden_states.append(layer) # select the past_key_value new_key_values = [] for layer in past_key_values: items = [] # item is either the key or the value matrix for item in layer: bsz_and_beam, num_head, seq_len, esz = item.size() bsz = int(bsz_and_beam // top_k) item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz] item = item[range(bsz), selected_idx, :, :, :] # [B, num_head, seq_len, esz] items.append(item) new_key_values.append(items) past_key_values = new_key_values logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(bsz), selected_idx, :] # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: outputs = Seq2SeqLMOutput( past_key_values=past_key_values, decoder_hidden_states=decoder_hidden_states, ) else: outputs = CausalLMOutputWithPast( past_key_values=past_key_values, hidden_states=decoder_hidden_states, attentions=model_kwargs["attention_mask"], ) # contrastive_search main logic end if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in 
one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return ContrastiveSearchEncoderDecoderOutput( sequences=input_ids, scores=scores, decoder_hidden_states=decoder_hidden_states, ) else: return ContrastiveSearchDecoderOnlyOutput( sequences=input_ids, scores=scores, hidden_states=decoder_hidden_states, ) else: return input_ids def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. 
Return: [`~generation_utils.GreedySearchDecoderOnlyOutput`], [`~generation_utils.GreedySearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "It might be possible to" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> outputs = model.greedy_search( ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["It might be possible to get a better understanding of the nature of the problem, but it's not"] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) 
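# NOTE: the `timers("generate-forward")` calls and `sync_func` used in the loop below are not part of the
# standard generation loop; they appear to come from an external profiling utility that is assumed to be
# defined elsewhere in this codebase. `torch.cuda.synchronize` is passed as `sync_func` so that the timer
# measures the actual GPU time of each forward step rather than only the asynchronous kernel launches.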
this_peer_finished = False # used by synced_gpus only sync_func = torch.cuda.synchronize while True: timers("generate-forward").start(sync_func) if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_tokens_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: timers("generate-forward").stop(sync_func) break else: this_peer_finished = True timers("generate-forward").stop(sync_func) if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, 
pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.SampleDecoderOnlyOutput`], [`~generation_utils.SampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.SampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... StoppingCriteriaList, ... MaxLengthCriteria, ... 
) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.sample( ... input_ids, ... logits_processor=logits_processor, ... logits_warper=logits_warper, ... stopping_criteria=stopping_criteria, ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: 
Optional[bool] = False, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`generation_utilsBeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" 
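>>> # Note: beam search runs on an expanded batch. In the lines below, the decoder input ids have
>>> # `num_beams` rows and the encoder outputs are repeated `num_beams` times, so that every beam
>>> # conditions on the same source sentence.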
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam with 0 and the rest with -1e9. 
This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores_processed,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + 
(beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. 
eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSampleDecoderOnlyOutput`], [`~generation_utils.BeamSampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> outputs = model.beam_sample( ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
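# The block below is where `beam_sample` departs from plain `beam_search`: after the running
# beam scores are added, the summed scores are passed through `logits_warper`, 2 * num_beams
# candidate tokens per batch entry are drawn with `torch.multinomial`, and the draws are
# re-sorted by score before `beam_scorer.process` selects the surviving beams (instead of the
# deterministic `torch.topk` selection used in `beam_search`).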
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logits_warper(input_ids, next_token_scores_processed),) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSampleEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSampleDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) 
else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ): r""" Generates sequences of token ids for models with a language modeling head using **diverse beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs that will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... 
AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... HammingDiversityLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run diverse beam search using 6 beams >>> num_beams = 6 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... num_beam_groups=3, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.group_beam_search( ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape if return_dict_in_generate and output_scores: beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)] else: beam_indices = None if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." 
) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in # the same group don't produce same tokens everytime. beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
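# In `group_beam_search`, each beam group is scored separately inside this loop. The logits
# processor is called with `current_tokens` (the tokens already chosen by earlier groups at
# this step) and `beam_group_idx`, which is what allows `HammingDiversityLogitsProcessor` to
# penalize tokens that previous groups have already picked and keep the groups diverse.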
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores_processed = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1) next_token_scores = next_token_scores.expand_as(next_token_scores_processed) if output_scores: processed_score[batch_group_indices] = next_token_scores_processed # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=process_beam_indices, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] if return_dict_in_generate and output_scores: beam_indices[beam_group_idx] = tuple( beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0])) ) input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * torch_int_div(beam_idx, group_size) + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=final_beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if 
self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def constrained_beam_search( self, input_ids: torch.LongTensor, constrained_beam_scorer: ConstrainedBeamSearchScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **constrained beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. constrained_beam_scorer (`ConstrainedBeamSearchScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. For more information, the documentation of [`ConstrainedBeamSearchScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. 
output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`generation_utilsBeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... ConstrainedBeamSearchScorer, ... PhrasalConstraint, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> constraint_str = "Sie" >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] >>> # instantiate beam scorer >>> beam_scorer = ConstrainedBeamSearchScorer( ... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.constrained_beam_search( ... input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt sind Sie?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(constrained_beam_scorer._beam_hyps) num_beams = constrained_beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) scores_for_all_vocab = next_token_scores.clone() # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = (next_tokens / vocab_size).long() next_tokens = next_tokens % vocab_size # stateless beam_outputs = constrained_beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, scores_for_all_vocab, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if constrained_beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = constrained_beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, 
) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"]
The provided code snippet includes necessary dependencies for implementing the `get_first_parameter_dtype` function.
Write a Python function `def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"])` to solve the following problem:
Returns the first parameter dtype (can be non-floating) or asserts if none were found.
Here is the function:
def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    """
    Returns the first parameter dtype (can be non-floating) or asserts if none were found.
    """
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch > 1.5
        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype
Returns the first parameter dtype (can be non-floating) or asserts if none were found.
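As a quick illustration, here is a minimal usage sketch for `get_first_parameter_dtype`. The toy `TensorOnly` module and the dtypes are assumptions made for the example (they are not part of the original snippet); the second call exercises the `StopIteration` fallback that scans plain tensor attributes, the same path used for `nn.DataParallel` wrappers.

```python
import torch
from torch import nn

# assumes `get_first_parameter_dtype` from the snippet above is in scope


class TensorOnly(nn.Module):
    """Toy module with no registered parameters, only a plain tensor attribute."""

    def __init__(self):
        super().__init__()
        self.scale = torch.zeros(3, dtype=torch.int64)  # plain tensor, not an nn.Parameter


linear = nn.Linear(4, 2).to(torch.float16)
print(get_first_parameter_dtype(linear))        # torch.float16, dtype of the first parameter

print(get_first_parameter_dtype(TensorOnly()))  # torch.int64, found via the fallback branch
```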
10,728
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: class GenerationMixin: """ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`]. The class exposes [`~generation_utils.GenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation_utils.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False`. - *contrastive search* by calling [`~generation_utils.GenerationMixin.contrastive_search`] if `penalty_alpha>0` and `top_k>1` - *multinomial sampling* by calling [`~generation_utils.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True`. - *beam-search decoding* by calling [`~generation_utils.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False`. - *beam-search multinomial sampling* by calling [`~generation_utils.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True`. - *diverse beam-search decoding* by calling [`~generation_utils.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1`. - *constrained beam-search decoding* by calling [`~generation_utils.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None`. """ def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # 1. retrieve all kwargs that are non-None or non-model input related. # some encoder-decoder models have different names for model and encoder if ( self.config.is_encoder_decoder and hasattr(self, "encoder") and self.encoder.main_input_name != self.main_input_name ): input_name = self.encoder.main_input_name else: input_name = self.main_input_name model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} # 2. 
check whether model_input_name is passed as kwarg # if yes and `inputs` is None use kwarg inputs inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs}` were passed alongside " f"{input_name} which is not allowed." f"Make sure to either pass {inputs} or {input_name}=..." ) elif inputs_kwarg is not None: inputs = inputs_kwarg # 3. models with `input_ids` can also make use of `inputs_embeds` if self._can_retrieve_inputs_from_name(inputs, "inputs_embeds", model_kwargs): inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" # 4. Only encoder-decoder models can have non `input_ids` input format if not self.config.is_encoder_decoder and input_name != "input_ids": raise ValueError( f"If {input_name} is passed as model-specific keyword " "input then model has to be an encoder-decoder and not a " f"{self.__class__.__name__}." ) # 5. if `inputs` is still None, try to create `input_ids` from BOS token if inputs is None: inputs = self._prepare_input_ids_for_generation(bos_token_id, model_kwargs.get("encoder_outputs")) return inputs, input_name, model_kwargs def _can_retrieve_inputs_from_name( self, inputs: Optional[torch.Tensor], name: str, model_kwargs: Dict[str, torch.Tensor] ) -> torch.Tensor: """ If `inputs` is None and `name` is in both forward function and keyword arguments, then inputs can be retrieved from name """ can_retrieve_inputs = model_kwargs.get(name, None) is not None and name in set( inspect.signature(self.forward).parameters.keys() ) if can_retrieve_inputs and inputs is not None: raise ValueError(f"Cannot only pass one of {name} and {self.main_input_name}") return can_retrieve_inputs def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. """ return logits def _prepare_input_ids_for_generation( self, bos_token_id: Optional[int], encoder_outputs: Optional[ModelOutput] ) -> torch.LongTensor: if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, inputs: torch.Tensor, pad_token_id: Optional[int], eos_token_id: Optional[int], ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() else: return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: # 1. get encoder encoder = self.get_encoder() # 2. 
prepare encoder args and encoder kwargs from model kwargs irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } # 3. make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.main_input_name encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, decoder_start_token_id: int = None, bos_token_id: int = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, device: torch.device = None, ) -> torch.LongTensor: if model_kwargs is not None and "decoder_input_ids" in model_kwargs: return model_kwargs.pop("decoder_input_ids") else: decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is None: device = self.device return torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." 
) def _expand_inputs_for_generation( input_ids: torch.LongTensor, expand_size: int = 1, is_encoder_decoder: bool = False, attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[ModelOutput] = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if is_encoder_decoder: if encoder_outputs is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs def _update_model_kwargs_for_generation( outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past if "past_key_values" in outputs: model_kwargs["past"] = outputs.past_key_values elif "mems" in outputs: model_kwargs["past"] = outputs.mems elif "past_buckets_states" in outputs: model_kwargs["past"] = outputs.past_buckets_states else: model_kwargs["past"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return model_kwargs def _reorder_cache(self, past, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to" f" enable beam search for {self.__class__}" ) def _get_logits_warper( self, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, temperature: Optional[float] = None, num_beams: Optional[int] = None, renormalize_logits: Optional[bool] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances used for multinomial sampling. 
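        For illustration only (a minimal sketch, not an exhaustive mapping of every argument): a call with
        `temperature=0.7`, `top_k=50`, `top_p=0.95` and `num_beams=1` builds a list roughly equivalent to:

        ```python
        >>> from transformers import LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper

        >>> warpers = LogitsProcessorList(
        ...     [TemperatureLogitsWarper(0.7), TopKLogitsWarper(top_k=50), TopPLogitsWarper(top_p=0.95)]
        ... )
        ```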
""" # init warp parameters top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p typical_p = typical_p if typical_p is not None else self.config.typical_p temperature = temperature if temperature is not None else self.config.temperature # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if temperature is not None and temperature != 1.0: warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if typical_p is not None and typical_p < 1.0: warpers.append(TypicalLogitsWarper(mass=typical_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: warpers.append(LogitNormalization()) return warpers def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, encoder_no_repeat_ngram_size: int, input_ids_seq_length: int, encoder_input_ids: torch.LongTensor, bad_words_ids: List[List[int]], min_length: int, max_length: int, eos_token_id: int, forced_bos_token_id: int, forced_eos_token_id: int, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int, num_beam_groups: int, diversity_penalty: float, remove_invalid_values: bool, exponential_decay_length_penalty: Tuple, logits_processor: Optional[LogitsProcessorList], renormalize_logits: Optional[bool], suppress_tokens: Optional[List[int]] = None, begin_suppress_tokens: Optional[List[int]] = None, forced_decoder_ids: Optional[List[List[int]]] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] instances used to modify the scores of the language model head. 
""" processors = LogitsProcessorList() # init warp parameters repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) encoder_no_repeat_ngram_size = ( encoder_no_repeat_ngram_size if encoder_no_repeat_ngram_size is not None else self.config.encoder_no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty forced_bos_token_id = ( forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id ) forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) remove_invalid_values = ( remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values ) exponential_decay_length_penalty = ( exponential_decay_length_penalty if exponential_decay_length_penalty is not None else self.config.exponential_decay_length_penalty ) suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens begin_suppress_tokens = ( begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens ) if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"): forced_decoder_ids = self.config.forced_decoder_ids # instantiate processors list # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if diversity_penalty is not None and diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups ) ) if repetition_penalty is not None and repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size)) if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0: if self.config.is_encoder_decoder: processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids)) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if bad_words_ids is not None: processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id)) if min_length is not None and eos_token_id is not None and min_length > 0: processors.append(MinLengthLogitsProcessor(min_length, eos_token_id)) if prefix_allowed_tokens_fn is not None: processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams // num_beam_groups)) if forced_bos_token_id is not None: processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id)) if forced_eos_token_id is not None: processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)) if remove_invalid_values is True: processors.append(InfNanRemoveLogitsProcessor()) if exponential_decay_length_penalty is not None: processors.append( ExponentialDecayLengthPenalty(exponential_decay_length_penalty, eos_token_id, input_ids_seq_length) ) if suppress_tokens is not None: 
processors.append(SuppressTokensLogitsProcessor(suppress_tokens)) if begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1 if forced_decoder_ids is not None: begin_index += forced_decoder_ids[-1][0] # generation starts after the last token that is forced processors.append(SuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index)) if forced_decoder_ids is not None: processors.append(ForceTokensLogitsProcessor(forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: processors.append(LogitNormalization()) return processors def _get_stopping_criteria( self, max_length: Optional[int], max_time: Optional[float], stopping_criteria: Optional[StoppingCriteriaList] ) -> StoppingCriteriaList: criteria = StoppingCriteriaList() if max_length is not None: criteria.append(MaxLengthCriteria(max_length=max_length)) if max_time is not None: criteria.append(MaxTimeCriteria(max_time=max_time)) criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) return criteria def _merge_criteria_processor_list( self, default_list: Union[LogitsProcessorList, StoppingCriteriaList], custom_list: Union[LogitsProcessorList, StoppingCriteriaList], ) -> Union[LogitsProcessorList, StoppingCriteriaList]: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def compute_transition_beam_scores( self, sequences: torch.Tensor, scores: Tuple[torch.Tensor], beam_indices: torch.Tensor, eos_token_id: int = None, ): """compute the transition probabilities of sequences given generation scores and beam indices""" # 1. reshape scores as [vocab_size * batch_size, # generation steps] # with batch_size being 2 * vocab_size and # generation steps being # seq_len - input_length scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1) # 2. cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max() beam_indices = beam_indices[:, :max_beam_length] beam_indices_mask = beam_indices_mask[:, :max_beam_length] # 3. Set indices of beams that finished early to 0 # such indices will be masked correctly afterwards beam_indices[beam_indices_mask] = 0 # 4. multiply beam_indices with vocab size to gather correctly from scores beam_sequence_indices = beam_indices * self.config.vocab_size # 5. Define which indices contributed to scores cut_idx = sequences.shape[-1] - max_beam_length indices = sequences[:, cut_idx:] + beam_sequence_indices # 6. Compute scores transition_scores = scores.gather(0, indices) # 7. 
Mask out transition_scores of beams that stopped early transition_scores[beam_indices_mask] = 0 return transition_scores def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not hasattr(self, "prepare_inputs_for_generation"): generate_compatible_mappings = [ MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function if self.config.is_encoder_decoder: for key in ["decoder_input_ids"]: model_kwargs.pop(key, None) unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs` if often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept `kwargs`, then a stricter check can be made ;) if "kwargs" in model_args: model_args |= set(inspect.signature(self.forward).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, inputs: Optional[torch.Tensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, penalty_alpha: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, force_words_ids: Optional[Union[Iterable[int], Iterable[Iterable[int]]]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, encoder_no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, max_time: Optional[float] = None, max_new_tokens: Optional[int] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, num_beam_groups: Optional[int] = None, diversity_penalty: Optional[float] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, logits_processor: Optional[LogitsProcessorList] = None, renormalize_logits: Optional[bool] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, constraints: Optional[List[Constraint]] = None, 
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_scores: Optional[bool] = None,
        return_dict_in_generate: Optional[bool] = None,
        forced_bos_token_id: Optional[int] = None,
        forced_eos_token_id: Optional[int] = None,
        remove_invalid_values: Optional[bool] = None,
        synced_gpus: Optional[bool] = False,
        exponential_decay_length_penalty: Optional[Tuple[int, float]] = None,
        suppress_tokens: Optional[List[int]] = None,
        begin_suppress_tokens: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[List[int]]] = None,
        **model_kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        r"""
        Generates sequences of token ids for models with a language modeling head. The method supports the following
        generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:

            - *greedy decoding* by calling [`~generation_utils.GenerationMixin.greedy_search`] if `num_beams=1` and
              `do_sample=False`.
            - *contrastive search* by calling [`~generation_utils.GenerationMixin.contrastive_search`] if
              `penalty_alpha>0.` and `top_k>1`.
            - *multinomial sampling* by calling [`~generation_utils.GenerationMixin.sample`] if `num_beams=1` and
              `do_sample=True`.
            - *beam-search decoding* by calling [`~generation_utils.GenerationMixin.beam_search`] if `num_beams>1` and
              `do_sample=False`.
            - *beam-search multinomial sampling* by calling [`~generation_utils.GenerationMixin.beam_sample`] if
              `num_beams>1` and `do_sample=True`.
            - *diverse beam-search decoding* by calling [`~generation_utils.GenerationMixin.group_beam_search`], if
              `num_beams>1` and `num_beam_groups>1`.
            - *constrained beam-search decoding* by calling
              [`~generation_utils.GenerationMixin.constrained_beam_search`], if `constraints!=None` or
              `force_words_ids!=None`.

        <Tip warning={true}>

        Apart from `inputs`, all the arguments below will default to the value of the attribute of the same name as
        defined in the model's config (`config.json`) which in turn defaults to the
        [`~modeling_utils.PretrainedConfig`] of the model.

        </Tip>

        Most of these parameters are explained in more detail in
        [this blog post](https://huggingface.co/blog/how-to-generate).

        Parameters:
            inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
                The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
                method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
                `input_ids`, `input_values`, `input_features`, or `pixel_values`.
            max_length (`int`, *optional*, defaults to `model.config.max_length`):
                The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
                `max_new_tokens`. In general, prefer the use of `max_new_tokens`, which ignores the number of tokens
                in the prompt.
            max_new_tokens (`int`, *optional*):
                The maximum number of tokens to generate, ignoring the number of tokens in the prompt.
            min_length (`int`, *optional*, defaults to `model.config.min_length` or 10 if the config does not set any value):
                The minimum length of the sequence to be generated.
            do_sample (`bool`, *optional*, defaults to `model.config.do_sample` or `False` if the config does not set any value):
                Whether or not to use sampling; use greedy decoding otherwise.
            early_stopping (`bool`, *optional*, defaults to `False`):
                Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not.
num_beams (`int`, *optional*, defaults to `model.config.num_beams` or 1 if the config does not set any value): Number of beams for beam search. 1 means no beam search. temperature (`float`, *optional*, defaults to `model.config.temperature` or 1.0 if the config does not set any value): The value used to module the next token probabilities. penalty_alpha (`float`, *optional*, defaults to `model.config.penalty_alpha` or None if the config does not set any value): The values balance the model confidence and the degeneration penalty in contrastive search decoding. top_k (`int`, *optional*, defaults to `model.config.top_k` or 50 if the config does not set any value): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to `model.config.top_p` or 1.0 if the config does not set any value): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. typical_p (`float`, *optional*, defaults to `model.config.typical_p` or 1.0 if the config does not set any value): The amount of probability mass from the original distribution to be considered in typical decoding. If set to 1.0 it takes no effect. See [this paper](https://arxiv.org/pdf/2202.00666.pdf) for more details. repetition_penalty (`float`, *optional*, defaults to `model.config.repetition_penalty` or 1.0 if the config does not set any value): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. pad_token_id (`int`, *optional*, defaults to `model.config.pad_token_id`): The id of the *padding* token. bos_token_id (`int`, *optional*, defaults to `model.config.bos_token_id`): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*, defaults to `model.config.eos_token_id`): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to `model.config.length_penalty` or 1.0 if the config does not set any value): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.encoder_no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. bad_words_ids(`List[List[int]]`, *optional*, defaults to `model.config.bad_words_ids`): List of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids`. force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*): List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of words that must be included, the opposite to `bad_words_ids`. 
                If given `List[List[List[int]]]`, this triggers a
                [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one can
                allow different forms of each word.
            num_return_sequences (`int`, *optional*, defaults to `model.config.num_return_sequences` or 1 if the config does not set any value):
                The number of independently computed returned sequences for each element in the batch.
            max_time (`float`, *optional*):
                The maximum amount of time you allow the computation to run for in seconds. Generation will still
                finish the current pass after the allocated time has passed.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values are in `[0, 1]`, 1 for tokens
                that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape
                as `input_ids` that masks the pad token. [What are attention masks?](../glossary#attention-mask)
            decoder_start_token_id (`int`, *optional*):
                If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token.
            use_cache (`bool`, *optional*, defaults to `True`):
                Whether or not the model should use the past key/values attentions (if applicable to the model) to
                speed up decoding.
            num_beam_groups (`int`, *optional*, defaults to `model.config.num_beam_groups` or 1 if the config does not set any value):
                Number of groups to divide `num_beams` into in order to ensure diversity among different groups of
                beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
            diversity_penalty (`float`, *optional*, defaults to `model.config.diversity_penalty` or 0.0 if the config does not set any value):
                This value is subtracted from a beam's score if it generates the same token as any beam from another
                group at a particular time. Note that `diversity_penalty` is only effective if `group beam search` is
                enabled.
            prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
                If provided, this function constrains the beam search to allowed tokens only at each step. If not
                provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
                `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
                on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
                for constrained generation conditioned on the prefix, as described in
                [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904).
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and a
                model's config. If a logit processor is passed that is already created with the arguments or a
                model's config, an error is thrown. This feature is intended for advanced users.
            renormalize_logits (`bool`, *optional*, defaults to `False`):
                Whether to renormalize the logits after applying all the logits processors or warpers (including the
                custom ones). It's highly recommended to set this flag to `True` as the search algorithms assume the
                score logits are normalized but some logit processors or warpers break the normalization.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                model's config. If a stopping criterion is passed that is already created with the arguments or a
                model's config, an error is thrown.
                This feature is intended for advanced users.
            constraints (`List[Constraint]`, *optional*):
                Custom constraints that can be added to the generation to ensure that the output will contain the use
                of certain tokens as defined by `Constraint` objects, in the most sensible way possible.
            output_attentions (`bool`, *optional*, defaults to `model.config.output_attentions` or `False` if the config does not set any value):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            output_hidden_states (`bool`, *optional*, defaults to `model.config.output_hidden_states` or `False` if the config does not set any value):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_scores (`bool`, *optional*, defaults to `model.config.output_scores` or `False` if the config does not set any value):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `model.config.return_dict_in_generate` or `False` if the config does not set any value):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
                The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful
                for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be
                the target language token.
            forced_eos_token_id (`int`, *optional*, defaults to `model.config.forced_eos_token_id`):
                The id of the token to force as the last generated token when `max_length` is reached.
            remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
                Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from
                crashing. Note that using `remove_invalid_values` can slow down generation.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed for ZeRO stage 3).
            exponential_decay_length_penalty (`tuple(int, float)`, *optional*, defaults to `model.config.exponential_decay_length_penalty`):
                This tuple adds an exponentially increasing length penalty after a certain number of tokens have been
                generated. The tuple shall consist of `(start_index, decay_factor)`, where `start_index` indicates
                where the penalty starts and `decay_factor` represents the factor of exponential decay.
            suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`):
                A list of tokens that will be suppressed at generation. The `SuppressTokensLogitsProcessor` will set
                their log probs to `-inf` so that they are not sampled.
            begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
                A list of tokens that will be suppressed at the beginning of the generation. The
                `SuppressTokensAtBeginLogitsProcessor` will set their log probs to `-inf` so that they are not
                sampled.
            forced_decoder_ids (`List[List[int]]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of pairs of integers which indicate a mapping from generation indices to token indices that
                will be forced before sampling. For example, `[[1, 123]]` means the second generated token will
                always be a token of index 123.
            model_kwargs:
                Additional model specific kwargs will be forwarded to the `forward` function of the model.
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchDecoderOnlyOutput`], - [`~generation_utils.SampleDecoderOnlyOutput`], - [`~generation_utils.BeamSearchDecoderOnlyOutput`], - [`~generation_utils.BeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation_utils.GreedySearchEncoderDecoderOutput`], - [`~generation_utils.SampleEncoderDecoderOutput`], - [`~generation_utils.BeamSearchEncoderDecoderOutput`], - [`~generation_utils.BeamSampleEncoderDecoderOutput`] Examples: Greedy Decoding: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> prompt = "Today I believe we can finally" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> # generate up to 30 tokens >>> outputs = model.generate(input_ids, do_sample=False, max_length=30) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\n'] ``` Multinomial Sampling: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> prompt = "Today I believe we can finally" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> # sample up to 30 tokens >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.generate(input_ids, do_sample=True, max_length=30) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today I believe we can finally get rid of discrimination," said Rep. Mark Pocan (D-Wis.).\n\n"Just look at the'] ``` Beam-search decoding: ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> sentence = "Paris is one of the densest populated areas in Europe." >>> input_ids = tokenizer(sentence, return_tensors="pt").input_ids >>> outputs = model.generate(input_ids, num_beams=5) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Paris ist eines der dichtesten besiedelten Gebiete Europas.'] ```""" # 0. Validate the `.generate()` call self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) # 1. 
Set generation parameters if not already defined bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id num_beams = num_beams if num_beams is not None else self.config.num_beams length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups do_sample = do_sample if do_sample is not None else self.config.do_sample num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id if eos_token_id is None and hasattr(self.config, "decoder"): eos_token_id = self.config.decoder.eos_token_id if pad_token_id is None and eos_token_id is not None: if model_kwargs.get("attention_mask", None) is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." ) logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # 2. Define model inputs # inputs_tensor has to be defined # model_input_name is defined if model-specific keyword input is passed # otherwise model_input_name is None # all model-specific keyword inputs are removed from `model_kwargs` inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] # 3. Define other model kwargs model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states model_kwargs["use_cache"] = use_cache accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys()) requires_attention_mask = "encoder_outputs" not in model_kwargs if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, pad_token_id, eos_token_id ) # decoder-only models should use left-padding for generation if not self.config.is_encoder_decoder: if pad_token_id is not None and torch.sum(inputs_tensor[:, -1] == pad_token_id) > 0: logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." 
) if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 4. Prepare `input_ids` which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids = self._prepare_decoder_input_ids_for_generation( batch_size, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, model_kwargs=model_kwargs, device=inputs_tensor.device, ) else: # if decoder-only then inputs_tensor has to be `input_ids` input_ids = inputs_tensor # 5. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = input_ids.shape[-1] if max_length is None and max_new_tokens is None: warnings.warn( "Neither `max_length` nor `max_new_tokens` has been set, `max_length` will default to " f"{self.config.max_length} (`self.config.max_length`). Controlling `max_length` via the config is " "deprecated and `max_length` will be removed from the config in v5 of Transformers -- we recommend " "using `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif max_length is None and max_new_tokens is not None: max_length = max_new_tokens + input_ids_seq_length elif max_length is not None and max_new_tokens is not None: raise ValueError( "Both `max_new_tokens` and `max_length` have been set but they serve the same purpose -- setting a" " limit to the generated output length. Remove one of those arguments. Please refer to the" " documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) # default to config if still None max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length if min_length is not None and min_length > max_length: raise ValueError( f"Unfeasible length constraints: the minimum length ({min_length}) is larger than the maximum " f"length ({max_length})" ) if input_ids_seq_length >= max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {max_length}. This can lead to unexpected behavior. You should consider increasing " "`max_new_tokens`." ) # 6. 
determine generation mode is_constraint_gen_mode = constraints is not None or force_words_ids is not None is_contrastive_search_gen_mode = ( top_k is not None and top_k > 1 and do_sample is False and penalty_alpha is not None and penalty_alpha > 0 ) is_greedy_gen_mode = ( (num_beams == 1) and (num_beam_groups == 1) and do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_sample_gen_mode = ( (num_beams == 1) and (num_beam_groups == 1) and do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_gen_mode = ( (num_beams > 1) and (num_beam_groups == 1) and do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_sample_gen_mode = ( (num_beams > 1) and (num_beam_groups == 1) and do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_group_beam_gen_mode = ( (num_beams > 1) and (num_beam_groups > 1) and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) if num_beam_groups > num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." ) if self.device.type != input_ids.device.type: warnings.warn( "You are calling .generate() with the `input_ids` being on a device type different" f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model" f" is on {self.device.type}. You may experience unexpected behaviors or slower generation." " Please make sure that you have put `input_ids` to the" f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before" " running `.generate()`.", UserWarning, ) # 7. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=inputs_tensor, bad_words_ids=bad_words_ids, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, num_beams=num_beams, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty, remove_invalid_values=remove_invalid_values, exponential_decay_length_penalty=exponential_decay_length_penalty, logits_processor=logits_processor, renormalize_logits=renormalize_logits, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, forced_decoder_ids=forced_decoder_ids, ) # 8. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria ) # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search." ) # 10. 
run greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_contrastive_search_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing contrastive search." ) return self.contrastive_search( input_ids, top_k=top_k, penalty_alpha=penalty_alpha, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, typical_p=typical_p, temperature=temperature, num_beams=num_beams, renormalize_logits=renormalize_logits, ) # 11. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 12. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_sample_gen_mode: # 10. prepare logits warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, typical_p=typical_p, temperature=temperature, num_beams=num_beams, renormalize_logits=renormalize_logits, ) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size * num_return_sequences, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams * num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. 
run beam sample return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_group_beam_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if num_beams % num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if typical_p is not None: raise ValueError("Decoder argument `typical_p` is not supported with beam groups.") # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, max_length=stopping_criteria.max_length, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_constraint_gen_mode: if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if num_beams <= 1: raise ValueError("`num_beams` needs to be greater than 1 for constrained generation.") if do_sample: raise ValueError("`do_sample` needs to be false for constrained generation.") if num_beam_groups is not None and num_beam_groups > 1: raise ValueError("`num_beam_groups` not supported yet for constrained generation.") final_constraints = [] if constraints is not None: final_constraints = constraints if force_words_ids is not None: def typeerror(): raise ValueError( "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`" f"of positive integers, but is {force_words_ids}." ) if not isinstance(force_words_ids, list) or len(force_words_ids) == 0: typeerror() for word_ids in force_words_ids: if isinstance(word_ids[0], list): if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any(not isinstance(token_ids, list) for token_ids in word_ids): typeerror() if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in word_ids ): typeerror() constraint = DisjunctiveConstraint(word_ids) else: if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids): typeerror() constraint = PhrasalConstraint(word_ids) final_constraints.append(constraint) # 10. 
prepare beam search scorer constrained_beam_scorer = ConstrainedBeamSearchScorer( constraints=final_constraints, batch_size=batch_size, num_beams=num_beams, device=inputs_tensor.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # 11. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) # 12. run beam search return self.constrained_beam_search( input_ids, constrained_beam_scorer=constrained_beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) def contrastive_search( self, input_ids: torch.LongTensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[ContrastiveSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. 
return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.ContrastiveSearchDecoderOnlyOutput`], [`~generation_utils.ContrastiveSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.ContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.ContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="pt") >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)]) >>> outputs = model.contrastive_search( ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare inputs model_kwargs["use_cache"] = True model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # if the first step in the loop, encode all the prefix and obtain three parameters: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past") is None: # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) # past_key_values is required for fast decoding if "past_key_values" not in outputs: raise ValueError( f"{self.__class__.__name__} cannot return `past_key_values` and can therefore **not** be used " "for contrastive search." ) past_key_values = outputs.past_key_values # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :] model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty bsz, seqlen, embed_dim = last_hidden_states.size() # logits processor logit_for_next_step = logits_processor(input_ids, logit_for_next_step) logit_for_next_step = logits_warper(input_ids, logit_for_next_step) next_probs = nn.functional.softmax(logit_for_next_step, dim=-1) _, top_k_ids = torch.topk(logit_for_next_step, dim=-1, k=top_k) top_k_probs = torch.gather(next_probs, dim=1, index=top_k_ids) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logit_for_next_step,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # enlarge the past_key_values new_key_values = [] for layer in past_key_values: items = [] # item is either the key or the value matrix for item in layer: bsz, num_head, seq_len, esz = item.size() item = ( item.unsqueeze(1) .expand(-1, top_k, -1, -1, -1) .reshape(bsz * top_k, num_head, seq_len, esz) .contiguous() ) # [bsz*beam, num_head, seq_len, esz] items.append(item) new_key_values.append(items) past_key_values = new_key_values # build next attention mask if "attention_mask" in model_inputs: attention_mask = model_kwargs["attention_mask"] # [B, S] attention_mask = attention_mask.unsqueeze(1).expand(-1, top_k, -1).reshape(-1, attention_mask.size(-1)) else: attention_mask = None # encoder-decoder model also contains the `encoder_outputs` if self.config.is_encoder_decoder and "encoder_outputs" in model_inputs: encoder_outputs = model_inputs["encoder_outputs"] else: encoder_outputs = None 
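            # all top-k candidates are scored in a single forward pass below: the candidate ids are flattened to
            # shape (batch_size * top_k, 1) so that they line up with the expanded past_key_values and attention_mask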
next_model_inputs = self.prepare_inputs_for_generation( top_k_ids.view(-1, 1), past=past_key_values, attention_mask=attention_mask, use_cache=True, encoder_outputs=encoder_outputs, ) # compute the candidate tokens by the language model and collects their hidden_states outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) past_key_values = outputs.past_key_values logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = ( last_hidden_states.unsqueeze(1).expand(-1, top_k, -1, -1).reshape(bsz * top_k, seqlen, embed_dim) ) # compute the degeneratin penalty and re-rank the candidates based on the degeneration penalty and the # model confidence selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx] next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k)) next_hidden = next_hidden[range(bsz), selected_idx, :] last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1) decoder_hidden_states = [] for layer in full_hidden_states: layer = torch.stack(torch.split(layer.squeeze(dim=1), top_k)) layer = layer[range(bsz), selected_idx, :] decoder_hidden_states.append(layer) # select the past_key_value new_key_values = [] for layer in past_key_values: items = [] # item is either the key or the value matrix for item in layer: bsz_and_beam, num_head, seq_len, esz = item.size() bsz = int(bsz_and_beam // top_k) item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz] item = item[range(bsz), selected_idx, :, :, :] # [B, num_head, seq_len, esz] items.append(item) new_key_values.append(items) past_key_values = new_key_values logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(bsz), selected_idx, :] # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: outputs = Seq2SeqLMOutput( past_key_values=past_key_values, decoder_hidden_states=decoder_hidden_states, ) else: outputs = CausalLMOutputWithPast( past_key_values=past_key_values, hidden_states=decoder_hidden_states, attentions=model_kwargs["attention_mask"], ) # contrastive_search main logic end if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in 
one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return ContrastiveSearchEncoderDecoderOutput( sequences=input_ids, scores=scores, decoder_hidden_states=decoder_hidden_states, ) else: return ContrastiveSearchDecoderOnlyOutput( sequences=input_ids, scores=scores, hidden_states=decoder_hidden_states, ) else: return input_ids def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. 
Return: [`~generation_utils.GreedySearchDecoderOnlyOutput`], [`~generation_utils.GreedySearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "It might be possible to" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> outputs = model.greedy_search( ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["It might be possible to get a better understanding of the nature of the problem, but it's not"] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) 
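        # NOTE (explanatory sketch): `unfinished_sequences` is a per-sequence 1/0 mask. Once a sequence produces
        # `eos_token_id`, its entry flips to 0, and the update performed later in the loop,
        #     next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
        # keeps appending `pad_token_id` to finished sequences while the rest of the batch continues to decode.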
this_peer_finished = False # used by synced_gpus only sync_func = torch.cuda.synchronize while True: timers("generate-forward").start(sync_func) if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_tokens_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: timers("generate-forward").stop(sync_func) break else: this_peer_finished = True timers("generate-forward").stop(sync_func) if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, 
pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.SampleDecoderOnlyOutput`], [`~generation_utils.SampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.SampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... StoppingCriteriaList, ... MaxLengthCriteria, ... 
) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.sample( ... input_ids, ... logits_processor=logits_processor, ... logits_warper=logits_warper, ... stopping_criteria=stopping_criteria, ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) # stop when each sentence is finished, or if we exceed the maximum length if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: 
Optional[bool] = False, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You haven't defined any stopping_criteria; generation will likely run forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam with 0 and the rest with -1e9.
This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores_processed,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + 
(beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. 
eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSampleDecoderOnlyOutput`], [`~generation_utils.BeamSampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> outputs = model.beam_sample( ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logits_warper(input_ids, next_token_scores_processed),) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSampleEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSampleDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) 
else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = False, **model_kwargs, ): r""" Generates sequences of token ids for models with a language modeling head using **diverse beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs that will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation_utils.BeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ...
AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... HammingDiversityLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run diverse beam search using 6 beams >>> num_beams = 6 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... num_beam_groups=3, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.group_beam_search( ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape if return_dict_in_generate and output_scores: beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)] else: beam_indices = None if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." 
) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in # the same group don't produce same tokens everytime. beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores_processed = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1) next_token_scores = next_token_scores.expand_as(next_token_scores_processed) if output_scores: processed_score[batch_group_indices] = next_token_scores_processed # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = torch_int_div(next_tokens, vocab_size) next_tokens = next_tokens % vocab_size # stateless process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=process_beam_indices, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] if return_dict_in_generate and output_scores: beam_indices[beam_group_idx] = tuple( beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0])) ) input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * torch_int_div(beam_idx, group_size) + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=final_beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if 
self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def constrained_beam_search( self, input_ids: torch.LongTensor, constrained_beam_scorer: ConstrainedBeamSearchScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **constrained beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. constrained_beam_scorer (`ConstrainedBeamSearchScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. For more information, the documentation of [`ConstrainedBeamSearchScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. 
output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`generation_utilsBeamSearchDecoderOnlyOutput`], [`~generation_utils.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation_utils.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation_utils.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... ConstrainedBeamSearchScorer, ... PhrasalConstraint, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> constraint_str = "Sie" >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] >>> # instantiate beam scorer >>> beam_scorer = ConstrainedBeamSearchScorer( ... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.constrained_beam_search( ... input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt sind Sie?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(constrained_beam_scorer._beam_hyps) num_beams = constrained_beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) scores_for_all_vocab = next_token_scores.clone() # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = (next_tokens / vocab_size).long() next_tokens = next_tokens % vocab_size # stateless beam_outputs = constrained_beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, scores_for_all_vocab, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) # increase cur_len cur_len = cur_len + 1 if constrained_beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = constrained_beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, 
) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] The provided code snippet includes necessary dependencies for implementing the `get_parameter_dtype` function. Write a Python function `def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"])` to solve the following problem: Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found. Here is the function: def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): """ Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found. """ last_dtype = None for t in parameter.parameters(): last_dtype = t.dtype if t.is_floating_point(): return t.dtype if last_dtype is not None: # if no floating dtype was found return whatever the first dtype is return last_dtype else: # For nn.DataParallel compatibility in PyTorch > 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) last_tuple = None for tuple in gen: last_tuple = tuple if tuple[1].is_floating_point(): return tuple[1].dtype # fallback to the last dtype return last_tuple[1].dtype
Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found.
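To make the dtype-scanning behaviour above concrete, here is a minimal sketch; it assumes the `get_parameter_dtype` function defined above is in scope (in the library it lives in `transformers.modeling_utils`), and the tiny `nn.Sequential` model is only for illustration.

```python
import torch
from torch import nn

# Assumes get_parameter_dtype (defined above) is in scope.
model = nn.Sequential(nn.Embedding(10, 4), nn.Linear(4, 2)).half()

# The first floating-point parameter encountered decides the result.
print(get_parameter_dtype(model))  # torch.float16
```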
10,729
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core The provided code snippet includes necessary dependencies for implementing the `get_state_dict_float_dtype` function. Write a Python function `def get_state_dict_float_dtype(state_dict)` to solve the following problem: Returns the first found floating dtype in `state_dict` or asserts if none were found. Here is the function: def get_state_dict_float_dtype(state_dict): """ Returns the first found floating dtype in `state_dict` or asserts if none were found. """ for t in state_dict.values(): if t.is_floating_point(): return t.dtype raise ValueError("couldn't find any floating point dtypes in state_dict")
Returns the first floating dtype found in `state_dict`, or raises an error if none is found.
10,730
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core The provided code snippet includes necessary dependencies for implementing the `get_state_dict_dtype` function. Write a Python function `def get_state_dict_dtype(state_dict)` to solve the following problem: Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype. Here is the function: def get_state_dict_dtype(state_dict): """ Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype. """ for t in state_dict.values(): if t.is_floating_point(): return t.dtype # if no floating dtype was found return whatever the first dtype is else: return next(state_dict.values()).dtype
Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype.
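A similar sketch for the state-dict variant, assuming `get_state_dict_dtype` above is in scope; the dictionary below is a hand-built stand-in for a real checkpoint.

```python
import torch

# Assumes get_state_dict_dtype (defined above) is in scope.
state_dict = {
    "embeddings.position_ids": torch.zeros(1, 8, dtype=torch.long),   # integer buffer
    "encoder.layer.0.weight": torch.zeros(4, 4, dtype=torch.bfloat16),
}

# The integer tensor is skipped; the first floating dtype wins.
print(get_state_dict_dtype(state_dict))  # torch.bfloat16
```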
10,731
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: def dtype_byte_size(dtype): """ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(torch.float32) 4 ``` """ if dtype == torch.bool: return 1 / 8 bit_search = re.search(r"[^\d](\d+)$", str(dtype)) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") bit_size = int(bit_search.groups()[0]) return bit_size // 8 WEIGHTS_NAME = "pytorch_model.bin" The provided code snippet includes necessary dependencies for implementing the `shard_checkpoint` function. Write a Python function `def shard_checkpoint( state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME )` to solve the following problem: Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): The name of the model save file. 
Here is the function: def shard_checkpoint( state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME ): """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): The name of the model save file. """ max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = {} current_block_size = 0 total_size = 0 for key, weight in state_dict.items(): weight_size = weight.numel() * dtype_byte_size(weight.dtype) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = {} current_block_size = 0 current_block[key] = weight current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin") shard_file = shard_file.replace( ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" ) shards[shard_file] = shard for key in shard.keys(): weight_map[key] = shard_file # Add the metadata metadata = {"total_size": total_size} index = {"metadata": metadata, "weight_map": weight_map} return shards, index
Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): The name of the model save file.
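A small worked example of the splitting rule, assuming `shard_checkpoint` above is in scope; the 1 MB float32 tensors and the `"2MB"` limit are arbitrary illustration values.

```python
import torch

# Assumes shard_checkpoint (defined above) is in scope.
# Four float32 tensors of ~1MB each, sharded with a 2MB limit.
state_dict = {f"layer.{i}.weight": torch.zeros(250_000) for i in range(4)}
shards, index = shard_checkpoint(state_dict, max_shard_size="2MB")

print(list(shards.keys()))
# ['pytorch_model-00001-of-00002.bin', 'pytorch_model-00002-of-00002.bin']
print(index["weight_map"]["layer.2.weight"])
# 'pytorch_model-00002-of-00002.bin'
```

Because weights are assigned greedily in key order, layers 0 and 1 fill the first shard and layers 2 and 3 spill into the second.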
10,732
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: def load_state_dict(checkpoint_file: Union[str, os.PathLike]): """ Reads a PyTorch checkpoint file, returning properly formatted errors if they arise. """ if checkpoint_file.endswith(".safetensors") and is_safetensors_available(): # Check format of the archive with safe_open(checkpoint_file, framework="pt") as f: metadata = f.metadata() if metadata.get("format") not in ["pt", "tf", "flax"]: raise OSError( f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " "you save your model with the `save_pretrained` method." ) elif metadata["format"] != "pt": raise NotImplementedError( f"Conversion from a {metadata['format']} safetensors archive to PyTorch is not implemented yet." ) return safe_load_file(checkpoint_file) try: return torch.load(checkpoint_file, map_location="cpu") except Exception as e: try: with open(checkpoint_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError( f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " "model. Make sure you have saved the model properly." ) from e except (UnicodeDecodeError, ValueError): raise OSError( f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' " f"at '{checkpoint_file}'. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True." ) WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" The provided code snippet includes necessary dependencies for implementing the `load_sharded_checkpoint` function. Write a Python function `def load_sharded_checkpoint(model, folder, strict=True)` to solve the following problem: This is the same as [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) but for a sharded checkpoint. 
This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`torch.nn.Module`): The model in which to load the checkpoint. folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. strict (`bool`, *optional`, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields - `missing_keys` is a list of str containing the missing keys - `unexpected_keys` is a list of str containing the unexpected keys Here is the function: def load_sharded_checkpoint(model, folder, strict=True): """ This is the same as [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) but for a sharded checkpoint. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`torch.nn.Module`): The model in which to load the checkpoint. folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. strict (`bool`, *optional`, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields - `missing_keys` is a list of str containing the missing keys - `unexpected_keys` is a list of str containing the unexpected keys """ # Load the index index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) if not os.path.isfile(index_file): raise ValueError(f"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.") with open(index_file, "r", encoding="utf-8") as f: index = json.load(f) shard_files = list(set(index["weight_map"].values())) # If strict=True, error before loading any of the state dicts. loaded_keys = index["weight_map"].keys() model_keys = model.state_dict().keys() missing_keys = [key for key in model_keys if key not in loaded_keys] unexpected_keys = [key for key in loaded_keys if key not in model_keys] if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" if len(missing_keys) > 0: str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) error_message += f"\nMissing key(s): {str_missing_keys}." if len(unexpected_keys) > 0: str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) error_message += f"\nMissing key(s): {str_unexpected_keys}." raise RuntimeError(error_message) for shard_file in shard_files: state_dict = torch.load(os.path.join(folder, shard_file)) model.load_state_dict(state_dict, strict=False) # Make sure memory is fred before we load the next state dict. del state_dict gc.collect() # Return the same thing as PyTorch load_state_dict function. return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)
This is the same as [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) but for a sharded checkpoint. This load is performed efficiently: each checkpoint shard is loaded into RAM one by one and deleted after being loaded into the model. Args: model (`torch.nn.Module`): The model in which to load the checkpoint. folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields - `missing_keys` is a list of str containing the missing keys - `unexpected_keys` is a list of str containing the unexpected keys
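A round-trip sketch of how this pairs with a sharded `save_pretrained`; the checkpoint name and shard size are placeholders, and the snippet assumes network access plus `load_sharded_checkpoint` being importable from `transformers.modeling_utils`.

```python
import tempfile

from transformers import AutoModel
from transformers.modeling_utils import load_sharded_checkpoint

model = AutoModel.from_pretrained("bert-base-cased")  # any checkpoint works

with tempfile.TemporaryDirectory() as tmp_dir:
    # Force a sharded save: writes pytorch_model.bin.index.json plus shard files.
    model.save_pretrained(tmp_dir, max_shard_size="200MB")
    # Stream the shards back in one at a time.
    result = load_sharded_checkpoint(model, tmp_dir)
    print(result.missing_keys, result.unexpected_keys)  # [] []
```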
10,733
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: def is_deepspeed_zero3_enabled(): if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: return _hf_deepspeed_config_weak_ref().is_zero3() else: return False def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata error_msgs = [] # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module: nn.Module, state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) # Parameters of module and children will start with prefix. We can exit early if there are none in this # state_dict if len([key for key in state_dict if key.startswith(prefix)]) > 0: if is_deepspeed_zero3_enabled(): import deepspeed # In sharded models, each shard has only part of the full state_dict, so only gather # parameters that are in the current state_dict. 
named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] if len(params_to_gather) > 0: # because zero3 puts placeholders in model params, this context # manager gathers (unpartitions) the params of the current layer, then loads from # the state dict and then re-partitions them again with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): if torch.distributed.get_rank() == 0: module._load_from_state_dict(*args) else: module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: load(child, state_dict, prefix + name + ".") load(model_to_load, state_dict, prefix=start_prefix) # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so # it's safe to delete it. del state_dict return error_msgs
null
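One easy-to-miss behaviour of the `_load_state_dict_into_model` helper above is its silent renaming of old-style `gamma`/`beta` keys to `weight`/`bias`. A minimal sketch, assuming the helper (and the imports it relies on) is in scope:

```python
import torch
from torch import nn

# Assumes _load_state_dict_into_model (defined above) is in scope.
model = nn.Linear(2, 2)

# Old TF-converted checkpoints used "gamma"/"beta" for "weight"/"bias".
old_style = {"gamma": torch.ones(2, 2), "beta": torch.zeros(2)}
error_msgs = _load_state_dict_into_model(model, old_style, start_prefix="")

print(error_msgs)         # []
print(model.weight.data)  # all ones: the renamed keys were loaded
```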
10,734
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: def find_submodule_and_param_name(model, long_key, start_prefix): """ A helper util to find the last sub-module and the param/buffer name. If `start_prefix` is supplied it'll be removed from the start of the key """ if len(start_prefix) > 0 and long_key.startswith(start_prefix): long_key = ".".join(long_key.split(".")[1:]) split_key = long_key.split(".") submodule = model while len(split_key) > 1: if hasattr(submodule, split_key[0]): submodule = getattr(submodule, split_key[0]) del split_key[0] else: submodule = None break if submodule == model: submodule = None return submodule, split_key[0] def require_version_core(requirement): """require_version wrapper which emits a core-specific hint on failure""" hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(requirement, hint) The provided code snippet includes necessary dependencies for implementing the `_move_model_to_meta` function. Write a Python function `def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix)` to solve the following problem: Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params. `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in `bert.pooler.dense.weight` Here is the function: def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix): """ Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params. `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in `bert.pooler.dense.weight` """ # meta device was added in pt=1.9 require_version_core("torch>=1.9") # dematerialize param storage for keys that are going to be replaced by state_dict, by # putting those on the meta device for k in loaded_state_dict_keys: submodule, param_name = find_submodule_and_param_name(model, k, start_prefix) if submodule is not None: # selectively switch to the meta device only those params/buffers that will # be next replaced from state_dict. 
This a complex way to do p.to_("meta") # since we have no in-place to_ for tensors. new_val = getattr(submodule, param_name) if isinstance(new_val, torch.nn.Parameter): # isinstance returns False for Params on meta device, so switch after the check new_val = torch.nn.Parameter(new_val.to("meta")) else: new_val = new_val.to("meta") setattr(submodule, param_name, new_val)
Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params. `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in `bert.pooler.dense.weight`
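A tiny sketch of the effect, assuming `_move_model_to_meta` and its `find_submodule_and_param_name` helper above are in scope and PyTorch >= 1.9 is installed:

```python
import torch
from torch import nn

# Assumes _move_model_to_meta (defined above) is in scope.
model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
loaded_keys = list(model.state_dict().keys())  # ['0.weight', '0.bias', ...]

_move_model_to_meta(model, loaded_keys, start_prefix="")

# The parameter storages are gone; only shapes/dtypes remain on the meta device.
print(model[0].weight.device)  # meta
```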
10,735
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core try: from torch.nn import Identity except ImportError: def set_module_8bit_tensor_to_device(module, tensor_name, device, value=None): """ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The function is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the class `Int8Params` from `bitsandbytes`. Args: module (`torch.nn.Module`): The module in which the tensor we want to move lives. tensor_name (`str`): The full name of the parameter/buffer. device (`int`, `str` or `torch.device`): The device on which to set the tensor. value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any other device). """ # Recurse if needed if "." 
in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") is_buffer = tensor_name in module._buffers old_value = getattr(module, tensor_name) if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") if is_buffer: has_fp16_weights = None else: has_fp16_weights = getattr(module._parameters[tensor_name], "has_fp16_weights", None) if has_fp16_weights is not None: param = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: new_value = old_value.to(device) elif isinstance(value, torch.Tensor): new_value = value.to("cpu") if value.dtype == torch.int8: raise ValueError( "You cannot load weights that are saved in int8 using `load_in_8bit=True`, make sure you are", " using `load_in_8bit=True` on float32/float16/bfloat16 weights.", ) else: new_value = torch.tensor(value, device="cpu") new_value = bnb.nn.Int8Params(new_value, requires_grad=False, has_fp16_weights=has_fp16_weights).to(device) module._parameters[tensor_name] = new_value else: if value is None: new_value = old_value.to(device) elif isinstance(value, torch.Tensor): new_value = value.to(device) else: new_value = torch.tensor(value, device=device) if is_buffer: module._buffers[tensor_name] = new_value else: new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad) module._parameters[tensor_name] = new_value The provided code snippet includes necessary dependencies for implementing the `_load_state_dict_into_meta_model` function. Write a Python function `def _load_state_dict_into_meta_model( model, state_dict, loaded_state_dict_keys, # left for now but could be removed, see below start_prefix, expected_keys, device_map=None, offload_folder=None, offload_index=None, state_dict_folder=None, state_dict_index=None, dtype=None, load_in_8bit=False, )` to solve the following problem: This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the params back to the normal device, but only for `loaded_state_dict_keys`. `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in `bert.pooler.dense.weight` Here is the function: def _load_state_dict_into_meta_model( model, state_dict, loaded_state_dict_keys, # left for now but could be removed, see below start_prefix, expected_keys, device_map=None, offload_folder=None, offload_index=None, state_dict_folder=None, state_dict_index=None, dtype=None, load_in_8bit=False, ): """ This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the params back to the normal device, but only for `loaded_state_dict_keys`. `start_prefix` is used for models which insert their name into model keys, e.g. 
`bert` in `bert.pooler.dense.weight` """ # XXX: remaining features to implement to be fully compatible with _load_state_dict_into_model # - deepspeed zero 3 support # - need to copy metadata if any - see _load_state_dict_into_model # - handling error_msgs - mimicking the error handling in module._load_from_state_dict() # - Is there a situation where some keys aren't in `loaded_state_dict_keys` and in which case # they won't get loaded. if load_in_8bit: from .utils.bitsandbytes import set_module_8bit_tensor_to_device error_msgs = [] old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) for param_name, param in state_dict.items(): # First part of the test is always true as load_state_dict_keys always contains state_dict keys. if param_name not in loaded_state_dict_keys or param_name not in expected_keys: continue if param_name.startswith(start_prefix): param_name = param_name[len(start_prefix) :] module_name = param_name # We convert floating dtypes to the `dtype` passed.We want to keep the buffers/params # in int/uint/bool and not cast them. if dtype is not None and torch.is_floating_point(param): param = param.to(dtype) if device_map is None: param_device = "cpu" else: # find next higher level module that is defined in device_map: # bert.lm_head.weight -> bert.lm_head -> bert -> '' while len(module_name) > 0 and module_name not in device_map: module_name = ".".join(module_name.split(".")[:-1]) if module_name == "" and "" not in device_map: # TODO: group all errors and raise at the end. raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] if param_device == "disk": offload_index = offload_weight(param, param_name, offload_folder, offload_index) elif param_device == "cpu" and state_dict_index is not None: state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) elif not load_in_8bit: set_module_tensor_to_device(model, param_name, param_device, value=param) else: set_module_8bit_tensor_to_device(model, param_name, param_device, value=param) return error_msgs, offload_index, state_dict_index
This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the params back to the normal device, but only for `loaded_state_dict_keys`. `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in `bert.pooler.dense.weight`
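The key routing step in the function above is the walk up the module path until a prefix appears in `device_map`. The snippet below restates just that loop as a stand-alone helper for illustration; `resolve_device` and the example `device_map` are hypothetical names, not part of the library.

```python
# Hypothetical stand-alone restatement of the device_map lookup loop.
device_map = {"transformer.h.0": 0, "transformer.h.1": "cpu", "": "disk"}

def resolve_device(param_name: str, device_map: dict):
    module_name = param_name
    # bert.lm_head.weight -> bert.lm_head -> bert -> ''
    while len(module_name) > 0 and module_name not in device_map:
        module_name = ".".join(module_name.split(".")[:-1])
    return device_map[module_name]

print(resolve_device("transformer.h.1.attn.c_attn.weight", device_map))  # 'cpu'
print(resolve_device("lm_head.weight", device_map))                      # 'disk'
```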
10,736
import gc import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .generation_utils import GenerationMixin from .pytorch_utils import ( # noqa: F401 Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_conv1d_layer, prune_layer, prune_linear_layer, ) from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, is_remote_url, is_safetensors_available, logging, replace_return_docstrings, ) from .utils.versions import require_version_core The provided code snippet includes necessary dependencies for implementing the `unwrap_model` function. Write a Python function `def unwrap_model(model: nn.Module) -> nn.Module` to solve the following problem: Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap. Here is the function: def unwrap_model(model: nn.Module) -> nn.Module: """ Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap. """ # since there could be multiple levels of wrapping, unwrap recursively if hasattr(model, "module"): return unwrap_model(model.module) else: return model
Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap.
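A quick sketch, assuming `unwrap_model` above is in scope; `nn.DataParallel` is used here only because it is a common wrapper that exposes the original model as `.module`.

```python
import torch
from torch import nn

# Assumes unwrap_model (defined above) is in scope.
model = nn.Linear(4, 2)
wrapped = nn.DataParallel(model)       # stores the original model under .module

print(unwrap_model(wrapped) is model)  # True
print(unwrap_model(model) is model)    # True: already-unwrapped models pass through
```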
10,737
import inspect import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import torch import torch.distributed as dist from torch import nn from flexgen.timer import timers from .generation_beam_constraints import Constraint, DisjunctiveConstraint, PhrasalConstraint from .generation_beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer from .generation_logits_process import ( EncoderNoRepeatNGramLogitsProcessor, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, ForceTokensLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, ) from .generation_stopping_criteria import ( MaxLengthCriteria, MaxTimeCriteria, StoppingCriteria, StoppingCriteriaList, validate_stopping_criteria, ) from .modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput from .models.auto import ( MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .pytorch_utils import torch_int_div from .utils import ModelOutput, logging class TopPLogitsWarper(LogitsWarper): """ [`LogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): top_p = float(top_p) if top_p < 0 or top_p > 1.0: raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: sorted_logits, sorted_indices = torch.sort(scores, descending=False) cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p) if self.min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class TopKLogitsWarper(LogitsWarper): r""" [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. 
min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = top_k self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: top_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None] scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores The provided code snippet includes necessary dependencies for implementing the `top_k_top_p_filtering` function. Write a Python function `def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor` to solve the following problem: Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 Here is the function: def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor: """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ if top_k > 0: logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) if 0 <= top_p <= 1.0: logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) return logits
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
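A minimal usage sketch for the filtering helper above. It assumes top_k_top_p_filtering is in scope as defined; the batch size, vocabulary size and the top_k/top_p values are illustrative only.

import torch

logits = torch.randn(2, 50257)  # (batch size, vocabulary size)
filtered = top_k_top_p_filtering(logits, top_k=50, top_p=0.95)
probs = torch.softmax(filtered, dim=-1)  # filtered positions end up with ~0 probability
next_tokens = torch.multinomial(probs, num_samples=1)  # sample only from the kept tokens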
10,738
import inspect import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import torch import torch.distributed as dist from torch import nn from flexgen.timer import timers from .generation_beam_constraints import Constraint, DisjunctiveConstraint, PhrasalConstraint from .generation_beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer from .generation_logits_process import ( EncoderNoRepeatNGramLogitsProcessor, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, ForceTokensLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, ) from .generation_stopping_criteria import ( MaxLengthCriteria, MaxTimeCriteria, StoppingCriteria, StoppingCriteriaList, validate_stopping_criteria, ) from .modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput from .models.auto import ( MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, ) from .pytorch_utils import torch_int_div from .utils import ModelOutput, logging The provided code snippet includes necessary dependencies for implementing the `_ranking_fast` function. Write a Python function `def _ranking_fast( context_hidden: torch.FloatTensor, next_hidden: torch.FloatTensor, next_top_k_probs: torch.FloatTensor, alpha: float, beam_width: int, ) -> torch.FloatTensor` to solve the following problem: Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each row in the batch. Here is the function: def _ranking_fast( context_hidden: torch.FloatTensor, next_hidden: torch.FloatTensor, next_top_k_probs: torch.FloatTensor, alpha: float, beam_width: int, ) -> torch.FloatTensor: """ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each row in the batch. """ norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True) norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True) cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S] degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K] next_top_k_probs = next_top_k_probs.view(-1) # [B*K] contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K] _, selected_idx = contrastive_score.max(dim=-1) # [B] return selected_idx
Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each row in the batch.
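A shape-level sketch of how _ranking_fast is typically invoked during contrastive search. The toy sizes (batch B, beam width K, context length S, hidden size H) and the alpha value are assumptions for illustration, not values taken from the source.

import torch

B, K, S, H = 2, 4, 5, 8
context_hidden = torch.randn(B * K, S, H)      # hidden states of the already generated context, per candidate
next_hidden = torch.randn(B * K, 1, H)         # hidden state of each top-k candidate token
next_top_k_probs = torch.softmax(torch.randn(B, K), dim=-1)
selected_idx = _ranking_fast(context_hidden, next_hidden, next_top_k_probs, alpha=0.6, beam_width=K)
print(selected_idx.shape)                      # torch.Size([2]) -> one winning candidate per batch row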
10,739
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False The provided code snippet includes necessary dependencies for implementing the `_is_end_of_word` function. Write a Python function `def _is_end_of_word(text)` to solve the following problem: Checks whether the last character in text is one of a punctuation, control or whitespace character. Here is the function: def _is_end_of_word(text): """Checks whether the last character in text is one of a punctuation, control or whitespace character.""" last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
Checks whether the last character in text is a punctuation, control, or whitespace character.
10,740
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False The provided code snippet includes necessary dependencies for implementing the `_is_start_of_word` function. Write a Python function `def _is_start_of_word(text)` to solve the following problem: Checks whether the first character in text is one of a punctuation, control or whitespace character. Here is the function: def _is_start_of_word(text): """Checks whether the first character in text is one of a punctuation, control or whitespace character.""" first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
Checks whether the first character in text is a punctuation, control, or whitespace character.
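A tiny sanity check covering this helper together with _is_end_of_word from the previous entry; the example strings are made up, and the calls run as-is once the helpers (and their unicodedata-based dependencies) are defined.

print(_is_end_of_word("hello,"))    # True  -> last character is punctuation
print(_is_end_of_word("hello"))     # False -> last character is a letter
print(_is_start_of_word(" world"))  # True  -> first character is whitespace
print(_is_start_of_word("world"))   # False -> first character is a letter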
10,741
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging The provided code snippet includes necessary dependencies for implementing the `_insert_one_token_to_ordered_list` function. Write a Python function `def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str)` to solve the following problem: Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted. Here is the function: def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str): """ Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted. """ insertion_idx = bisect.bisect_left(token_list, new_token) # Checks if new_token is already in the ordered token_list if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token: # new_token is in token_list, don't add return else: token_list.insert(insertion_idx, new_token)
Inserts one token into an ordered list if it does not already exist. Note: token_list must be sorted.
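A quick usage sketch (the token strings are hypothetical): the input list must already be sorted, and inserting an existing token leaves it unchanged.

tokens = ["<cls>", "<mask>", "<sep>"]
_insert_one_token_to_ordered_list(tokens, "<pad>")   # inserted at its sorted position
_insert_one_token_to_ordered_list(tokens, "<mask>")  # already present -> no change
print(tokens)  # ['<cls>', '<mask>', '<pad>', '<sep>']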
10,742
import copy import json import os import re import warnings from typing import Any, Dict, List, Optional, Tuple, Union from packaging import version from . import __version__ from .dynamic_module_utils import custom_object_save from .utils import ( CONFIG_NAME, PushToHubMixin, cached_file, copy_func, download_url, extract_commit_hash, is_remote_url, is_torch_available, logging, ) _re_configuration_file = re.compile(r"config\.(.*)\.json") CONFIG_NAME = "config.json" The provided code snippet includes necessary dependencies for implementing the `get_configuration_file` function. Write a Python function `def get_configuration_file(configuration_files: List[str]) -> str` to solve the following problem: Get the configuration file to use for this version of transformers. Args: configuration_files (`List[str]`): The list of available configuration files. Returns: `str`: The configuration file to use. Here is the function: def get_configuration_file(configuration_files: List[str]) -> str: """ Get the configuration file to use for this version of transformers. Args: configuration_files (`List[str]`): The list of available configuration files. Returns: `str`: The configuration file to use. """ configuration_files_map = {} for file_name in configuration_files: search = _re_configuration_file.search(file_name) if search is not None: v = search.groups()[0] configuration_files_map[v] = file_name available_versions = sorted(configuration_files_map.keys()) # Defaults to FULL_CONFIGURATION_FILE and then try to look at some newer versions. configuration_file = CONFIG_NAME transformers_version = version.parse(__version__) for v in available_versions: if version.parse(v) <= transformers_version: configuration_file = configuration_files_map[v] else: # No point going further since the versions are sorted. break return configuration_file
Get the configuration file to use for this version of transformers. Args: configuration_files (`List[str]`): The list of available configuration files. Returns: `str`: The configuration file to use.
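A small illustration of the version-selection logic. The file names are hypothetical, and the commented result assumes the installed transformers version is at least 4.0.0 but below 42.0.0.

candidates = ["config.json", "config.4.0.0.json", "config.42.0.0.json"]
print(get_configuration_file(candidates))
# -> "config.4.0.0.json": the newest versioned file still compatible with the installed
#    version wins; the plain "config.json" default is used when nothing else matches.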
10,743
import os from pickle import UnpicklingError from typing import Dict, Tuple import numpy as np import jax import jax.numpy as jnp import transformers from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from .utils import logging logger = logging.get_logger(__name__) def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model): # convert pytorch tensor to numpy pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} model_prefix = flax_model.base_model_prefix random_flax_state_dict = flatten_dict(flax_model.params) flax_state_dict = {} load_model_with_head_into_base_model = (model_prefix not in flax_model.params) and ( model_prefix in set([k.split(".")[0] for k in pt_state_dict.keys()]) ) load_base_model_into_model_with_head = (model_prefix in flax_model.params) and ( model_prefix not in set([k.split(".")[0] for k in pt_state_dict.keys()]) ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): pt_tuple_key = tuple(pt_key.split(".")) # remove base model prefix if necessary has_base_model_prefix = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: pt_tuple_key = pt_tuple_key[1:] # Correctly rename weight parameters flax_key, flax_tensor = rename_key_and_reshape_tensor( pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix ) # add model prefix if necessary require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: flax_key = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
) # also add unexpected weight so that warning is thrown flax_state_dict[flax_key] = jnp.asarray(flax_tensor) return unflatten_dict(flax_state_dict) def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model): import torch # Load the index flax_state_dict = {} for shard_file in shard_filenames: # load using msgpack utils pt_state_dict = torch.load(shard_file) pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} model_prefix = flax_model.base_model_prefix random_flax_state_dict = flatten_dict(flax_model.params) load_model_with_head_into_base_model = (model_prefix not in flax_model.params) and ( model_prefix in set([k.split(".")[0] for k in pt_state_dict.keys()]) ) load_base_model_into_model_with_head = (model_prefix in flax_model.params) and ( model_prefix not in set([k.split(".")[0] for k in pt_state_dict.keys()]) ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): pt_tuple_key = tuple(pt_key.split(".")) # remove base model prefix if necessary has_base_model_prefix = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: pt_tuple_key = pt_tuple_key[1:] # Correctly rename weight parameters flax_key, flax_tensor = rename_key_and_reshape_tensor( pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix ) # add model prefix if necessary require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: flax_key = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown flax_state_dict[flax_key] = jnp.asarray(flax_tensor) return unflatten_dict(flax_state_dict) The provided code snippet includes necessary dependencies for implementing the `load_pytorch_checkpoint_in_flax_state_dict` function. Write a Python function `def load_pytorch_checkpoint_in_flax_state_dict( flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False )` to solve the following problem: Load pytorch checkpoints in a flax model Here is the function: def load_pytorch_checkpoint_in_flax_state_dict( flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False ): """Load pytorch checkpoints in a flax model""" try: import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise if not is_sharded: pt_path = os.path.abspath(pytorch_checkpoint_path) logger.info(f"Loading PyTorch weights from {pt_path}") pt_state_dict = torch.load(pt_path, map_location="cpu") logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.") flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model) return flax_state_dict
Load pytorch checkpoints in a flax model
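A hedged usage sketch: the checkpoint path is a placeholder and the model class is chosen only for illustration; in practice this conversion is usually triggered implicitly by from_pretrained(..., from_pt=True) rather than called directly.

from transformers import FlaxBertModel

flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
flax_model.params = load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, "path/to/pytorch_model.bin", is_sharded=False
)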
10,744
import os from pickle import UnpicklingError from typing import Dict, Tuple import numpy as np import jax import jax.numpy as jnp import transformers from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from .utils import logging logger = logging.get_logger(__name__) def load_flax_weights_in_pytorch_model(pt_model, flax_state): """Load flax checkpoints in a PyTorch model""" try: import torch # noqa: F401 except ImportError: logger.error( "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values() if any(is_type_bf16): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." ) flax_state = jax.tree_util.tree_map( lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state ) flax_state_dict = flatten_dict(flax_state) pt_model_dict = pt_model.state_dict() load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in set([k.split(".")[0] for k in pt_model_dict.keys()]) ) load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in set([k.split(".")[0] for k in pt_model_dict.keys()]) ) # keep track of unexpected & missing keys unexpected_keys = [] missing_keys = set(pt_model_dict.keys()) for flax_key_tuple, flax_tensor in flax_state_dict.items(): has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: flax_key_tuple = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict: # conv layer flax_key_tuple = flax_key_tuple[:-1] + ("weight",) flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1)) elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict: # linear layer flax_key_tuple = flax_key_tuple[:-1] + ("weight",) flax_tensor = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: flax_key_tuple = flax_key_tuple[:-1] + ("weight",) flax_key = ".".join(flax_key_tuple) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." 
) else: # add weight to pytorch dict flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor pt_model_dict[flax_key] = torch.from_numpy(flax_tensor) # remove from missing keys missing_keys.remove(flax_key) else: # weight is not expected by PyTorch model unexpected_keys.append(flax_key) pt_model.load_state_dict(pt_model_dict) # re-transform missing_keys to list missing_keys = list(missing_keys) if len(unexpected_keys) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) else: logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" " use it for predictions and inference." ) else: logger.warning( f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n" "If your task is similar to the task the model of the checkpoint was trained on, " f"you can already use {pt_model.__class__.__name__} for predictions without further training." ) return pt_model The provided code snippet includes necessary dependencies for implementing the `load_flax_checkpoint_in_pytorch_model` function. Write a Python function `def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path)` to solve the following problem: Load flax checkpoints in a PyTorch model Here is the function: def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path): """Load flax checkpoints in a PyTorch model""" flax_checkpoint_path = os.path.abspath(flax_checkpoint_path) logger.info(f"Loading Flax weights from {flax_checkpoint_path}") # import correct flax class flax_cls = getattr(transformers, "Flax" + model.__class__.__name__) # load flax weight dict with open(flax_checkpoint_path, "rb") as state_f: try: flax_state_dict = from_bytes(flax_cls, state_f.read()) except UnpicklingError: raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ") return load_flax_weights_in_pytorch_model(model, flax_state_dict)
Load flax checkpoints in a PyTorch model
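The reverse direction, again as a hedged sketch with placeholder paths; end users normally reach this code through from_pretrained(..., from_flax=True) rather than calling it directly.

from transformers import BertConfig, BertModel

config = BertConfig.from_pretrained("path/to/flax_model_dir")
pt_model = BertModel(config)
pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model_dir/flax_model.msgpack")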
10,745
import argparse import torch from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert from transformers.utils import logging def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path): # Initialise PyTorch model config = AlbertConfig.from_json_file(albert_config_file) print(f"Building PyTorch model from configuration: {config}") model = AlbertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_albert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path)
null
10,746
import math import os from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_albert` function. Write a Python function `def load_tf_weights_in_albert(model, config, tf_checkpoint_path)` to solve the following problem: Load tf checkpoints in a pytorch model. Here is the function: def load_tf_weights_in_albert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): print(name) for name, array in zip(names, arrays): original_name = name # If saved from the TF HUB module name = name.replace("module/", "") # Renaming and simplifying name = name.replace("ffn_1", "ffn") name = name.replace("bert/", "albert/") name = name.replace("attention_1", "attention") name = name.replace("transform/", "") name = name.replace("LayerNorm_1", "full_layer_layer_norm") name = name.replace("LayerNorm", "attention/LayerNorm") name = name.replace("transformer/", "") # The feed forward layer had an 'intermediate' step which has been abstracted away name = name.replace("intermediate/dense/", "") name = name.replace("ffn/intermediate/output/dense/", "ffn_output/") # ALBERT attention was split between self and output which have been abstracted away name = name.replace("/output/", "/") name = name.replace("/self/", "/") # The pooler is a linear layer name = name.replace("pooler/dense", "pooler") # The classifier was simplified to predictions from cls/predictions name = name.replace("cls/predictions", "predictions") name = name.replace("predictions/attention", "predictions") # Naming was changed to be more explicit name = name.replace("embeddings/attention", "embeddings") name = name.replace("inner_group_", "albert_layers/") name = name.replace("group_", "albert_layer_groups/") # Classifier if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name): name = "classifier/" + name # No ALBERT model currently handles the next sentence prediction task if "seq_relationship" in name: name = name.replace("seq_relationship/output_", 
"sop_classifier/classifier/") name = name.replace("weights", "weight") name = name.split("/") # Ignore the gradients applied by the LAMB/ADAM optimizers. if ( "adam_m" in name or "adam_v" in name or "AdamWeightDecayOptimizer" in name or "AdamWeightDecayOptimizer_1" in name or "global_step" in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except AssertionError as e: e.args += (pointer.shape, array.shape) raise print(f"Initialize PyTorch weight {name} from {original_name}") pointer.data = torch.from_numpy(array) return model
Load tf checkpoints in a pytorch model.
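Typical call pattern for the loader above, mirroring the standalone conversion script shown a few entries earlier; all paths are placeholders.

import torch
from transformers import AlbertConfig, AlbertForPreTraining

config = AlbertConfig.from_json_file("path/to/albert_config.json")
model = AlbertForPreTraining(config)
load_tf_weights_in_albert(model, config, "path/to/tf_checkpoint")
torch.save(model.state_dict(), "path/to/pytorch_model.bin")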
10,747
from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( DUMMY_INPUTS, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) from ...modeling_tf_outputs import ( TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutputWithPast, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSharedEmbeddings, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import logging from .configuration_gptj import GPTJConfig def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor: inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32) sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32) sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp) out = tf.concat((sin, cos), axis=1) return out
null
10,748
from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( DUMMY_INPUTS, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) from ...modeling_tf_outputs import ( TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutputWithPast, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSharedEmbeddings, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import logging from .configuration_gptj import GPTJConfig def rotate_every_two(x: tf.Tensor) -> tf.Tensor: rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1) new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])] rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape) return rotate_half_tensor def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor: sin_pos, cos_pos = sincos sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3) cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3) return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
null
10,749
from functools import partial from typing import Optional, Tuple import numpy as np import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gptj import GPTJConfig def create_sinusoidal_positions(num_pos, dim): inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim)) sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32") sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp) sentinel = dim // 2 + dim % 2 out = np.zeros((num_pos, dim)) out[:, 0:sentinel] = sin out[:, sentinel:] = cos return jnp.array(out)
null
10,750
from functools import partial from typing import Optional, Tuple import numpy as np import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gptj import GPTJConfig def rotate_every_two(tensor): rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1) rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,)) return rotate_half_tensor def apply_rotary_pos_emb(tensor, sincos): sin_pos, cos_pos = sincos sin_pos = sin_pos[:, :, None, :].repeat(2, 3) cos_pos = cos_pos[:, :, None, :].repeat(2, 3) return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
null
10,751
from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, ) from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_gptj import GPTJConfig def fixed_pos_embedding(x, seq_dim=1, seq_len=None): dim = x.shape[-1] if seq_len is None: seq_len = x.shape[seq_dim] inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim)) sinusoid_inp = ( torch.einsum("i , j -> i j", torch.arange(seq_len, dtype=torch.float), inv_freq).to(x.device).float() ) return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
null
10,752
from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, ) from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_gptj import GPTJConfig def rotate_every_two(x): def duplicate_interleave(m): def apply_rotary_pos_emb(x, sincos, offset=0): sin, cos = map(lambda t: duplicate_interleave(t)[None, offset : x.shape[1] + offset, None, :], sincos) # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2) return (x * cos) + (rotate_every_two(x) * sin)
null
10,753
import warnings from dataclasses import dataclass from functools import partial from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import is_scipy_available from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_fnet import FNetConfig def _two_dim_matmul(x, matrix_dim_one, matrix_dim_two): """Applies 2D matrix multiplication to 3D input arrays.""" seq_length = x.shape[1] matrix_dim_one = matrix_dim_one[:seq_length, :seq_length] x = x.type(torch.complex64) return torch.einsum("bij,jk,ni->bnk", x, matrix_dim_two, matrix_dim_one) def two_dim_matmul(x, matrix_dim_one, matrix_dim_two): return _two_dim_matmul(x, matrix_dim_one, matrix_dim_two)
null
10,754
import warnings from dataclasses import dataclass from functools import partial from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import is_scipy_available from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_fnet import FNetConfig The provided code snippet includes necessary dependencies for implementing the `fftn` function. Write a Python function `def fftn(x)` to solve the following problem: Applies n-dimensional Fast Fourier Transform (FFT) to input array. Args: x: Input n-dimensional array. Returns: n-dimensional Fourier transform of input n-dimensional array. Here is the function: def fftn(x): """ Applies n-dimensional Fast Fourier Transform (FFT) to input array. Args: x: Input n-dimensional array. Returns: n-dimensional Fourier transform of input n-dimensional array. """ out = x for axis in reversed(range(x.ndim)[1:]): # We don't need to apply FFT to last axis out = torch.fft.fft(out, axis=axis) return out
Applies n-dimensional Fast Fourier Transform (FFT) to input array. Args: x: Input n-dimensional array. Returns: n-dimensional Fourier transform of input n-dimensional array.
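A small numerical check with toy shapes, assuming the fftn helper above is in scope: a multi-dimensional FFT is separable, so applying the 1-D FFT over every non-batch axis should agree with torch.fft.fftn restricted to those dims.

import torch

x = torch.randn(2, 4, 8, dtype=torch.complex64)  # (batch, seq_len, hidden)
out = fftn(x)
reference = torch.fft.fftn(x, dim=(1, 2))
print(torch.allclose(out, reference, atol=1e-5))  # expected: True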
10,755
import argparse import torch from flax.training.checkpoints import restore_checkpoint from transformers import FNetConfig, FNetForPreTraining from transformers.utils import logging def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, fnet_config_file, save_path): # Initialise PyTorch model config = FNetConfig.from_json_file(fnet_config_file) print(f"Building PyTorch model from configuration: {config}") fnet_pretraining_model = FNetForPreTraining(config) checkpoint_dict = restore_checkpoint(flax_checkpoint_path, None) pretrained_model_params = checkpoint_dict["target"] # Embeddings # Position IDs state_dict = fnet_pretraining_model.state_dict() position_ids = state_dict["fnet.embeddings.position_ids"] new_state_dict = {"fnet.embeddings.position_ids": position_ids} # Embedding Layers new_state_dict["fnet.embeddings.word_embeddings.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["word"]["embedding"] ) new_state_dict["fnet.embeddings.position_embeddings.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["position"]["embedding"][0] ) new_state_dict["fnet.embeddings.token_type_embeddings.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["type"]["embedding"] ) new_state_dict["fnet.embeddings.projection.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["kernel"] ).T new_state_dict["fnet.embeddings.projection.bias"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["bias"] ) new_state_dict["fnet.embeddings.LayerNorm.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["layer_norm"]["scale"] ) new_state_dict["fnet.embeddings.LayerNorm.bias"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["layer_norm"]["bias"] ) # Encoder Layers for layer in range(config.num_hidden_layers): new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.weight"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["scale"] ) new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.bias"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["bias"] ) new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.weight"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["kernel"] ).T new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.bias"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["bias"] ) new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.weight"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["kernel"] ).T new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.bias"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["bias"] ) new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.weight"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["scale"] ) new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.bias"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["bias"] ) # Pooler Layers new_state_dict["fnet.pooler.dense.weight"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["kernel"]).T new_state_dict["fnet.pooler.dense.bias"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["bias"]) # Masked LM Layers 
new_state_dict["cls.predictions.transform.dense.weight"] = torch.tensor( pretrained_model_params["predictions_dense"]["kernel"] ).T new_state_dict["cls.predictions.transform.dense.bias"] = torch.tensor( pretrained_model_params["predictions_dense"]["bias"] ) new_state_dict["cls.predictions.transform.LayerNorm.weight"] = torch.tensor( pretrained_model_params["predictions_layer_norm"]["scale"] ) new_state_dict["cls.predictions.transform.LayerNorm.bias"] = torch.tensor( pretrained_model_params["predictions_layer_norm"]["bias"] ) new_state_dict["cls.predictions.decoder.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["word"]["embedding"] ) new_state_dict["cls.predictions.decoder.bias"] = torch.tensor( pretrained_model_params["predictions_output"]["output_bias"] ) new_state_dict["cls.predictions.bias"] = torch.tensor(pretrained_model_params["predictions_output"]["output_bias"]) # Seq Relationship Layers new_state_dict["cls.seq_relationship.weight"] = torch.tensor( pretrained_model_params["classification"]["output_kernel"] ) new_state_dict["cls.seq_relationship.bias"] = torch.tensor( pretrained_model_params["classification"]["output_bias"] ) # Load State Dict fnet_pretraining_model.load_state_dict(new_state_dict) # Save PreTrained print(f"Saving pretrained model to {save_path}") fnet_pretraining_model.save_pretrained(save_path)
null