| text (string, lengths 5 to 631k) | id (string, lengths 14 to 178) | metadata (dict) | __index_level_0__ (int64, 0 to 647) |
|---|---|---|---|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNetV1 model configuration"""
from collections import OrderedDict
from collections.abc import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MobileNetV1Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a
MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileNetV1
[google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
depth_multiplier (`float`, *optional*, defaults to 1.0):
Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
channels. This is sometimes also called "alpha" or "width multiplier".
min_depth (`int`, *optional*, defaults to 8):
All layers will have at least this many channels.
hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
The non-linear activation function (function or string) used in the convolution layers.
tf_padding (`bool`, *optional*, defaults to `True`):
Whether to use TensorFlow padding rules on the convolution layers.
classifier_dropout_prob (`float`, *optional*, defaults to 0.999):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 0.001):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import MobileNetV1Config, MobileNetV1Model
>>> # Initializing a "mobilenet_v1_1.0_224" style configuration
>>> configuration = MobileNetV1Config()
>>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration
>>> model = MobileNetV1Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mobilenet_v1"
def __init__(
self,
num_channels=3,
image_size=224,
depth_multiplier=1.0,
min_depth=8,
hidden_act="relu6",
tf_padding=True,
classifier_dropout_prob=0.999,
initializer_range=0.02,
layer_norm_eps=0.001,
**kwargs,
):
super().__init__(**kwargs)
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero.")
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.min_depth = min_depth
self.hidden_act = hidden_act
self.tf_padding = tf_padding
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})])
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})])
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
@property
def atol_for_validation(self) -> float:
return 1e-4
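# Minimal export-config sketch (illustrative; it only shows how the input/output axis mappings above are exposed,
# not a full ONNX export):
#
#     config = MobileNetV1Config()
#     onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
#     onnx_config.inputs    # OrderedDict([("pixel_values", {0: "batch"})])
#     onnx_config.outputs   # OrderedDict([("logits", {0: "batch"})])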
__all__ = ["MobileNetV1Config", "MobileNetV1OnnxConfig"]
| transformers/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py/0 | {
"file_path": "transformers/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py",
"repo_id": "transformers",
"token_count": 1795
} | 463 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MusicGen checkpoints from the original repository."""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
if "emb" in name:
name = name.replace("emb", "model.decoder.embed_tokens")
if "transformer" in name:
name = name.replace("transformer", "model.decoder")
if "cross_attention" in name:
name = name.replace("cross_attention", "encoder_attn")
if "linear1" in name:
name = name.replace("linear1", "fc1")
if "linear2" in name:
name = name.replace("linear2", "fc2")
if "norm1" in name:
name = name.replace("norm1", "self_attn_layer_norm")
if "norm_cross" in name:
name = name.replace("norm_cross", "encoder_attn_layer_norm")
if "norm2" in name:
name = name.replace("norm2", "final_layer_norm")
if "out_norm" in name:
name = name.replace("out_norm", "model.decoder.layer_norm")
if "linears" in name:
name = name.replace("linears", "lm_heads")
if "condition_provider.conditioners.description.output_proj" in name:
name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
return name
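# Illustrative example of the mapping above: an original audiocraft key such as
#     "transformer.layers.0.linear1.weight"
# becomes the Hugging Face key
#     "model.decoder.layers.0.fc1.weight"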
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> tuple[dict, dict]:
"""Function that takes the fairseq Musicgen state dict and renames it according to the HF
module names. It further partitions the state dict into the decoder (LM) state dict, and that for the
encoder-decoder projection."""
keys = list(state_dict.keys())
enc_dec_proj_state_dict = {}
for key in keys:
val = state_dict.pop(key)
key = rename_keys(key)
if "in_proj_weight" in key:
# split fused qkv proj
state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
else:
state_dict[key] = val
return state_dict, enc_dec_proj_state_dict
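# Shape sketch for the fused projection split above (illustrative values): with hidden_size = 1024, an
# "in_proj_weight" of shape (3072, 1024) is split into q_proj / k_proj / v_proj weights of shape (1024, 1024) each,
# while "enc_to_dec_proj.*" entries are moved into the separate projection state dict.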
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
if checkpoint.endswith("small"):
# default config values
hidden_size = 1024
num_hidden_layers = 24
num_attention_heads = 16
elif checkpoint.endswith("medium"):
hidden_size = 1536
num_hidden_layers = 48
num_attention_heads = 24
elif checkpoint.endswith("large"):
hidden_size = 2048
num_hidden_layers = 48
num_attention_heads = 32
else:
raise ValueError(
"Checkpoint should be one of `['small', 'medium', 'large']` for the mono checkpoints, "
"`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` "
f"for the stereo checkpoints, or a custom checkpoint with the checkpoint size as a suffix, got {checkpoint}."
)
if "stereo" in checkpoint:
audio_channels = 2
num_codebooks = 8
else:
audio_channels = 1
num_codebooks = 4
config = MusicgenDecoderConfig(
hidden_size=hidden_size,
ffn_dim=hidden_size * 4,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_codebooks=num_codebooks,
audio_channels=audio_channels,
)
return config
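# Illustrative mapping of checkpoint names to configs (values follow the branches above):
#     decoder_config_from_checkpoint("small")                          -> hidden 1024, 24 layers, 16 heads, mono, 4 codebooks
#     decoder_config_from_checkpoint("facebook/musicgen-stereo-large") -> hidden 2048, 48 layers, 32 heads, stereo, 8 codebooks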
@torch.no_grad()
def convert_musicgen_checkpoint(
checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", safe_serialization=False
):
fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
decoder_config = decoder_config_from_checkpoint(checkpoint)
decoder_state_dict = fairseq_model.lm.state_dict()
decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
decoder_state_dict, hidden_size=decoder_config.hidden_size
)
text_encoder = T5EncoderModel.from_pretrained("google-t5/t5-base")
audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
decoder = MusicgenForCausalLM(decoder_config).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(key)
if len(missing_keys) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
if len(unexpected_keys) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
# init the composite model
model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
# check we can do a forward pass
input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1)
decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1)
with torch.no_grad():
logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
if logits.shape != (2 * decoder_config.num_codebooks, 1, 2048):
raise ValueError("Incorrect shape for logits")
# now construct the processor
tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
feature_extractor = AutoFeatureExtractor.from_pretrained(
"facebook/encodec_32khz", padding_side="left", feature_size=decoder_config.audio_channels
)
processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# set the appropriate bos/pad token ids
model.generation_config.decoder_start_token_id = 2048
model.generation_config.pad_token_id = 2048
# set other default generation config params
model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
model.generation_config.do_sample = True
model.generation_config.guidance_scale = 3.0
if pytorch_dump_folder is not None:
Path(pytorch_dump_folder).mkdir(exist_ok=True)
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
model.save_pretrained(pytorch_dump_folder, safe_serialization=safe_serialization)
processor.save_pretrained(pytorch_dump_folder)
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}")
model.push_to_hub(repo_id, safe_serialization=safe_serialization)
processor.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: "
"`['small', 'medium', 'large']` for the mono checkpoints, "
"`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` "
"for the stereo checkpoints, or a custom checkpoint with the checkpoint size as a suffix.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
parser.add_argument(
"--safe_serialization",
action="store_true",
help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).",
)
args = parser.parse_args()
convert_musicgen_checkpoint(
args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device, args.safe_serialization
)
| transformers/src/transformers/models/musicgen/convert_musicgen_transformers.py/0 | {
"file_path": "transformers/src/transformers/models/musicgen/convert_musicgen_transformers.py",
"repo_id": "transformers",
"token_count": 3635
} | 464 |
# coding=utf-8
# Copyright 2024
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model MyT5."""
import json
import os
import warnings
from collections import defaultdict
from typing import Optional, Union
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "byte_maps.json"}
class ByteRewriter:
"""
Byte rewriter class for MyT5 tokenizer.
This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules.
Args:
rewriting_rules (`str` or `dict[str, str]`):
A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules.
"""
LEAF = "[LEAF]"
def __init__(self, rewriting_rules: Union[str, dict[str, str]]):
if isinstance(rewriting_rules, str):
with open(rewriting_rules, "r") as f:
rewriting_rules = json.load(f)
elif not isinstance(rewriting_rules, dict):
raise TypeError(
f"rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}"
)
self.hash_tree = self.construct_hash_tree(rewriting_rules)
reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()}
self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules)
def add_leaf(self, hash_tree: dict[str, Union[dict, list[str]]], byte_in_sequence: str, byte_out_sequence: str):
"""
Add a leaf with the output byte sequence to the hash tree.
"""
byte_in_list = byte_in_sequence.split(" ")
byte_out_list = byte_out_sequence.split(" ")
tree_pointer = hash_tree
for b in byte_in_list:
if b not in tree_pointer:
tree_pointer[b] = {}
tree_pointer = tree_pointer[b]
tree_pointer[self.LEAF] = byte_out_list
def construct_hash_tree(self, rewriting_rules: dict[str, str]) -> dict[str, Union[dict, list[str]]]:
"""
Construct a hash tree for rewritten byte sequences.
"""
hash_tree = defaultdict(dict)
for b in (f"{x:02x}" for x in range(256)):
hash_tree[b][self.LEAF] = [b]
for in_sequence, out_sequence in rewriting_rules.items():
self.add_leaf(hash_tree, in_sequence, out_sequence)
return hash_tree
def search_hash_tree(self, byte_sequence: list[str]) -> Union[None, list[str]]:
"""
Search the hash tree and return the rewritten byte sequence if found.
"""
tree_pointer = self.hash_tree
for b in byte_sequence:
if b in tree_pointer:
tree_pointer = tree_pointer[b]
else:
return None
return tree_pointer[self.LEAF]
def rewrite_bytes(self, in_bytes: list[str], reverse=False) -> list[str]:
"""
Rewrite a sequence of bytes using the hash tree.
Args:
in_bytes (`list[str]`): A list of bytes to be rewritten.
reverse (`bool`): If True, decoding is performed with the reverse hash tree.
Returns:
`list[str]`: The rewritten byte sequence.
"""
out_bytes = []
b_start = 0
b_end = 0
while b_start < len(in_bytes):
tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree
for j in range(b_start, len(in_bytes)):
b = in_bytes[j]
if b in tree_pointer:
tree_pointer = tree_pointer[b]
elif j == b_start:
cur_leaf = [b]
b_end = j
break
else:
break
if self.LEAF in tree_pointer:
cur_leaf = tree_pointer[self.LEAF]
b_end = j
out_bytes.extend(cur_leaf)
b_start = b_end + 1
return out_bytes
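# Minimal illustrative example (uses a hypothetical one-entry rule set, not the real MyT5 byte maps):
#
#     rewriter = ByteRewriter({"61 62": "ff"})            # rewrite the byte pair 0x61 0x62 into 0xff
#     rewriter.rewrite_bytes(["61", "62", "63"])          # -> ["ff", "63"]
#     rewriter.rewrite_bytes(["ff", "63"], reverse=True)  # -> ["61", "62", "63"]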
class MyT5Tokenizer(PreTrainedTokenizer):
"""
Construct a MyT5 tokenizer.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`): The file containing the byte rewriting rules.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 125):
The number of extra ids added to the end of the vocabulary, for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary towards the beginning ("<extra_id_0>" is the last token in the vocabulary,
as in ByT5 preprocessing; see
[here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
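Example (illustrative sketch; assumes a local `byte_maps.json` file containing the MyT5 rewriting rules,
so the resulting ids depend entirely on those maps):
```python
>>> from transformers import MyT5Tokenizer

>>> # Hypothetical local vocab file; saved MyT5 tokenizers ship it as "byte_maps.json"
>>> tokenizer = MyT5Tokenizer(vocab_file="byte_maps.json")
>>> # Text is encoded to UTF-8 bytes, rewritten morphologically, then offset by the 3 special tokens
>>> input_ids = tokenizer("Hello world")["input_ids"]
```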
"""
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=125,
additional_special_tokens=None,
**kwargs,
) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
# Check that we have the right number of extra_id special tokens
extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to MyT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens"
)
pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token
eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token
# unk token needs to be in the vocab with correct index
self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}
self.offset = len(self._added_tokens_decoder)
self._utf_vocab_size = 2**8 # utf is 8 bits
# Load byte maps
self.byte_maps = json.load(open(vocab_file, "r"))
self.decompose_rewriter = ByteRewriter(self.byte_maps["decompose_map"])
self.merge_rewriter = ByteRewriter(self.byte_maps["merge_map"])
super().__init__(
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
extra_ids=0,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
@property
def vocab_size(self):
return self._utf_vocab_size
# Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_vocab
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
vocab.update(self.added_tokens_encoder)
return vocab
# Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# normal case: some special tokens
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added."
)
return token_ids
else:
return token_ids + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
# Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
def _tokenize(self, text: str, **kwargs) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words.
Represents tokens in two character hex format"""
tokens = [f"{i:02x}" for i in text.encode("utf-8")]
tokens = self.morphological_encode(tokens)
return tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if len(token) != 2:
token_id = None
else:
token_id = int(token, 16) + self.offset
return token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = f"{index - self.offset:02x}"
return token
def morphological_encode(self, indices: list[str]) -> list[str]:
# Decompose and merge morphological sequences
indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=False)
indices = self.merge_rewriter.rewrite_bytes(indices, reverse=False)
return indices
def morphological_decode(self, indices: list[str]) -> list[str]:
# Demerge and compose morphological sequences
indices = self.merge_rewriter.rewrite_bytes(indices, reverse=True)
indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=True)
return indices
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
bstring = b""
out_tokens = []
for token in tokens:
if token in self.added_tokens_decoder:
out_tokens.append(self.added_tokens_decoder[token])
elif token in self.added_tokens_encoder:
out_tokens.append(token)
else:
out_tokens.append(token)
out_tokens = self.morphological_decode(out_tokens)
_added_tokens = set(self.added_tokens_decoder.values()) | set(self.added_tokens_encoder)
for token in out_tokens:
if token in _added_tokens:
bstring += bytes(token, "utf-8")
else:
bstring += bytes.fromhex(token)
string = bstring.decode("utf-8", errors="ignore")
return string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
else:
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(vocab_file, "w", encoding="utf-8") as writer:
writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False))
return (vocab_file,)
__all__ = ["MyT5Tokenizer"]
| transformers/src/transformers/models/myt5/tokenization_myt5.py/0 | {
"file_path": "transformers/src/transformers/models/myt5/tokenization_myt5.py",
"repo_id": "transformers",
"token_count": 6817
} | 465 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Nougat.
"""
from typing import Optional, Union
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput, TruncationStrategy
from ...processing_utils import ProcessorMixin
from ...utils import PaddingStrategy, TensorType
class NougatProcessor(ProcessorMixin):
r"""
Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor.
[`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the
[`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information.
Args:
image_processor ([`NougatImageProcessor`]):
An instance of [`NougatImageProcessor`]. The image processor is a required input.
tokenizer ([`NougatTokenizerFast`]):
An instance of [`NougatTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(
self,
images=None,
text=None,
do_crop_margin: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: "PILImageResampling" = None, # noqa: F821
do_thumbnail: Optional[bool] = None,
do_align_long_axis: Optional[bool] = None,
do_pad: Optional[bool] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[Union[int, float]] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821
text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
text_pair_target: Optional[
Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]
] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
):
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")
if images is not None:
inputs = self.image_processor(
images,
do_crop_margin=do_crop_margin,
do_resize=do_resize,
size=size,
resample=resample,
do_thumbnail=do_thumbnail,
do_align_long_axis=do_align_long_axis,
do_pad=do_pad,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
return_tensors=return_tensors,
data_format=data_format,
input_data_format=input_data_format,
)
if text is not None:
encodings = self.tokenizer(
text,
text_pair=text_pair,
text_target=text_target,
text_pair_target=text_pair_target,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
)
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def post_process_generation(self, *args, **kwargs):
"""
This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`].
Please refer to the docstring of this method for more information.
"""
return self.tokenizer.post_process_generation(*args, **kwargs)
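# Minimal usage sketch (illustrative; assumes the "facebook/nougat-base" checkpoint and a PIL image `page_image`):
#
#     from transformers import NougatProcessor
#     processor = NougatProcessor.from_pretrained("facebook/nougat-base")
#     inputs = processor(images=page_image, return_tensors="pt")                 # pixel_values only
#     batch = processor(images=page_image, text="# Title", return_tensors="pt")  # also adds labels (tokenized text)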
__all__ = ["NougatProcessor"]
| transformers/src/transformers/models/nougat/processing_nougat.py/0 | {
"file_path": "transformers/src/transformers/models/nougat/processing_nougat.py",
"repo_id": "transformers",
"token_count": 2733
} | 466 |
# coding=utf-8
# Copyright 2023 IBM & Hugging Face. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PatchTST model."""
import math
from dataclasses import dataclass
from typing import Callable, Optional, Union
import torch
from torch import nn
from ...activations import ACT2CLS
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_patchtst import PatchTSTConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.bart.modeling_bart.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
head_mask: Optional[torch.Tensor] = None,
**kwargs,
):
if scaling is None:
scaling = query.size(-1) ** -0.5
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if head_mask is not None:
attn_weights = attn_weights * head_mask.view(1, -1, 1, 1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Attention with Wav2Vec2->PatchTST
class PatchTSTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[PatchTSTConfig] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
current_states = key_value_states if is_cross_attention else hidden_states
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
head_mask=layer_head_mask,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights, None
class PatchTSTBatchNorm(nn.Module):
"""
Compute batch normalization over the sequence length (time) dimension.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps)
def forward(self, inputs: torch.Tensor):
"""
Parameters:
inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
input for Batch norm calculation
Returns:
`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
"""
output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length)
output = self.batchnorm(output)
return output.transpose(1, 2)
def random_masking(
inputs: torch.Tensor,
mask_ratio: float,
unmasked_channel_indices: Optional[list] = None,
channel_consistent_masking: bool = False,
mask_value: int = 0,
):
"""random_masking: Mask the input considering the control variables.
Args:
inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`):
The input tensor to mask.
mask_ratio (`float`):
Masking ratio applied to mask the input data during random pretraining. It is a number between 0 and 1.
unmasked_channel_indices (list, *optional*):
Indices of channels that will not be masked.
channel_consistent_masking (bool, *optional*, defaults to `False`):
When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary
across channels.
mask_value (int, *optional*, defaults to 0):
Define the value of masked patches for pretraining.
Returns:
`tuple(torch.Tensor)`: inputs_mask (the masked input, with the same shape as the input tensor) and a mask tensor of
shape `[bs x c x n]`.
"""
if mask_ratio < 0 or mask_ratio >= 1:
raise ValueError(f"Mask ratio {mask_ratio} has to be between 0 and 1.")
batch_size, num_channels, sequence_length, num_features = inputs.shape
device = inputs.device
len_keep = int(sequence_length * (1 - mask_ratio))
if channel_consistent_masking:
noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L
noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time
else:
# noise in [0, 1], bs x num_channels x L
noise = torch.rand(batch_size, num_channels, sequence_length, device=device)
# mask: [bs x num_channels x num_patch]
mask = torch.ones(batch_size, num_channels, sequence_length, device=device)
mask[:, :, :len_keep] = 0
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L]
mask = torch.gather(mask, dim=-1, index=ids_restore)
mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length]
if unmasked_channel_indices is not None:
mask[:, unmasked_channel_indices, :, :] = 0
inputs_mask = inputs.masked_fill(mask.bool(), mask_value)
return inputs_mask, mask[..., 0]
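# Illustrative numbers for the random masking above: with mask_ratio=0.25 and sequence_length (num_patches)=8,
# len_keep = int(8 * 0.75) = 6, so 2 patches per channel are masked at randomly chosen positions; channels listed
# in unmasked_channel_indices keep all of their patches unmasked.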
def forecast_masking(
inputs: torch.Tensor,
num_forecast_mask_patches: Union[list, int],
unmasked_channel_indices: Optional[list] = None,
mask_value: int = 0,
):
"""Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches.
If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list.
Parameters:
inputs (`torch.Tensor`):
Input of shape `(bs, num_channels, num_patch, patch_length)`
num_forecast_mask_patches (`list` or `int`):
Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5].
unmasked_channel_indices (`list`, *optional*):
Indices of channels that are not masked.
mask_value (`int`, *optional*, defaults to 0):
Values in the masked patches will be filled by `mask_value`.
Returns:
`tuple(torch.Tensor)`: inputs_mask (the masked input, with the same shape as the inputs tensor) and a mask tensor of
shape `(bs, num_channels, num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)`.
"""
if isinstance(num_forecast_mask_patches, int):
num_forecast_mask_patches = [num_forecast_mask_patches]
forecast_mask_ratios = [1 for _ in num_forecast_mask_patches]
batch_size, num_channels, sequence_length, num_features = inputs.shape
mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device)
t_list = []
total_length = 0
total_ratio = sum(forecast_mask_ratios)
for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios):
if patch_length <= 0 or patch_length >= sequence_length:
raise ValueError(
f"num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches."
)
temp_len = int(batch_size * ratio / total_ratio)
t_list.append([patch_length, ratio, temp_len])
total_length += temp_len
t_list = sorted(t_list, key=lambda x: x[2])
if total_length < batch_size:
t_list[0][2] = t_list[0][2] + (batch_size - total_length)
elif total_length > batch_size:
t_list[-1][2] = t_list[-1][2] + (total_length - batch_size)
batch1 = 0
for patch_len, _, temp_len in t_list:
batch2 = batch1 + temp_len
mask[batch1:batch2, :, -patch_len:] = 1
batch1 = batch2
perm = torch.randperm(mask.shape[0])
mask = mask[perm]
mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len]
if unmasked_channel_indices is not None:
mask[:, unmasked_channel_indices, :, :] = 0
inputs_mask = inputs.masked_fill(mask.bool(), mask_value)
return inputs_mask, mask[..., 0]
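# Illustrative numbers for the forecast masking above: num_forecast_mask_patches=2 masks the last 2 patches of
# every sample, while num_forecast_mask_patches=[2, 4] masks the last 2 patches for (roughly) half the batch and
# the last 4 patches for the other half, with the split balanced via the implicit forecast_mask_ratios.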
class PatchTSTPatchify(nn.Module):
"""
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.sequence_length = config.context_length
self.patch_length = config.patch_length
self.patch_stride = config.patch_stride
if self.sequence_length <= self.patch_length:
raise ValueError(
f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})"
)
# get the number of patches
self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)
self.sequence_start = self.sequence_length - new_sequence_length
def forward(self, past_values: torch.Tensor):
"""
Parameters:
past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*):
Input for patchification
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
sequence_length = past_values.shape[-2]
if sequence_length != self.sequence_length:
raise ValueError(
f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})."
)
# output: [bs x new_sequence_length x num_channels]
output = past_values[:, self.sequence_start :, :]
# output: [bs x num_patches x num_input_channels x patch_length]
output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)
# output: [bs x num_input_channels x num_patches x patch_length]
output = output.transpose(-2, -3).contiguous()
return output
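# Worked example for the patch arithmetic above (illustrative config values): with context_length=512,
# patch_length=16 and patch_stride=8, num_patches = (512 - 16) // 8 + 1 = 63 and
# new_sequence_length = 16 + 8 * 62 = 512, so sequence_start = 0 and the whole context window is patchified.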
class PatchTSTMasking(nn.Module):
"""
Class to perform random or forecast masking.
Parameters:
config (`PatchTSTConfig`): model config
Returns:
x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.random_mask_ratio = config.random_mask_ratio
self.channel_consistent_masking = config.channel_consistent_masking
self.mask_type = config.mask_type
self.num_forecast_mask_patches = config.num_forecast_mask_patches
self.unmasked_channel_indices = config.unmasked_channel_indices
self.mask_value = config.mask_value
if self.unmasked_channel_indices is not None:
self.unmasked_channel_indices = sorted(self.unmasked_channel_indices)
def forward(self, patch_input: torch.Tensor):
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input
Return:
masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
if self.mask_type == "random":
masked_input, mask = random_masking(
inputs=patch_input,
mask_ratio=self.random_mask_ratio,
unmasked_channel_indices=self.unmasked_channel_indices,
channel_consistent_masking=self.channel_consistent_masking,
mask_value=self.mask_value,
)
elif self.mask_type == "forecast":
masked_input, mask = forecast_masking(
inputs=patch_input,
num_forecast_mask_patches=self.num_forecast_mask_patches,
unmasked_channel_indices=self.unmasked_channel_indices,
mask_value=self.mask_value,
)
else:
raise ValueError(f"Invalid mask type {self.mask_type}.")
# mask: [bs x num_input_channels x num_patch]
mask = mask.bool()
return masked_input, mask
class PatchTSTEncoderLayer(nn.Module):
"""
PatchTST encoder layer
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.channel_attention = config.channel_attention
# Multi-Head attention
self.self_attn = PatchTSTAttention(
embed_dim=config.d_model,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
config=config,
)
# Add & Norm of the sublayer 1
self.dropout_path1 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
if config.norm_type == "batchnorm":
self.norm_sublayer1 = PatchTSTBatchNorm(config)
elif config.norm_type == "layernorm":
self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
else:
raise ValueError(f"{config.norm_type} is not a supported norm layer type.")
# Add & Norm of the sublayer 2
if self.channel_attention:
self.dropout_path2 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
if config.norm_type == "batchnorm":
self.norm_sublayer2 = PatchTSTBatchNorm(config)
elif config.norm_type == "layernorm":
self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
else:
raise ValueError(f"{config.norm_type} is not a supported norm layer type.")
# Position-wise Feed-Forward
self.ff = nn.Sequential(
nn.Linear(config.d_model, config.ffn_dim, bias=config.bias),
ACT2CLS[config.activation_function](),
nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(),
nn.Linear(config.ffn_dim, config.d_model, bias=config.bias),
)
# Add & Norm of sublayer 3
self.dropout_path3 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
if config.norm_type == "batchnorm":
self.norm_sublayer3 = PatchTSTBatchNorm(config)
elif config.norm_type == "layernorm":
self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
else:
raise ValueError(f"{config.norm_type} is not a supported norm layer type.")
self.pre_norm = config.pre_norm
def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool] = None):
"""
Parameters:
hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*):
Past values of the time series
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
Return:
`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`
"""
batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape
# First sublayer: attention across time
# hidden_states: [(bs*num_channels) x sequence_length x d_model]
hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model)
if self.pre_norm:
## Norm and Multi-Head attention and Add residual connection
attn_output, attn_weights, _ = self.self_attn(
hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions
)
# Add: residual connection with residual dropout
hidden_state = hidden_state + self.dropout_path1(attn_output)
else:
## Multi-Head attention and Add residual connection and Norm - Standard Transformer from BERT
attn_output, attn_weights, _ = self.self_attn(
hidden_states=hidden_state, output_attentions=output_attentions
)
# hidden_states: [(bs*num_channels) x sequence_length x d_model]
hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output))
# hidden_state: [bs x num_channels x sequence_length x d_model]
hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model)
# second sublayer: attention across variables at any given time
if self.channel_attention:
# hidden_state: [bs x sequence_length x num_channels x d_model]
hidden_state = hidden_state.transpose(2, 1).contiguous()
# hidden_state: [(bs*sequence_length) x num_channels x d_model]
hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model)
if self.pre_norm:
## Norm and Multi-Head attention and Add residual connection
attn_output, channel_attn_weights, _ = self.self_attn(
hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions
)
# Add: residual connection with residual dropout
hidden_state = hidden_state + self.dropout_path2(attn_output)
else:
## Multi-Head attention and Add residual connection and Norm
attn_output, channel_attn_weights, _ = self.self_attn(
hidden_states=hidden_state, output_attentions=output_attentions
)
# hidden_states: [(bs*sequence_length) x num_channels x d_model]
hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output))
# Reshape hidden state
# hidden_state: [bs x sequence_length x num_channels x d_model]
hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model)
# hidden_state: [bs x num_channels x sequence_length x d_model]
hidden_state = hidden_state.transpose(1, 2).contiguous()
# Third sublayer: mixing across hidden
# hidden_state: [(batch_size*num_channels) x sequence_length x d_model]
hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model)
if self.pre_norm:
## Norm and Position-wise Feed-Forward and Add residual connection
# Add: residual connection with residual dropout
hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state)))
else:
## Position-wise Feed-Forward and Add residual connection and Norm
# Add: residual connection with residual dropout
hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state)))
# [bs x num_channels x sequence_length x d_model]
hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model)
outputs = (hidden_state,)
if output_attentions:
outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,)
return outputs
@auto_docstring
class PatchTSTPreTrainedModel(PreTrainedModel):
config: PatchTSTConfig
base_model_prefix = "model"
main_input_name = "past_values"
supports_gradient_checkpointing = False
def _init_weights(self, module: nn.Module):
"""
Initialize weights
"""
if isinstance(module, PatchTSTPositionalEncoding):
# get the number of patches
num_patches = (
max(self.config.context_length, self.config.patch_length) - self.config.patch_length
) // self.config.patch_stride + 1
# initialize cls_token
if self.config.use_cls_token:
nn.init.normal_(module.cls_token, std=0.02)
num_patches += 1
# initialize positional encoding
module.position_enc = module._init_pe(self.config, num_patches)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, PatchTSTBatchNorm):
module.batchnorm.bias.data.zero_()
module.batchnorm.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (PatchTSTEncoder)):
module.gradient_checkpointing = value
class PatchTSTEmbedding(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.num_input_channels = config.num_input_channels
self.share_embedding = config.share_embedding
# Input encoding: projection of feature vectors onto a d-dim vector space
if self.share_embedding:
self.input_embedding = nn.Linear(config.patch_length, config.d_model)
else:
self.input_embedding = nn.ModuleList()
for _ in range(config.num_input_channels):
self.input_embedding.append(nn.Linear(config.patch_length, config.d_model))
def forward(self, patch_input: torch.Tensor):
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input for embedding
return:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)`
"""
# Input encoding
num_input_channels = patch_input.shape[1]
if num_input_channels != self.num_input_channels:
raise ValueError(
f"The defined number of input channels ({self.num_input_channels}) in the config "
f"has to be the same as the number of channels in the batch input ({num_input_channels})"
)
if self.share_embedding:
embeddings = self.input_embedding(patch_input) # x: [bs x num_channels x num_patches x d_model]
else:
embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)]
embeddings = torch.stack(embeddings, dim=1)
return embeddings
class PatchTSTPositionalEncoding(nn.Module):
"""
Class for positional encoding
"""
def __init__(self, config: PatchTSTConfig, num_patches: int):
super().__init__()
self.use_cls_token = config.use_cls_token
self.num_input_channels = config.num_input_channels
if config.use_cls_token:
# cls_token: [1 x num_input_channels x 1 x d_model]
self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model))
num_patches += 1
# positional encoding: [num_patches x d_model]
self.position_enc = self._init_pe(config, num_patches)
# Positional dropout
self.positional_dropout = (
nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity()
)
@staticmethod
def _init_pe(config: PatchTSTConfig, num_patches: int) -> nn.Parameter:
# Positional encoding
if config.positional_encoding_type == "random":
position_enc = nn.Parameter(torch.randn(num_patches, config.d_model), requires_grad=True)
elif config.positional_encoding_type == "sincos":
position_enc = torch.zeros(num_patches, config.d_model)
position = torch.arange(0, num_patches).unsqueeze(1)
div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model))
position_enc[:, 0::2] = torch.sin(position * div_term)
position_enc[:, 1::2] = torch.cos(position * div_term)
position_enc = position_enc - position_enc.mean()
position_enc = position_enc / (position_enc.std() * 10)
position_enc = nn.Parameter(position_enc, requires_grad=False)
else:
raise ValueError(
f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'."
)
return position_enc
def forward(self, patch_input: torch.Tensor):
if self.use_cls_token:
# patch_input: [bs x num_channels x num_patches x d_model]
patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :])
# append cls token where cls_token: [1 x num_channels x 1 x d_model]
cls_token = self.cls_token + self.position_enc[:1, :]
# get the same copy of cls_token for all the samples in batch: [bs x num_channels x 1 x d_model]
cls_tokens = cls_token.expand(patch_input.shape[0], self.num_input_channels, -1, -1)
# hidden_state: [bs x num_channels x (num_patches+1) x d_model]
hidden_state = torch.cat((cls_tokens, patch_input), dim=2)
else:
# hidden_state: [bs x num_channels x num_patches x d_model]
hidden_state = self.positional_dropout(patch_input + self.position_enc)
return hidden_state
class PatchTSTEncoder(PatchTSTPreTrainedModel):
"""
PatchTST Encoder
"""
def __init__(self, config: PatchTSTConfig, num_patches: int):
super().__init__(config)
self.gradient_checkpointing = False
# Input embedding: projection of feature vectors onto a d-dim vector space
self.embedder = PatchTSTEmbedding(config)
# Positional encoding
self.positional_encoder = PatchTSTPositionalEncoding(config, num_patches)
# Encoder
self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for i in range(config.num_hidden_layers)])
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
patch_input: torch.Tensor,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
) -> BaseModelOutput:
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Past values of the time series
output_hidden_states (bool, optional): Whether to return the hidden states of all layers.
output_attentions (bool, optional): Whether to return the attentions of all layers.
Returns:
`BaseModelOutput`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# Input embedding
patch_input = self.embedder(patch_input)
# Positional encoding
hidden_state = self.positional_encoder(patch_input)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_state,)
layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions)
# get hidden state. hidden_state shape is [bs x num_channels x num_patches x d_model]
# or [bs x num_channels x (num_patches+1) x d_model] if use cls_token
hidden_state = layer_outputs[0]
# append attention matrix at each layer
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# return the last hidden state together with the collected hidden states and attentions
return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions)
@dataclass
@auto_docstring(
custom_intro="""
Base class for model's outputs, with potential hidden states.
"""
)
class PatchTSTModelOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, num_channels, num_patches, d_model)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
mask (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*):
Bool masked tensor indicating which patches are masked
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length
patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
Patched input to the Transformer
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
mask: Optional[torch.FloatTensor] = None
loc: Optional[torch.FloatTensor] = None
scale: Optional[torch.FloatTensor] = None
patch_input: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSTForPretraining`].
"""
)
class PatchTSTForPretrainingOutput(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_output (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
Prediction outputs of the time series modeling heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSTForRegression`].
"""
)
class PatchTSTForRegressionOutput(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Regression outputs of the time series modeling heads.
"""
loss: Optional[torch.FloatTensor] = None
regression_outputs: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSTForPrediction`].
"""
)
class PatchTSTForPredictionOutput(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, -1)`):
Prediction outputs of the time series modeling heads.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length
"""
loss: Optional[torch.FloatTensor] = None
prediction_outputs: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
loc: Optional[torch.FloatTensor] = None
scale: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSTForClassification`].
"""
)
class PatchTSTForClassificationOutput(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Classification (cross entropy) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction scores of the PatchTST modeling head (scores before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for time series model's predictions outputs that contains the sampled values from the chosen
distribution.
"""
)
class SamplePatchTSTOutput(ModelOutput):
r"""
sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length, num_targets)`):
Sampled values from the chosen distribution.
"""
sequences: Optional[torch.FloatTensor] = None
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll
def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
"""
Computes the negative log likelihood loss from input distribution with respect to target.
"""
return -input.log_prob(target)
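# Illustration only (hypothetical tensors): `nll` returns one negative log-likelihood term per element,
# for any `torch.distributions.Distribution`:
#
#     import torch
#
#     dist = torch.distributions.Normal(loc=torch.zeros(2, 3), scale=torch.ones(2, 3))
#     target = torch.randn(2, 3)
#     loss_terms = nll(dist, target)  # shape [2, 3]; typically reduced with `weighted_average` below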
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average
def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
"""
Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
Args:
input_tensor (`torch.FloatTensor`):
Input tensor, of which the average must be computed.
weights (`torch.FloatTensor`, *optional*):
Weights tensor, of the same shape as `input_tensor`.
dim (`int`, *optional*):
The dim along which to average `input_tensor`.
Returns:
`torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
"""
if weights is not None:
weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
else:
return input_tensor.mean(dim=dim)
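# Illustration only (hypothetical tensors): zero weights mask out entries (including NaNs) instead of
# letting them propagate into the average:
#
#     import torch
#
#     values = torch.tensor([[1.0, float("nan")], [3.0, 5.0]])
#     weights = torch.tensor([[1.0, 0.0], [1.0, 1.0]])
#     weighted_average(values, weights, dim=1)  # tensor([1., 4.]) -- the NaN entry is ignored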
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST
class PatchTSTStdScaler(nn.Module):
"""
Standardizes features by computing the mean and standard deviation along the scaling dimension, then normalizes
the data by subtracting the mean and dividing by the standard deviation.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return (data - loc) / scale, loc, scale
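# Shape sketch (hypothetical sizes, assuming a default `PatchTSTConfig`): with the default `dim=1` the
# statistics are taken over the time dimension, so each channel of each series gets its own loc/scale:
#
#     import torch
#
#     data = torch.randn(4, 512, 7)            # [bs x sequence_length x num_input_channels]
#     observed = torch.ones_like(data)
#     scaled, loc, scale = PatchTSTStdScaler(PatchTSTConfig())(data, observed)
#     # scaled: [4, 512, 7], loc: [4, 1, 7], scale: [4, 1, 7]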
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST
class PatchTSTMeanScaler(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
# If `default_scale` is provided, we use it, otherwise we use the scale
# of the batch.
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
# apply default scale where there are no observations
scale = torch.where(num_observed > 0, scale, default_scale)
# ensure the scale is at least `self.minimum_scale`
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return scaled_data, torch.zeros_like(scale), scale
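# Worked example (hypothetical tensor): the mean scaler divides by the mean absolute value over time
# and always reports `loc = 0`:
#
#     import torch
#
#     data = torch.tensor([[[2.0], [4.0], [6.0]]])   # [bs=1 x sequence_length=3 x num_channels=1]
#     observed = torch.ones_like(data)
#     # scale = mean(|data|) = 4.0, so the scaled series is [[0.5], [1.0], [1.5]] and loc is 0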
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST
class PatchTSTNOPScaler(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
def forward(
self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor] = None
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return data, loc, scale
class PatchTSTScaler(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
if config.scaling == "mean" or config.scaling is True:
self.scaler = PatchTSTMeanScaler(config)
elif config.scaling == "std":
self.scaler = PatchTSTStdScaler(config)
else:
self.scaler = PatchTSTNOPScaler(config)
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
data, loc, scale = self.scaler(data, observed_indicator)
return data, loc, scale
@auto_docstring
class PatchTSTModel(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
self.scaler = PatchTSTScaler(config)
self.patchifier = PatchTSTPatchify(config)
self.do_mask_input = config.do_mask_input
# get num_patches information from PatchTSTPatchify
num_patches = self.patchifier.num_patches
if self.do_mask_input:
self.masking = PatchTSTMasking(config)
else:
self.masking = nn.Identity()
self.encoder = PatchTSTEncoder(config, num_patches=num_patches)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
future_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTModelOutput]:
r"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.Tensor` of shape `(batch_size, prediction_length, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = PatchTSTModel.from_pretrained("namctin/patchtst_etth1_pretrain")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if past_observed_mask is None:
past_observed_mask = torch.ones_like(past_values)
# x: tensor [bs x sequence_length x num_input_channels]
scaled_past_values, loc, scale = self.scaler(past_values, past_observed_mask)
# patched_values: [bs x num_input_channels x num_patches x patch_length] for pretrain
patched_values = self.patchifier(scaled_past_values)
if self.do_mask_input:
masked_values, mask = self.masking(patched_values)
else:
masked_values, mask = self.masking(patched_values), None
encoder_output = self.encoder(
patch_input=masked_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
if not return_dict:
outputs = (encoder_output.last_hidden_state, encoder_output.hidden_states, encoder_output.attentions)
outputs = outputs + (mask, loc, scale, patched_values)
return tuple(v for v in outputs if v is not None)
return PatchTSTModelOutput(
last_hidden_state=encoder_output.last_hidden_state,
hidden_states=encoder_output.hidden_states,
attentions=encoder_output.attentions,
mask=mask,
loc=loc,
scale=scale,
patch_input=patched_values,
)
class PatchTSTMaskPretrainHead(nn.Module):
"""
Pretraining head for mask modelling
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
self.linear = nn.Linear(config.d_model, config.patch_length)
self.use_cls_token = config.use_cls_token
def forward(self, embedding: torch.Tensor) -> torch.Tensor:
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True
"""
embedding = self.linear(self.dropout(embedding)) # [bs x num_channels x num_patches x patch_length]
if self.use_cls_token:
embedding = embedding[:, :, 1:, :] # remove the first cls token
return embedding
@auto_docstring(
custom_intro="""
The PatchTST for pretrain model.
"""
)
class PatchTSTForPretraining(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
config.do_mask_input = True
self.model = PatchTSTModel(config=config)
self.head = PatchTSTMaskPretrainHead(config)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTForPretrainingOutput]:
r"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPretraining
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Config for random mask pretraining
>>> config = PatchTSTConfig(
... num_input_channels=7,
... context_length=512,
... patch_length=12,
... stride=12,
... mask_type='random',
... random_mask_ratio=0.4,
... use_cls_token=True,
... )
>>> # Config for forecast mask pretraining
>>> config = PatchTSTConfig(
... num_input_channels=7,
... context_length=512,
... patch_length=12,
... stride=12,
... mask_type='forecast',
... num_forecast_mask_patches=5,
... use_cls_token=True,
... )
>>> model = PatchTSTForPretraining(config)
>>> # during training, one provides both past and future values
>>> outputs = model(past_values=batch["past_values"])
>>> loss = outputs.loss
>>> loss.backward()
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# past_values: [bs x num_channels x num_patches x d_model] or
# [bs x num_channels x (num_patches+1) x d_model] if use cls_token
model_output = self.model(
past_values=past_values,
past_observed_mask=past_observed_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=True,
)
# last_hidden_state: [bs x num_channels x num_patches x patch_length] or
# [bs x num_channels x (num_patches+1) x patch_length] if use cls_token
x_hat = self.head(model_output.last_hidden_state)
# calculate masked_loss
loss = nn.MSELoss(reduction="none")
loss_val = loss(x_hat, model_output.patch_input)
masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10)
encoder_states = model_output.hidden_states
if not return_dict:
outputs = (x_hat,) + model_output[1:-4]
outputs = (masked_loss,) + outputs if masked_loss is not None else outputs
return outputs
return PatchTSTForPretrainingOutput(
loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions
)
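# A minimal sketch of the masked reconstruction loss computed above (hypothetical tensors): only the
# patches flagged by `mask` contribute to the averaged MSE:
#
#     import torch
#
#     x_hat = torch.randn(2, 7, 32, 12)        # [bs x num_channels x num_patches x patch_length]
#     target = torch.randn(2, 7, 32, 12)
#     mask = (torch.rand(2, 7, 32) < 0.4).float()
#     per_patch = torch.nn.MSELoss(reduction="none")(x_hat, target).mean(dim=-1)
#     masked_loss = (per_patch * mask).sum() / (mask.sum() + 1e-10)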
class PatchTSTClassificationHead(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
self.flatten = nn.Flatten(start_dim=1)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets)
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_targets)`
"""
if self.use_cls_token:
# use the first output token, pooled_embedding: bs x num_channels x d_model
pooled_embedding = embedding[:, :, 0, :]
elif self.pooling_type == "mean":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == "max":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.max(dim=2).values
else:
raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet")
# pooled_embedding: bs x num_channels * d_model
pooled_embedding = self.flatten(pooled_embedding)
# output: bs x n_classes
output = self.linear(self.dropout(pooled_embedding))
return output
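# Shape sketch of the head above (hypothetical sizes): pool over patches, flatten channels, project:
#
#     import torch
#
#     embedding = torch.randn(8, 3, 32, 128)        # [bs x num_channels x num_patches x d_model]
#     pooled = embedding.mean(dim=2)                # [8, 3, 128]  (pooling_type="mean")
#     flat = torch.nn.Flatten(start_dim=1)(pooled)  # [8, 384] == [bs x num_channels * d_model]
#     logits = torch.nn.Linear(3 * 128, 5)(flat)    # [8, 5] for num_targets=5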
@auto_docstring(
custom_intro="""
The PatchTST for classification model.
"""
)
class PatchTSTForClassification(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
# Turn off masking
if config.do_mask_input:
logger.warning("Setting `do_mask_input` parameter to False.")
config.do_mask_input = False
self.model = PatchTSTModel(config)
self.head = PatchTSTClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
target_values: Optional[torch.Tensor] = None,
past_observed_mask: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTForClassificationOutput]:
r"""
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor`, *optional*):
Labels associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Examples:
```python
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForClassification
>>> # classification task with two input channels and 3 classes
>>> config = PatchTSTConfig(
... num_input_channels=2,
... num_targets=3,
... context_length=512,
... patch_length=12,
... stride=12,
... use_cls_token=True,
... )
>>> model = PatchTSTForClassification(config=config)
>>> # during inference, one only provides past values
>>> past_values = torch.randn(20, 512, 2)
>>> outputs = model(past_values=past_values)
>>> labels = outputs.prediction_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(
past_values=past_values,
past_observed_mask=past_observed_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=True,
)
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if target_values is not None:
loss = nn.CrossEntropyLoss()
loss_val = loss(y_hat, target_values)
if not return_dict:
outputs = (y_hat,) + model_output[1:-3]
outputs = (loss_val,) + outputs if loss_val is not None else outputs
return outputs
return PatchTSTForClassificationOutput(
loss=loss_val,
prediction_logits=y_hat,
hidden_states=model_output.hidden_states,
attentions=model_output.attentions,
)
@auto_docstring(
custom_intro="""
The PatchTST for regression Model.
"""
)
class PatchTSTPredictionHead(nn.Module):
def __init__(self, config: PatchTSTConfig, num_patches: int, distribution_output=None):
r"""
num_patches (`int`):
The number of patches in the input sequence.
distribution_output (`DistributionOutput`, *optional*):
The distribution output layer for probabilistic forecasting. If None, a linear output layer is used.
"""
super().__init__()
self.share_projection = config.share_projection
self.num_input_channels = config.num_input_channels
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
if self.pooling_type or self.use_cls_token:
head_dim = config.d_model
else:
head_dim = config.d_model * num_patches
if not self.share_projection:
# if each channel has its own head
self.projections = nn.ModuleList()
self.dropouts = nn.ModuleList()
self.flattens = nn.ModuleList()
for i in range(self.num_input_channels):
self.flattens.append(nn.Flatten(start_dim=2))
if distribution_output is None:
# use linear head
self.projections.append(nn.Linear(head_dim, config.prediction_length))
else:
# use distribution head
self.projections.append(distribution_output.get_parameter_projection(head_dim))
self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity())
else:
# all the channels share the same head
self.flatten = nn.Flatten(start_dim=2)
if distribution_output is None:
# use linear head
self.projection = nn.Linear(head_dim, config.prediction_length)
else:
# use distribution head
self.projection = distribution_output.get_parameter_projection(head_dim)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, forecast_len, num_channels)`
"""
if self.use_cls_token:
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding[:, :, 0, :]
else:
if self.pooling_type == "mean":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == "max":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.max(dim=2).values
else:
# pooled_embedding: [bs x num_channels x num_patches x d_model]
pooled_embedding = embedding
if not self.share_projection:
output = []
for i in range(self.num_input_channels):
# pooled_embedding: [bs x (d_model * num_patches)] or [bs x d_model]
pooled_embedding = self.flattens[i](pooled_embedding[:, i, :])
pooled_embedding = self.dropouts[i](pooled_embedding)
# pooled_embedding: [bs x forecast_len]
# or tuple ([bs x forecast_len], [bs x forecast_len]) if using distribution head
pooled_embedding = self.projections[i](pooled_embedding)
output.append(pooled_embedding)
# output: [bs x num_channels x forecast_len]
output = torch.stack(output, dim=1)
else:
# pooled_embedding: [bs x num_channels x (d_model * num_patches)] or [bs x num_channels x d_model]
pooled_embedding = self.flatten(pooled_embedding)
pooled_embedding = self.dropout(pooled_embedding)
# output: [bs x num_channels x forecast_len] or
# tuple ([bs x num_channels x forecast_len], [bs x num_channels x forecast_len]) if using distribution head
output = self.projection(pooled_embedding)
if isinstance(output, tuple):
# output: ([bs x forecast_len x num_channels], [bs x forecast_len x num_channels])
output = tuple(z.transpose(2, 1) for z in output)
else:
output = output.transpose(2, 1) # [bs x forecast_len x num_channels]
return output
@auto_docstring(
custom_intro="""
The PatchTST for prediction model.
"""
)
class PatchTSTForPrediction(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
# Turn off masking
if config.do_mask_input:
logger.warning("Setting `do_mask_input` parameter to False.")
config.do_mask_input = False
self.model = PatchTSTModel(config)
if config.loss == "mse":
self.distribution_output = None
else:
if config.distribution_output == "student_t":
self.distribution_output = StudentTOutput(dim=config.prediction_length)
elif config.distribution_output == "normal":
self.distribution_output = NormalOutput(dim=config.prediction_length)
elif config.distribution_output == "negative_binomial":
self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length)
else:
raise ValueError(f"Unknown distribution output {config.distribution_output}")
self.head = PatchTSTPredictionHead(
config, self.model.patchifier.num_patches, distribution_output=self.distribution_output
)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
future_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTForPredictionOutput]:
r"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Prediction task with 7 input channels and a prediction length of 96
>>> model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values, the model outputs future values
>>> outputs = model(past_values=batch["past_values"])
>>> prediction_outputs = outputs.prediction_outputs
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# get model output
model_output = self.model(
past_values=past_values,
past_observed_mask=past_observed_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=True,
)
# get output head
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if self.distribution_output:
y_hat_out = y_hat
else:
y_hat_out = y_hat * model_output.scale + model_output.loc
if future_values is not None:
if self.distribution_output:
distribution = self.distribution_output.distribution(
y_hat, loc=model_output.loc, scale=model_output.scale
)
loss_val = nll(distribution, future_values)
# take average of the loss
loss_val = weighted_average(loss_val)
else:
loss = nn.MSELoss(reduction="mean")
loss_val = loss(y_hat_out, future_values)
loc = model_output.loc
scale = model_output.scale
if not return_dict:
outputs = (y_hat_out,) + model_output[1:-1]
outputs = (loss_val,) + outputs if loss_val is not None else outputs
return outputs
return PatchTSTForPredictionOutput(
loss=loss_val,
prediction_outputs=y_hat_out,
hidden_states=model_output.hidden_states,
attentions=model_output.attentions,
loc=loc,
scale=scale,
)
@torch.no_grad()
def generate(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
) -> SamplePatchTSTOutput:
"""
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serves as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, num_input_channels)`
for multivariate predictions.
"""
# get number of samples
num_parallel_samples = self.config.num_parallel_samples
# get model output
outputs = self(
past_values=past_values,
future_values=None,
past_observed_mask=past_observed_mask,
output_hidden_states=False,
)
if self.distribution_output:
# get distribution
distribution = self.distribution_output.distribution(
outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale
)
# get samples: list of [bs x forecast_len x num_channels]
samples = [distribution.sample() for _ in range(num_parallel_samples)]
# samples: [bs x num_samples x forecast_len x num_channels]
samples = torch.stack(samples, dim=1)
else:
samples = outputs.prediction_outputs.unsqueeze(1)
return SamplePatchTSTOutput(sequences=samples)
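# Usage sketch (hypothetical checkpoint and batch): when the model was trained with a distribution
# head (`loss="nll"`), `generate` draws `config.num_parallel_samples` trajectories:
#
#     model = PatchTSTForPrediction.from_pretrained("<a probabilistic PatchTST checkpoint>")
#     samples = model.generate(past_values=batch["past_values"]).sequences
#     # samples: [bs x num_parallel_samples x prediction_length x num_input_channels]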
class PatchTSTRegressionHead(nn.Module):
"""
Regression head
"""
def __init__(self, config: PatchTSTConfig, distribution_output=None):
super().__init__()
self.y_range = config.output_range
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
self.distribution_output = distribution_output
head_dim = config.num_input_channels * config.d_model
self.flatten = nn.Flatten(start_dim=1)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
if distribution_output is None:
self.projection = nn.Linear(head_dim, config.num_targets)
else:
self.projection = distribution_output.get_parameter_projection(head_dim)
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, output_dim)`
"""
if self.use_cls_token:
# use the first output token, pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding[:, :, 0, :]
elif self.pooling_type == "mean":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == "max":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.max(dim=2).values
else:
raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet")
# flatten the input
# pooled_embedding: bs x (num_channels * d_model)
pooled_embedding = self.dropout(self.flatten(pooled_embedding))
# projection
# output: bs x output_dim or a tuple of this shape for distribution head
output = self.projection(pooled_embedding)
# apply sigmoid to bound the output if required
if (self.distribution_output is None) & (self.y_range is not None): # linear head
output = torch.sigmoid(output) * (self.y_range[1] - self.y_range[0]) + self.y_range[0]
return output
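# A minimal sketch of the `output_range` bounding above (hypothetical range): a sigmoid squashes the
# linear head output into [y_min, y_max]:
#
#     import torch
#
#     y_min, y_max = 0.0, 10.0
#     raw = torch.tensor([-3.0, 0.0, 3.0])
#     bounded = torch.sigmoid(raw) * (y_max - y_min) + y_min   # every value now lies in (0, 10)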
@auto_docstring(
custom_intro="""
The PatchTST for regression model.
"""
)
class PatchTSTForRegression(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
# Turn off masking
if config.do_mask_input:
logger.warning("Setting `do_mask_input` parameter to False.")
config.do_mask_input = False
self.model = PatchTSTModel(config)
if config.loss == "mse":
self.distribution_output = None
else:
if config.distribution_output == "student_t":
self.distribution_output = StudentTOutput(dim=config.num_targets)
elif config.distribution_output == "normal":
self.distribution_output = NormalOutput(dim=config.num_targets)
elif config.distribution_output == "negative_binomial":
self.distribution_output = NegativeBinomialOutput(dim=config.num_targets)
else:
raise ValueError(f"Unknown distribution output {config.distribution_output}")
self.head = PatchTSTRegressionHead(config, self.distribution_output)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
target_values: Optional[torch.Tensor] = None,
past_observed_mask: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTForRegressionOutput]:
r"""
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor` of shape `(bs, num_targets)`, *optional*):
Target values associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Examples:
```python
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForRegression
>>> # Regression task with 6 input channels and 2 regression targets
>>> model = PatchTSTForRegression.from_pretrained("namctin/patchtst_etth1_regression")
>>> # during inference, one only provides past values; the model outputs the regression targets
>>> past_values = torch.randn(20, 512, 6)
>>> outputs = model(past_values=past_values)
>>> regression_outputs = outputs.regression_outputs
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(
past_values=past_values,
past_observed_mask=past_observed_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=True,
)
# get output head. y_hat is of shape [bs x num_targets] or tuple of this shape
y_hat = self.head(model_output.last_hidden_state)
loss = None
if target_values is not None:
if self.distribution_output:
distribution = self.distribution_output.distribution(y_hat)
# y_hat should be a 2-tuple, each with dimension [bs, num_targets]
y_hat = tuple(item.view(-1, self.config.num_targets) for item in y_hat)
loss = nll(distribution, target_values)
# take average of the loss
loss = weighted_average(loss)
else:
loss = nn.MSELoss(reduction="mean")
loss = loss(y_hat, target_values)
if not return_dict:
# hidden_states, attentions, mask
outputs = (y_hat,) + model_output[1:-3]
outputs = (loss,) + outputs if loss is not None else outputs
return outputs
return PatchTSTForRegressionOutput(
loss=loss,
regression_outputs=y_hat,
hidden_states=model_output.hidden_states,
attentions=model_output.attentions,
)
@torch.no_grad()
def generate(
self,
past_values: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
) -> SamplePatchTSTOutput:
"""
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serves as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, num_targets)`.
"""
# get number of samples
num_parallel_samples = self.config.num_parallel_samples
# get model output
outputs = self(
past_values=past_values,
target_values=None,
past_observed_mask=past_observed_mask,
output_hidden_states=False,
)
# get distribution
distribution = self.distribution_output.distribution(outputs.regression_outputs)
# get samples: list of [bs x num_targets]
samples = [distribution.sample() for _ in range(num_parallel_samples)]
# samples: [bs x num_samples x num_targets]
samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets)
return SamplePatchTSTOutput(sequences=samples)
__all__ = [
"PatchTSTModel",
"PatchTSTPreTrainedModel",
"PatchTSTForPrediction",
"PatchTSTForPretraining",
"PatchTSTForRegression",
"PatchTSTForClassification",
]
| transformers/src/transformers/models/patchtst/modeling_patchtst.py/0 | {
"file_path": "transformers/src/transformers/models/patchtst/modeling_patchtst.py",
"repo_id": "transformers",
"token_count": 36153
} | 467 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Perceiver."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...utils.import_utils import requires
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
@requires(backends=("vision",))
class PerceiverImageProcessor(BaseImageProcessor):
r"""
Constructs a Perceiver image processor.
Args:
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether or not to center crop the image. If the input size is smaller than `crop_size` along any edge, the
image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
`preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
crop_size = get_size_dict(crop_size, param_name="crop_size")
size = size if size is not None else {"height": 224, "width": 224}
size = get_size_dict(size)
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def center_crop(
self,
image: np.ndarray,
crop_size: dict[str, int],
size: Optional[int] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] *
min_dim)`. Where `min_dim = min(size["height"], size["width"])`.
If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then
center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
crop_size (`dict[str, int]`):
Desired output size after applying the center crop.
size (`dict[str, int]`, *optional*):
Size of the image after resizing. If not provided, the self.size attribute will be used.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = self.size if size is None else size
size = get_size_dict(size)
crop_size = get_size_dict(crop_size, param_name="crop_size")
height, width = get_image_size(image, channel_dim=input_data_format)
min_dim = min(height, width)
cropped_height = (size["height"] / crop_size["height"]) * min_dim
cropped_width = (size["width"] / crop_size["width"]) * min_dim
return center_crop(
image,
size=(cropped_height, cropped_width),
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
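# Worked example (hypothetical input size): with the defaults size={"height": 224, "width": 224} and
# crop_size={"height": 256, "width": 256}, a 300x400 image is cropped as follows before resizing:
#
#     height, width = 300, 400
#     min_dim = min(height, width)             # 300
#     cropped_height = 224 / 256 * min_dim     # 262.5
#     cropped_width = 224 / 256 * min_dim      # 262.5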
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image to `crop_size`.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Desired output size after applying the center crop.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_center_crop:
images = [
self.center_crop(image, crop_size, size=size, input_data_format=input_data_format) for image in images
]
if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["PerceiverImageProcessor"]
| transformers/src/transformers/models/perceiver/image_processing_perceiver.py/0 | {
"file_path": "transformers/src/transformers/models/perceiver/image_processing_perceiver.py",
"repo_id": "transformers",
"token_count": 7426
} | 468 |
# Copyright 2025 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Phi4Multimodal
"""
import re
from typing import Optional, Union
from ...audio_utils import AudioInput
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
class Phi4MultimodalProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"audio_kwargs": {
"device": "cpu",
},
}
class Phi4MultimodalProcessor(ProcessorMixin):
r"""
    Constructs a Phi4Multimodal processor which wraps an image processor, an audio processor, and a GPT tokenizer into a single processor.
[`Phi4MultimodalProcessor`] offers all the functionalities of [`Phi4MultimodalImageProcessorFast`] and [`GPT2Tokenizer`]. See the
[`~Phi4MultimodalProcessor.__call__`] and [`~Phi4MultimodalProcessor.decode`] for more information.
Args:
image_processor (`Phi4MultimodalImageProcessorFast`):
The image processor to use for images.
audio_processor (`Phi4MultimodalFeatureExtractor`):
The audio processor to use for audio inputs.
tokenizer (`GPT2TokenizerFast`):
The tokenizer to use for text.
fake_image_token_pattern (`str`, *optional*, defaults to `r"<\|image_\d+\|>"`):
The fake image token pattern.
fake_audio_token_pattern (`str`, *optional*, defaults to `r"<\|audio_\d+\|>"`):
The fake audio token pattern.
"""
attributes = ["image_processor", "audio_processor", "tokenizer"]
tokenizer_class = "GPT2TokenizerFast"
image_processor_class = "Phi4MultimodalImageProcessorFast"
audio_processor_class = "Phi4MultimodalFeatureExtractor"
def __init__(
self,
image_processor,
audio_processor,
tokenizer,
**kwargs,
):
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
self.audio_token = tokenizer.audio_token
self.audio_token_id = tokenizer.audio_token_id
super().__init__(image_processor, audio_processor, tokenizer, **kwargs)
def __call__(
self,
text: Union[TextInput, list[TextInput]],
images: Optional[ImageInput] = None,
audio: Optional[AudioInput] = None,
**kwargs: Unpack[ProcessingKwargs],
) -> BatchFeature:
"""
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to GPT2Tokenizer's [`~GPT2Tokenizer.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        Phi4MultimodalImageProcessorFast's [`~Phi4MultimodalImageProcessorFast.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
audio (`list[Union[np.ndarray, torch.Tensor]]`):
List of the audios to be prepared.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **input_image_embeds** -- Pixel values to be fed to a model.
- **image_sizes** -- List of tuples specifying the size of each image in `input_image_embeds`.
- **image_attention_mask** -- List of attention masks for each image in `input_image_embeds`.
- **input_audio_embeds** -- Audio embeddings to be fed to a model.
- **audio_embed_sizes** -- List of integers specifying the size of each audio in `input_audio_embeds`.
"""
output_kwargs = self._merge_kwargs(Phi4MultimodalProcessorKwargs, self.tokenizer.init_kwargs, **kwargs)
image_kwargs = output_kwargs["images_kwargs"]
audio_kwargs = output_kwargs["audio_kwargs"]
image_inputs = self.image_processor(images, **image_kwargs) if images is not None else {}
audio_inputs = self.audio_processor(audio, **audio_kwargs) if audio is not None else {}
# We pop here for images as we don't need it later
num_img_tokens = image_inputs.pop("num_img_tokens", [])
audio_embed_sizes = audio_inputs.get("audio_embed_sizes", [])
# Replace certain special tokens for compatibility
if isinstance(text, str):
text = [text]
        elif not isinstance(text, list) or not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
image_token = self.tokenizer.image_token
audio_token = self.tokenizer.audio_token
# Check that the number of special tokens is sound
concatenated_prompt = "".join(text)
if concatenated_prompt.count(image_token) != len(num_img_tokens):
raise ValueError(
"You should add as much image tokens `<|image|>` in your prompt as you pass `images` to the processor. ",
f"Input contains {concatenated_prompt.count(image_token)} tokens != {len(num_img_tokens)} images",
)
if concatenated_prompt.count(audio_token) != len(audio_embed_sizes):
raise ValueError(
"You should add as much audio tokens `<|audio|>` in your prompt as you pass `audios` to the processor. "
f"Input contains {concatenated_prompt.count(audio_token)} tokens != {len(audio_embed_sizes)} audios"
)
# Add appropriate number of image/audio tokens (note that the count of replacement is dynamic)
image_count_iter = iter(num_img_tokens)
audio_count_iter = iter(audio_embed_sizes)
processed_text = [
re.sub(re.escape(image_token), lambda _: image_token * next(image_count_iter), t) for t in text
]
processed_text = [
re.sub(re.escape(audio_token), lambda _: audio_token * next(audio_count_iter), t) for t in processed_text
]
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
text_inputs = self.tokenizer(processed_text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(processed_text, text_inputs, modalities=["image"])
# prepare batch feature
data = {
**text_inputs,
**image_inputs,
**audio_inputs,
}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["Phi4MultimodalProcessor"]
| transformers/src/transformers/models/phi4_multimodal/processing_phi4_multimodal.py/0 | {
"file_path": "transformers/src/transformers/models/phi4_multimodal/processing_phi4_multimodal.py",
"repo_id": "transformers",
"token_count": 3181
} | 469 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Pixtral."""
from typing import Optional, Union
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ImageInput, PILImageResampling, SizeDict
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
is_vision_available,
logging,
)
from .image_processing_pixtral import get_resize_output_image_size
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
if is_torchvision_available():
if is_vision_available():
pass
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
class PixtralFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
    patch_size (`dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
Size of the patches in the model, used to calculate the output image size. Can be overridden by `patch_size` in the `preprocess` method.
"""
patch_size: Optional[dict[str, int]]
@auto_docstring
class PixtralImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = [0.48145466, 0.4578275, 0.40821073]
image_std = [0.26862954, 0.26130258, 0.27577711]
patch_size = {"height": 16, "width": 16}
size = {"longest_edge": 1024}
default_to_square = True
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = PixtralFastImageProcessorKwargs
model_input_names = ["pixel_values", "image_sizes"]
def __init__(self, **kwargs: Unpack[PixtralFastImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[PixtralFastImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def resize(
self,
image: torch.Tensor,
size: SizeDict,
patch_size: SizeDict,
interpolation: "F.InterpolationMode" = None,
**kwargs,
) -> torch.Tensor:
"""
        Resize an image. The image is resized so that its longest edge is at most `size["longest_edge"]`, keeping the
        input aspect ratio, with the output dimensions rounded up to multiples of the patch size.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Dict containing the longest possible edge of the image.
patch_size (`SizeDict`):
Patch size used to calculate the size of the output image.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Resampling filter to use when resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if size.longest_edge:
size = (size.longest_edge, size.longest_edge)
elif size.height and size.width:
size = (size.height, size.width)
else:
raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
if patch_size.height and patch_size.width:
patch_size = (patch_size.height, patch_size.width)
else:
raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size)
return F.resize(image, size=output_size, interpolation=interpolation, **kwargs)
# Adapted from transformers.models.pixtral.image_processing_pixtral.PixtralImageProcessor._pad_for_batching
def _pad_for_batching(
self,
pixel_values: list[torch.Tensor],
image_sizes: list[list[int]],
):
"""
        Pads images with zeros up to the largest height and width in the batch so that they can be stacked
        into a single tensor.
        Args:
            pixel_values (`list[torch.Tensor]`):
                A list of image tensors, each of shape (`channels`, `height`, `width`).
            image_sizes (`list[list[int]]`):
                A list of sizes for each image in `pixel_values` in (height, width) format.
        Returns:
            `torch.Tensor`: The padded images stacked into a single batch tensor.
"""
max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]))
pixel_values = [
torch.nn.functional.pad(image, pad=(0, max_shape[1] - size[1], 0, max_shape[0] - size[0]))
for image, size in zip(pixel_values, image_sizes)
]
return torch.stack(pixel_values)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
patch_size: dict[str, int],
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: dict[str, int],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
) -> BatchFeature:
patch_size = get_size_dict(patch_size, default_to_square=True)
patch_size = SizeDict(**patch_size)
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(
image=stacked_images, size=size, patch_size=patch_size, interpolation=interpolation
)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
batch_image_sizes = [grouped_images_index[i][0] for i in range(len(grouped_images_index))]
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
padded_images = self._pad_for_batching(
pixel_values=processed_images,
image_sizes=batch_image_sizes,
)
return BatchFeature(
data={"pixel_values": padded_images, "image_sizes": batch_image_sizes}, tensor_type=return_tensors
)
__all__ = ["PixtralImageProcessorFast"]
| transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py/0 | {
"file_path": "transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py",
"repo_id": "transformers",
"token_count": 3318
} | 470 |
# coding=utf-8
# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version)."""
import copy
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import LayerNorm
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_prophetnet import ProphetNetConfig
logger = logging.get_logger(__name__)
def softmax(hidden_state, dim, onnx_trace=False):
if onnx_trace:
return nn.functional.softmax(hidden_state.float(), dim=dim)
else:
return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32)
def ngram_attention_bias(sequence_length, ngram, device, dtype):
"""
This function computes the bias for the predict stream
"""
left_block = (
torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min
)
right_block = left_block.detach().clone()
# create bias
for stream_idx in range(ngram):
right_block[stream_idx].fill_diagonal_(0, wrap=False)
left_block[stream_idx].triu_(-stream_idx + 1)
left_block[:, :, 0] = 0
return torch.cat([left_block, right_block], dim=2)
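# Illustrative sanity check for `ngram_attention_bias` (added for this document, not part of the
# original file): the bias covers the main and predict streams side by side, so its shape is
# (ngram, sequence_length, 2 * sequence_length); positions that may be attended to hold 0 and masked
# positions hold the most negative finite value of the dtype.
def _example_ngram_attention_bias():
    bias = ngram_attention_bias(sequence_length=4, ngram=2, device="cpu", dtype=torch.float32)
    return bias.shape  # torch.Size([2, 4, 8])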
def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
"""
This function computes individual parts of the relative position buckets. For more detail, see paper.
"""
inv_relative_positions = -relative_positions
rel_positions_bucket = 0
if is_bidirectional:
num_buckets = num_buckets // 2
rel_positions_bucket = (
rel_positions_bucket
+ torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
)
inv_relative_positions = torch.abs(inv_relative_positions)
else:
inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
max_exact = num_buckets // 2
is_small = torch.lt(inv_relative_positions, max_exact)
val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
max_distance / max_exact
) * (num_buckets - max_exact)
val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
return rel_positions_bucket
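# Illustrative sketch (added for this document): bucketing a small grid of relative positions.
# With 32 buckets and a max distance of 128, nearby offsets fall into distinct "exact" buckets
# while far-away offsets share logarithmically spaced buckets.
def _example_compute_relative_buckets():
    position_ids = torch.arange(4).unsqueeze(0)  # [1, 4]
    relative_positions = position_ids.unsqueeze(1).repeat(1, 4, 1) - position_ids.unsqueeze(-1)
    buckets = compute_relative_buckets(32, 128, relative_positions, is_bidirectional=False)
    return buckets.shape  # torch.Size([1, 4, 4]); entries are integer bucket ids in [0, 31]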
def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
"""
This function computes both main and predict relative position buckets. For more detail, see paper.
"""
# main stream
main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
# predicting stream
predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
# get both position buckets
main_relative_position_buckets = compute_relative_buckets(
num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
)
predict_relative_position_buckets = compute_relative_buckets(
num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
)
return main_relative_position_buckets, predict_relative_position_buckets
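# Illustrative shape check (added for this document): the main stream attends over the original
# sequence, while the predict stream attends over the concatenation of shifted and original
# positions, hence the doubled last dimension of its buckets.
def _example_all_stream_relative_buckets():
    position_ids = torch.arange(1, 4).unsqueeze(0)  # [1, 3]
    main, predict = compute_all_stream_relative_buckets(32, 128, position_ids)
    return main.shape, predict.shape  # (torch.Size([1, 3, 3]), torch.Size([1, 3, 6]))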
@dataclass
@auto_docstring(
custom_intro="""
Base class for sequence-to-sequence language models outputs.
"""
)
class ProphetNetSeq2SeqLMOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
Prediction scores of the main stream language modeling head (scores for each vocabulary token before
SoftMax).
logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
SoftMax).
past_key_values (`list[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
        num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@property
def decoder_cross_attentions(self):
warnings.warn(
"`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
" instead.",
FutureWarning,
)
return self.cross_attentions
@dataclass
@auto_docstring(
custom_intro="""
    Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
decoding.
"""
)
class ProphetNetSeq2SeqModelOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size,ngram * decoder_sequence_length, config.vocab_size)`, *optional*):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (`list[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
        num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
        weighted average in the self-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@property
def decoder_cross_attentions(self):
warnings.warn(
"`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
" instead.",
FutureWarning,
)
return self.cross_attentions
@dataclass
@auto_docstring(
custom_intro="""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
"""
)
class ProphetNetDecoderModelOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (`list[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
        num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
        weighted average in the self-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
"""
)
class ProphetNetDecoderLMOutput(ModelOutput):
r"""
ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
Prediction scores of the main stream language modeling head (scores for each vocabulary token before
SoftMax).
logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
SoftMax).
past_key_values (`list[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
        num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
        weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring
class ProphetNetPreTrainedModel(PreTrainedModel):
config: ProphetNetConfig
base_model_prefix = "prophetnet"
supports_gradient_checkpointing = True
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert decoder_start_token_id is not None, (
"self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the"
" pad_token_id. See ProphetNet docs for more information"
)
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
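# Standalone sketch of the shift performed by `_shift_right` above (added for illustration; the
# token ids are made up): labels are moved one position to the right, the decoder start token is
# prepended, and any -100 ignore indices are replaced by the pad token id.
def _example_shift_right():
    labels = torch.tensor([[5, 6, -100, -100]])
    decoder_start_token_id, pad_token_id = 0, 1
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted  # tensor([[0, 5, 6, 1]])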
class ProphetNetPositionalEmbeddings(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
the forward function.
"""
def __init__(self, config: ProphetNetConfig) -> None:
self.max_length = config.max_position_embeddings
super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
assert (position_ids is None) or (self.padding_idx is None), (
"If position_ids is pre-computed then padding_idx should not be set."
)
if position_ids is None:
if past_key_values is not None and past_key_values.get_seq_length() != 0:
# position_ids is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
prev_num_input_ids = past_key_values.get_seq_length()
num_input_ids = inputs_shape[1] + prev_num_input_ids
position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
int(self.padding_idx + num_input_ids)
)
else:
if attention_mask is None:
attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
# retrieve position_ids from input_ids / attention_mask
position_ids = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() + self.padding_idx
                # make sure position_ids are not bigger than max_length
position_ids = position_ids.clamp(0, self.max_length - 1)
return super().forward(position_ids), position_ids
def _forward(self, position_ids):
return super().forward(position_ids)
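# Illustrative sketch (added for this document): how `ProphetNetPositionalEmbeddings.forward`
# derives position ids from an attention mask. Real (non-padding) tokens get consecutive positions
# offset by `padding_idx`, while padded positions collapse onto `padding_idx` itself.
def _example_position_ids_from_attention_mask():
    padding_idx = 1
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
    position_ids = (
        torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
    ).long() + padding_idx
    return position_ids  # tensor([[2, 3, 4, 1, 1]])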
class ProphetNetAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: ProphetNetConfig, num_attn_heads: int, layer_idx: Optional[int] = None):
super().__init__()
hidden_size = config.hidden_size
self.attention_dropout = config.attention_dropout
self.dropout = config.dropout
self.num_attn_heads = num_attn_heads
self.head_dim = hidden_size // num_attn_heads
self.layer_idx = layer_idx
assert self.head_dim * num_attn_heads == hidden_size, (
"`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and"
" `config.num_decoder_attention_heads`"
)
self.key_proj = nn.Linear(hidden_size, hidden_size)
self.value_proj = nn.Linear(hidden_size, hidden_size)
self.query_proj = nn.Linear(hidden_size, hidden_size)
self.out_proj = nn.Linear(hidden_size, hidden_size)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states,
key_value_states: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
layer_head_mask: Optional[Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[Tensor, Optional[Tensor]]:
batch_size, tgt_len, hidden_size = hidden_states.size()
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
assert list(hidden_states.size()) == [
batch_size,
tgt_len,
hidden_size,
], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}"
# previous time steps are cached - no need to recompute key and value if they are static
query_states = self.query_proj(hidden_states) / (self.head_dim**0.5)
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
            else:
                # plain caches do not track whether cross-attention states were already written
                is_updated = False
                curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.key_proj(current_states)
value_states = self.value_proj(current_states)
key_states = key_states.view(batch_size, -1, self.num_attn_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.num_attn_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention:
past_key_values.is_updated[self.layer_idx] = True
query_states = query_states.view(batch_size, tgt_len, self.num_attn_heads, self.head_dim).transpose(1, 2)
src_len = key_states.size(2)
attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len)
if attn_weights.size() != expected_shape:
raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}")
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if attention_mask is not None and attention_mask.dim() == 0:
attention_mask = None
expected_shape = (batch_size, self.num_attn_heads, 1, src_len)
if attention_mask is not None and attention_mask.size() != expected_shape:
raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}")
if attention_mask is not None: # don't attend to padding symbols
attn_weights = attn_weights + attention_mask
if output_attentions:
attn_weights_reshaped = attn_weights
else:
attn_weights_reshaped = None
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (self.num_attn_heads,), (
f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
batch_size, self.num_attn_heads, tgt_len, src_len
)
# apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model
attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped
attn_probs = nn.functional.dropout(
attn_weights,
p=self.attention_dropout,
training=self.training,
)
attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states)
expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim)
if attn_output.size() != expected_shape:
raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}")
attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size)
attn_output = self.out_proj(attn_output)
attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, attn_weights_reshaped
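# Illustrative equivalence check (added for this document): the einsum used above for the attention
# scores is just a batched matrix multiplication of queries with transposed keys. Shapes are made up.
def _example_attention_score_einsum():
    query_states = torch.randn(2, 4, 5, 8)  # [batch, heads, tgt_len, head_dim]
    key_states = torch.randn(2, 4, 6, 8)    # [batch, heads, src_len, head_dim]
    scores = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
    assert torch.allclose(scores, query_states @ key_states.transpose(2, 3), atol=1e-5)
    return scores.shape  # torch.Size([2, 4, 5, 6])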
class ProphetNetFeedForward(nn.Module):
"""
This is the residual two feed-forward layer block based on the original Transformer implementation.
"""
def __init__(self, config: ProphetNetConfig, ffn_dim: int):
super().__init__()
self.activation_fn = ACT2FN[config.activation_function]
self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
self.output = nn.Linear(ffn_dim, config.hidden_size)
self.activation_dropout = config.activation_dropout
self.dropout = config.dropout
def forward(self, hidden_states):
hidden_states = self.intermediate(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.output(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ProphetNetNgramSelfAttention(nn.Module):
def __init__(self, config: ProphetNetConfig, layer_idx=None):
super().__init__()
self.hidden_size = config.hidden_size
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.num_attn_heads = config.num_decoder_attention_heads
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
self.head_dim = config.hidden_size // self.num_attn_heads
self.ngram = config.ngram
self.layer_idx = layer_idx
assert self.head_dim * self.num_attn_heads == config.hidden_size, (
"config.hidden_size must be divisible by num_attn_heads"
)
# key, value, query projection
self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
# out projection
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
# rel position embeddings
self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
# for onnx runtime
self.onnx_trace = False
def _shape(self, tensor, seq_len, batch_size):
return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states,
past_key_values: Optional[tuple[Tensor]] = None,
attention_mask=None,
layer_head_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
cache_position=None,
):
batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], (
f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape"
f" {hidden_states.shape}"
)
# project
query_states = self.query_proj(hidden_states)
key_states = self.key_proj(hidden_states)
value_states = self.value_proj(hidden_states)
# normalize
query_states = query_states / (self.head_dim**0.5)
# reshape
query_states = self._shape(query_states, ngram_sequence_length, batch_size)
key_states = self._shape(key_states, -1, batch_size)
value_states = self._shape(value_states, -1, batch_size)
proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
# chunk into main stream and predict stream
hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
query_states_list = query_states.chunk(1 + self.ngram, dim=2)
key_states_list = key_states.chunk(1 + self.ngram, dim=2)
value_states_list = value_states.chunk(1 + self.ngram, dim=2)
main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
# ProphetNet has two separate attention layers, one for self and one for cross attention
# We need to obtain the self attention only for this module, if `EncoderDecoderCache`
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
main_key_states, main_value_states = curr_past_key_value.update(
main_key_states, main_value_states, self.layer_idx, {"cache_position": cache_position}
)
# get seq_length of main stream only
sequence_length = ngram_sequence_length // (1 + self.ngram)
# MAIN-STREAM
# main attn weights
        # [batch_size, number_heads, sequence_length, head_dimension]
        # x [batch_size, number_heads, head_dimension, sequence_length]
# -> [batch_size, number_heads, sequence_length, sequence_length]
main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3))
# retrieve relative position embeddings for each layer -> see paper for more details
main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
)
main_attn_weights = main_attn_weights + main_relative_pos_embeddings
if attention_mask is not None:
main_attn_weights = main_attn_weights + attention_mask
main_attn_probs = softmax(
main_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(main_attn_weights)
if layer_head_mask is not None:
assert layer_head_mask.size() == (self.num_attn_heads,), (
f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
f" {layer_head_mask.size()}"
)
main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(
batch_size, self.num_attn_heads, -1, sequence_length
)
main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
# project to attn_output
# [batch_size, number_heads, sequence_length, sequence_length]
        # x [batch_size, number_heads, sequence_length, head_dimension]
        # -> [batch_size, number_heads, sequence_length, head_dimension]
main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states)
# reshape so that num_heads dim is merged into last `head_dim` axis
main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size)
main_attn_output = self.out_proj(main_attn_output)
# PREDICT-STREAM
        # [batch_size, ngram, number_heads, sequence_length, head_dimension]
predict_query_states = torch.stack(predict_query_states_list, 1).view(
batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim
)
        # [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1)
# [batch_size, sequence_length, ngram, hidden_size]
predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2)
        # [batch_size, number_heads, ngram, 2*sequence_length, head_dimension]
predict_value_states = torch.cat(
[torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2
)
        # [batch_size, ngram, number_heads, sequence_length, head_dimension]
        # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
# -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states))
# retrieve relative position embeddings for each layer -> see paper for more details
# [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings]
predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
)
# [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
if extended_predict_attention_mask is not None:
# Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4)
extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype)
predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
predict_attn_probs = softmax(
predict_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(predict_attn_weights)
if layer_head_mask is not None:
assert layer_head_mask.size() == (self.num_attn_heads,), (
f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
f" {layer_head_mask.size()}"
)
predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs
predict_attn_probs = nn.functional.dropout(
predict_attn_probs, p=self.attention_dropout, training=self.training
)
# project to attention output
# [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
        # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
        # -> [batch_size, ngram, number_heads, sequence_length, head_dimension]
predict_attn_output = torch.einsum(
"bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2))
)
# reshape so that num_heads dim is merged into last `head_dim` axis
        # [batch_size, ngram, number_heads, sequence_length, head_dimension] -> [batch_size, ngram, sequence_length, hidden_size]
predict_attn_output = predict_attn_output.transpose(2, 3)
predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size)
predict_attn_output = self.out_proj(predict_attn_output)
# concat to single attn output
# [batch_size, (1+ngram)*sequence_length, hidden_size]
attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
# reshape into better form for `config.output_attentions`
main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, main_attn_probs, predict_attn_probs
def get_main_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
):
# input hidden_states [batch_size, sequence_length, hidden_size]
# input attn_weights [batch_size, num_heads, sequence_length, sequence_length]
# input position_ids [batch_size, sequence_length] or [1,1]
batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape
attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len)
if main_relative_position_buckets is None:
batch_size, sequence_length = hidden_states.shape[:2]
relative_positions = (
torch.arange(1, attn_weights.shape[-1] + 1)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
# [batch_size, sequence_length, sequence_length+1]
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
main_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
# [batch_size, sequence_length, num_buckets * num_heads]
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
rel_pos_embeddings = rel_pos_embeddings.view(
rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
)
rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2)
# [batch_size, num_heads, sequence_length, num_buckets]
rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,))
main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
# [batch_size * num_heads * sequence_length, sequence_length]
main_relative_position_buckets = main_relative_position_buckets.view(
-1, main_relative_position_buckets.shape[-1]
)
main_relative_position_buckets = main_relative_position_buckets.long()
# [batch_size * num_heads * sequence_length, sequence_length]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets)
main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1)
return main_relative_pos_embeddings
def get_predict_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
):
# input hidden_states [batch_size, sequence_length, ngram, hidden_size]
# input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length]
# input position_ids [batch_size, sequence_length] or [1,1]
# input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None
batch_size, sequence_length = hidden_states.shape[0:2]
if predict_relative_position_buckets is None:
key_sequence_length = attn_weights.shape[-1]
assert position_ids[0][0] == key_sequence_length - 1, (
"`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
)
relative_positions = (
torch.arange(0, key_sequence_length)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
predict_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
# [batch_size, ngram, sequence_length, hidden_size]
hidden_states = hidden_states.transpose(1, 2)
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
# [batch_size, ngram, sequence_length, num_buckets, num_heads]
rel_pos_embeddings = rel_pos_embeddings.view(
hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
)
rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3)
# [batch_size * ngram * sequence_length * num_heads, num_buckets]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets)
# [ngram, batch_size, num_heads * sequence_length, -1]
predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0)
predict_relative_position_buckets = predict_relative_position_buckets.repeat(
self.ngram, 1, self.num_attn_heads, 1
)
# [ngram * batch_size * num_heads * sequence_length, -1]
predict_relative_position_buckets = predict_relative_position_buckets.view(
-1, predict_relative_position_buckets.size(-1)
).long()
predict_relative_pos_embeddings = torch.gather(
rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
)
        # [batch_size, ngram, num_heads, sequence_length, -1]
predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(
batch_size, self.ngram, self.num_attn_heads, sequence_length, -1
)
return predict_relative_pos_embeddings
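# A rough, self-contained sketch of the lookup both helpers above perform (toy shapes, illustrative only):
# hidden states are projected to one embedding per (head, bucket), and the precomputed relative-position
# buckets are then used as gather indices into that table.
#
#     import torch
#     num_heads, num_buckets, seq_len = 2, 4, 3
#     table = torch.randn(num_heads * seq_len, num_buckets)            # one row per (head, query position)
#     buckets = torch.randint(0, num_buckets, (num_heads * seq_len, seq_len))
#     gathered = torch.gather(table, dim=1, index=buckets)             # -> [num_heads * seq_len, seq_len]
#
# The real methods only add the reshapes needed to fold the batch, ngram and head dimensions in and out.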
class ProphetNetEncoderLayer(GradientCheckpointingLayer):
"""
Encoder block for Prophetnet
"""
def __init__(self, config: ProphetNetConfig):
super().__init__()
# 1st residual block
self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads)
self.self_attn_layer_norm = LayerNorm(config.hidden_size)
# 2nd residual block
self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim)
self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
def forward(
self,
hidden_states,
attention_mask,
layer_head_mask,
output_attentions: bool = False,
):
# 1st residual block
attention_output, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
# 2nd residual block
feed_forward_output = self.feed_forward(hidden_states)
hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class ProphetNetDecoderLayer(GradientCheckpointingLayer):
"""
Decoder block for Prophetnet
"""
def __init__(self, config: ProphetNetConfig, layer_idx=None):
super().__init__()
# 1st residual block
self.self_attn = ProphetNetNgramSelfAttention(config, layer_idx=layer_idx)
self.self_attn_layer_norm = LayerNorm(config.hidden_size)
# 2nd residual block
if config.add_cross_attention:
self.cross_attn = ProphetNetAttention(config, config.num_decoder_attention_heads, layer_idx=layer_idx)
self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
# 3rd residual block
self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim)
self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attn_mask=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
past_key_values=None,
use_cache: Optional[bool] = True,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
):
# 1st residual block
ngram_attention_output, self_attn_weights, self_attn_weights_ngram = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
)
hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
cross_attn_weights = None
if encoder_hidden_states is not None:
# 2nd residual block
attention_output, cross_attn_weights = self.cross_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attn_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
)
hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
# 3rd residual block
feed_forward_output = self.feed_forward(hidden_states)
hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights)
return outputs
@auto_docstring(
custom_intro="""
The standalone encoder part of the ProphetNetModel.
"""
)
class ProphetNetEncoder(ProphetNetPreTrainedModel):
def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
r"""
        word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word
embeddings instead of randomly initialized word embeddings.
"""
super().__init__(config)
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProphetNetPositionalEmbeddings(config)
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetEncoder
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either input_ids or inputs_embeds has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# prepare attention mask
if attention_mask is not None:
extended_attention_mask = (
1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)
) * torch.finfo(self.dtype).min
extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
else:
extended_attention_mask = None
position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
hidden_states = inputs_embeds + position_embeddings
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training)
encoder_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (len(self.layers)), (
f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask=extended_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
)
@auto_docstring(
custom_intro="""
The standalone decoder part of the ProphetNetModel.
"""
)
class ProphetNetDecoder(ProphetNetPreTrainedModel):
def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
r"""
        word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
            The word embedding parameters. This can be used to initialize [`ProphetNetDecoder`] with pre-defined word
embeddings instead of randomly initialized word embeddings.
"""
super().__init__(config)
self.ngram = config.ngram
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.dropout = config.dropout
self.max_target_positions = config.max_position_embeddings
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProphetNetPositionalEmbeddings(config)
self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
self.layers = nn.ModuleList(
[ProphetNetDecoderLayer(config, layer_idx=i) for i in range(config.num_decoder_layers)]
)
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, ProphetNetDecoderModelOutput]:
r"""
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetDecoder
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(), DynamicCache())
if encoder_hidden_states is not None
else DynamicCache()
)
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once(
"Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
"You should pass an instance of `EncoderDecoderCache` instead, e.g. "
"`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
)
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
main_stream_pos_embed, position_ids = self.position_embeddings(
(batch_size, sequence_length),
device=inputs_embeds.device,
past_key_values=past_key_values,
)
if past_key_values_length != 0:
main_relative_position_buckets, predict_relative_position_buckets = None, None
else:
(
main_relative_position_buckets,
predict_relative_position_buckets,
) = self.compute_buffered_relative_buckets(position_ids)
predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
# add position embeddings
hidden_states = inputs_embeds + main_stream_pos_embed
ngram_embeddings = self.ngram_embeddings.weight
# prepare attention mask
if past_key_values_length != 0:
assert hidden_states.size(1) == 1, (
"At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
)
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
for ngram in range(self.ngram)
]
extended_attention_mask = None
extended_predict_attention_mask = None
else:
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
]
extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
# prepare encoder attention mask
if encoder_attention_mask is not None:
extended_encoder_attention_mask = (
1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)
) * torch.finfo(self.dtype).min
extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
else:
extended_encoder_attention_mask = None
hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
if self.embeddings_layer_norm:
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# init attentions, hidden_states and cache with empty tuples
all_main_stream_hidden_states = () if output_hidden_states else None
all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
all_main_stream_attns = () if output_attentions else None
all_ngram_stream_attns = () if output_attentions else None
all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
assert attn_mask.size()[0] == (len(self.layers)), (
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
# grad cannot be kept because tensor is sliced
all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
layer_outputs = decoder_layer(
hidden_states,
extended_attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attn_mask=extended_encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_main_stream_attns += (layer_outputs[1],)
all_ngram_stream_attns += (layer_outputs[2],)
if self.config.add_cross_attention:
all_cross_attns += (layer_outputs[3],)
if output_hidden_states:
all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
# split last_hidden_state for return
last_hidden_state = hidden_states[:, :sequence_length]
last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
if not return_dict:
return tuple(
v
for v in [
last_hidden_state,
last_hidden_state_ngram,
past_key_values,
all_main_stream_hidden_states,
all_ngram_stream_hidden_states,
all_main_stream_attns,
all_ngram_stream_attns,
all_cross_attns,
]
if v is not None
)
return ProphetNetDecoderModelOutput(
last_hidden_state=last_hidden_state,
last_hidden_state_ngram=last_hidden_state_ngram,
past_key_values=past_key_values,
hidden_states=all_main_stream_hidden_states,
hidden_states_ngram=all_ngram_stream_hidden_states,
attentions=all_main_stream_attns,
ngram_attentions=all_ngram_stream_attns,
cross_attentions=all_cross_attns,
)
def compute_buffered_relative_buckets(self, position_ids):
batch_size, sequence_length = position_ids.shape
position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
self.num_buckets, self.relative_max_distance, position_ids
)
# buffer relative buckets
main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
predict_relative_buckets = torch.cat(
[
predict_relative_buckets[:, :sequence_length, :sequence_length],
predict_relative_buckets[
:, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
],
],
2,
).repeat(batch_size, 1, 1)
return main_relative_buckets, predict_relative_buckets
def prepare_attention_mask(self, hidden_states, attention_mask):
batch_size, seq_length = hidden_states.shape[:2]
# get causal mask
causal_mask = torch.full(
(seq_length, seq_length),
torch.finfo(hidden_states.dtype).min,
dtype=hidden_states.dtype,
device=hidden_states.device,
)
causal_mask = torch.triu(causal_mask, 1)
extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand(
(batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min
extended_attention_mask = extended_causal_mask + extended_attention_mask
else:
extended_attention_mask = extended_causal_mask
return extended_attention_mask.to(hidden_states.dtype)
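    # Rough illustration of the additive-mask convention used above (toy values, float32 assumed):
    #
    #     import torch
    #     attention_mask = torch.tensor([[1, 1, 0]])                    # 1 = attend, 0 = padding
    #     additive = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(torch.float32).min
    #     # additive is 0.0 where attention is allowed and a very large negative value where it is not,
    #     # so it can simply be added to the attention scores before the softmax.
    #     causal = torch.triu(torch.full((3, 3), torch.finfo(torch.float32).min), 1)
    #     # triu(..., 1) keeps the large negative values strictly above the diagonal, blocking future tokens.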
def prepare_predict_attention_mask(self, hidden_states, attention_mask):
batch_size, seq_length = hidden_states.shape[:2]
# get causal mask
predict_causal_mask = ngram_attention_bias(
self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
)
predict_causal_mask = torch.cat(
[
predict_causal_mask[:, :seq_length, :seq_length],
predict_causal_mask[
:, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
],
],
dim=-1,
)
extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand(
(batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min
extended_attention_mask = extended_attention_mask.expand(
(batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)
)
# predicted stream attention_mask should always be 0
extended_attention_mask = torch.cat(
[extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
)
extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
else:
extended_predict_attention_mask = extended_predict_causal_mask
return extended_predict_attention_mask.to(hidden_states.dtype)
@auto_docstring
class ProphetNetModel(ProphetNetPreTrainedModel):
_tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"]
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
self.encoder.word_embeddings = self.word_embeddings
self.decoder.word_embeddings = self.word_embeddings
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings)
self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[tuple] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, ProphetNetSeq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetModel
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetModel.from_pretrained("microsoft/prophetnet-large-uncased")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
>>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return ProphetNetSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
decoder_attentions=decoder_outputs.attentions,
decoder_ngram_attentions=decoder_outputs.ngram_attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.
"""
)
class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"]
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.prophetnet = ProphetNetModel(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head)
def get_input_embeddings(self):
return self.prophetnet.word_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[torch.Tensor] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, ProphetNetSeq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
            labels in `[0, ..., config.vocab_size - 1]`
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> logits_next_token = outputs.logits # logits to predict next token as usual
>>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
outputs = self.prophetnet(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
batch_size, sequence_length = (
decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
)
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
# To use .view in loss computation, make sure that logits is contiguous.
if not logits.is_contiguous():
logits = logits.contiguous()
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetSeq2SeqLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
decoder_attentions=outputs.decoder_attentions,
decoder_ngram_attentions=outputs.decoder_ngram_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
logits = logits.transpose(0, 1).contiguous()
lprobs = nn.functional.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
return loss
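    # The branch above implements standard label smoothing: with smoothing factor eps and vocabulary size V,
    # the objective is roughly (1 - eps) * nll_loss + (eps / V) * sum_v(-log p(v)). A minimal numeric sketch
    # with toy tensors (not the model's real shapes):
    #
    #     import torch
    #     import torch.nn.functional as F
    #     logits = torch.randn(5, 10)                   # 5 tokens, vocabulary of 10
    #     targets = torch.randint(0, 10, (5,))
    #     lprobs = F.log_softmax(logits, dim=-1)
    #     nll = F.nll_loss(lprobs, targets, reduction="mean")
    #     smooth = -lprobs.sum(dim=-1).mean()
    #     eps = 0.1
    #     loss = (1.0 - eps) * nll + (eps / lprobs.size(-1)) * smooth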
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def get_encoder(self):
return self.prophetnet.encoder
def get_decoder(self):
return self.prophetnet.decoder
@auto_docstring(
custom_intro="""
    The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal
    language modeling.
    """
)
class ProphetNetForCausalLM(ProphetNetPreTrainedModel, GenerationMixin):
_tied_weights_keys = [
"prophetnet.word_embeddings.weight",
"prophetnet.decoder.word_embeddings.weight",
"lm_head.weight",
]
def __init__(self, config: ProphetNetConfig):
# set config for CLM
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.prophetnet = ProphetNetDecoderWrapper(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.prophetnet.decoder.word_embeddings
def set_input_embeddings(self, value):
self.prophetnet.decoder.word_embeddings = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head)
def set_decoder(self, decoder):
self.prophetnet.decoder = decoder
def get_decoder(self):
return self.prophetnet.decoder
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ProphetNetDecoderLMOutput]:
r"""
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetForCausalLM
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased")
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # Model can also be used with EncoderDecoder framework
>>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer
>>> import torch
>>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased")
>>> tokenizer_dec = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
... "google-bert/bert-large-uncased", "microsoft/prophetnet-large-uncased"
... )
>>> ARTICLE = (
... "the us state department said wednesday it had received no "
... "formal word from bolivia that it was expelling the us ambassador there "
... "but said the charges made against him are `` baseless ."
... )
>>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
>>> labels = tokenizer_dec(
... "us rejects charges against its ambassador in bolivia", return_tensors="pt"
... ).input_ids
>>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
>>> loss = outputs.loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
outputs = self.prophetnet.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetDecoderLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
hidden_states_ngram=outputs.hidden_states_ngram,
attentions=outputs.attentions,
ngram_attentions=outputs.ngram_attentions,
cross_attentions=outputs.cross_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
logits = logits.transpose(0, 1).contiguous()
lprobs = nn.functional.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
return loss
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
head_mask=None,
use_cache=None,
**kwargs,
):
# Overwritten -- our tests complain if we use GenerationMixin.prepare_inputs_for_generation
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past_key_values is not None and past_key_values.get_seq_length() > 0:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"head_mask": head_mask,
"past_key_values": past_key_values,
"use_cache": use_cache,
}
class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel):
"""
This is a wrapper class, so that [`ProphetNetForCausalLM`] can correctly be loaded from pretrained prophetnet
classes.
"""
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.decoder = ProphetNetDecoder(config, word_embeddings=self.word_embeddings)
# Initialize weights and apply final processing
self.post_init()
def _tie_weights(self):
self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings())
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
__all__ = [
"ProphetNetDecoder",
"ProphetNetEncoder",
"ProphetNetForCausalLM",
"ProphetNetForConditionalGeneration",
"ProphetNetModel",
"ProphetNetPreTrainedModel",
]
| transformers/src/transformers/models/prophetnet/modeling_prophetnet.py/0 | {
"file_path": "transformers/src/transformers/models/prophetnet/modeling_prophetnet.py",
"repo_id": "transformers",
"token_count": 40922
} | 471 |
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Qwen2."""
import json
import os
import unicodedata
from functools import lru_cache
from typing import Optional
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
@lru_cache
# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
def bytes_to_unicode():
"""
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
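# For intuition, the construction above keeps printable bytes as themselves and shifts the remaining bytes
# into the 256+ code-point range, e.g. the space byte (32) becomes "Ġ" (chr(256 + 32)) and the newline
# byte (10) becomes "Ċ" (chr(256 + 10)):
#
#     >>> byte_encoder = bytes_to_unicode()
#     >>> byte_encoder[ord(" ")]
#     'Ġ'
#     >>> byte_encoder[ord("\n")]
#     'Ċ'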
# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
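# Tiny example of the helper above (symbols are single characters before any merges have been applied):
#
#     >>> sorted(get_pairs(("l", "o", "w")))
#     [('l', 'o'), ('o', 'w')]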
class Qwen2Tokenizer(PreTrainedTokenizer):
"""
Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.
Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import Qwen2Tokenizer
>>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
>>> tokenizer("Hello world")["input_ids"]
[9707, 1879]
>>> tokenizer(" Hello world")["input_ids"]
[21927, 1879]
```
This is expected.
You should not use GPT2Tokenizer instead, because of the different pretokenization rules.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*):
The beginning of sequence token. Not applicable for this tokenizer.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The token used for padding, for example when batching sequences of different lengths.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not the model should cleanup the spaces that were added when splitting the input text during the
tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
split_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the special tokens should be split during the tokenization process. The default behavior is
            to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>")` =
            `['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will give `['<',
            '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
unk_token="<|endoftext|>",
bos_token=None,
eos_token="<|endoftext|>",
pad_token="<|endoftext|>",
clean_up_tokenization_spaces=False,
split_special_tokens=False,
**kwargs,
):
# Qwen vocab does not contain control tokens; added tokens need to be special
bos_token = (
AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
if isinstance(bos_token, str)
else bos_token
)
eos_token = (
AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
if isinstance(eos_token, str)
else eos_token
)
unk_token = (
AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
if isinstance(unk_token, str)
else unk_token
)
pad_token = (
AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
if isinstance(pad_token, str)
else pad_token
)
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
bpe_merges = []
with open(merges_file, encoding="utf-8") as merges_handle:
for i, line in enumerate(merges_handle):
line = line.strip()
if (i == 0 and line.startswith("#version:")) or not line:
continue
bpe_merges.append(tuple(line.split()))
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
# NOTE: the cache can grow without bound and will get really large for long running processes
# (esp. for texts of language that do not use space between word, e.g. Chinese); technically
# not a memory leak but appears as one.
# GPT2Tokenizer has the same problem, so let's be consistent.
self.cache = {}
self.pat = re.compile(PRETOKENIZE_REGEX)
if kwargs.get("add_prefix_space", False):
logger.warning_once(
f"{self.__class__.__name} does not support `add_prefix_space`, setting it to True has no effect."
)
super().__init__(
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
unk_token=unk_token,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
split_special_tokens=split_special_tokens,
**kwargs,
)
@property
def vocab_size(self) -> int:
return len(self.encoder)
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
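    # Rough trace of the merge loop above with a toy ranks table (not the real Qwen2 merges): given
    # self.bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, self.bpe("low") first merges the lowest-ranked
    # pair ("l", "o") -> "lo", then ("lo", "w") -> "low", and returns "low". With an empty ranks table no
    # pair is mergeable, so the token is returned split into characters, e.g. "l o w".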
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def decode(
self,
token_ids,
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = False,
spaces_between_special_tokens: bool = False,
**kwargs,
) -> str:
# `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
# and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer
return super().decode(
token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
spaces_between_special_tokens=spaces_between_special_tokens,
**kwargs,
)
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def prepare_for_tokenization(self, text, **kwargs):
text = unicodedata.normalize("NFC", text)
return (text, kwargs)
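    # NFC normalization folds decomposed character sequences into their composed form before tokenization,
    # so visually identical inputs map to the same token ids. A small standard-library illustration:
    #
    #     >>> import unicodedata
    #     >>> unicodedata.normalize("NFC", "e\u0301") == "\u00e9"   # 'e' + combining acute accent -> 'é'
    #     True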
__all__ = ["Qwen2Tokenizer"]
| transformers/src/transformers/models/qwen2/tokenization_qwen2.py/0 | {
"file_path": "transformers/src/transformers/models/qwen2/tokenization_qwen2.py",
"repo_id": "transformers",
"token_count": 6058
} | 472 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/qwen3_moe/modular_qwen3_moe.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_qwen3_moe.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
GenericForQuestionAnswering,
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import OutputRecorder, check_model_inputs
from .configuration_qwen3_moe import Qwen3MoeConfig
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
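# Concretely, for a head dimension of 4 the helper above maps [x1, x2, x3, x4] to [-x3, -x4, x1, x2]
# (the negated second half swaps places with the first half), which is what the rotary formula relies on:
#
#     >>> import torch
#     >>> rotate_half(torch.tensor([1.0, 2.0, 3.0, 4.0]))
#     tensor([-3., -4.,  1.,  2.])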
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
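# A minimal shape sketch (illustrative values, not part of the original file): with q and k of shape
# (batch=1, heads=2, seq_len=4, head_dim=8) and cos/sin of shape (1, 4, 8), the default
# `unsqueeze_dim=1` broadcasts cos/sin over the heads axis, so the returned tensors keep their shapes:
# >>> q = torch.randn(1, 2, 4, 8); k = torch.randn(1, 2, 4, 8)
# >>> cos = torch.randn(1, 4, 8); sin = torch.randn(1, 4, 8)
# >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # both still (1, 2, 4, 8)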
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
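# Shape sketch (illustrative): repeat_kv on a (2, 4, 16, 64) tensor with n_rep=3 returns a
# (2, 12, 16, 64) tensor, i.e. each of the 4 key/value heads is repeated 3 times along the head axis.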
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
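# In formula form (for reference): attn_output = softmax(Q K^T * scaling + causal_mask) V, where K and V
# are first expanded from num_key_value_heads to num_attention_heads by repeat_kv (grouped-query attention)
# and the softmax is computed in float32 before casting back to the query dtype.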
class Qwen3MoeAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Qwen3MoeConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.q_norm = Qwen3MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
self.k_norm = Qwen3MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps) # thus post q_norm does not need reshape
self.sliding_window = getattr(config, "sliding_window", None)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window, # diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Qwen3MoeMLP(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class Qwen3MoeSparseMoeBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.num_experts
self.top_k = config.num_experts_per_tok
self.norm_topk_prob = config.norm_topk_prob
# gating
self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
self.experts = nn.ModuleList(
[Qwen3MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
""" """
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
if self.norm_topk_prob: # only diff with mixtral sparse moe block!
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
# we cast back to the input dtype
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros(
(batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
)
# One hot encode the selected experts to create an expert mask
# this will be used to easily index which expert is going to be solicited
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
# Loop over all available experts in the model and perform the computation on each expert
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_layer = self.experts[expert_idx]
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
# Index the correct hidden states and compute the expert hidden state for
# the current expert. We need to make sure to multiply the output hidden
# states by `routing_weights` on the corresponding tokens (top-1 and top-2)
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
# However `index_add_` only support torch tensors for indexing so we'll use
# the `top_x` tensor here.
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return final_hidden_states, router_logits
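# A minimal routing sketch (illustrative config values): with num_experts=8 and top_k=2, a
# (batch=1, seq_len=4, hidden_dim) input produces router_logits of shape (4, 8); for each of the 4
# tokens, the outputs of its 2 selected experts are scaled by the (optionally re-normalized) routing
# weights and summed into final_hidden_states of shape (1, 4, hidden_dim).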
@use_kernel_forward_from_hub("RMSNorm")
class Qwen3MoeRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Qwen3MoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class Qwen3MoeDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Qwen3MoeConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Qwen3MoeAttention(config, layer_idx)
if (layer_idx not in config.mlp_only_layers) and (
config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
):
self.mlp = Qwen3MoeSparseMoeBlock(config)
else:
self.mlp = Qwen3MoeMLP(config, intermediate_size=config.intermediate_size)
self.input_layernorm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[torch.Tensor]] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> torch.FloatTensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_router_logits (`bool`, *optional*):
Whether or not to return the logits of all the routers. They are useful for computing the router loss,
and should not be returned during inference.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
# For the MoE layers, we need to unpack
if isinstance(hidden_states, tuple):
hidden_states, _ = hidden_states
hidden_states = residual + hidden_states
return hidden_states
class Qwen3MoeRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen3MoeConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
class Qwen3MoePreTrainedModel(PreTrainedModel):
config: Qwen3MoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen3MoeDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(Qwen3MoeSparseMoeBlock, index=1),
"hidden_states": Qwen3MoeDecoderLayer,
"attentions": Qwen3MoeAttention,
}
@auto_docstring
class Qwen3MoeModel(Qwen3MoePreTrainedModel):
def __init__(self, config: Qwen3MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Qwen3MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Qwen3MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per token; can also be interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each expert
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each expert
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
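# A minimal usage sketch (illustrative shapes): for a model with 4 hidden layers, 8 experts and top_k=2,
# `gate_logits` is a tuple of 4 tensors of shape (batch_size * seq_len, 8) collected from the routers;
# the function returns a scalar auxiliary loss already multiplied by `num_experts`, which the causal LM
# below adds to the main loss scaled by `router_aux_loss_coef`.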
@auto_docstring
class Qwen3MoeForCausalLM(Qwen3MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Qwen3MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_experts
self.num_experts_per_tok = config.num_experts_per_tok
# Initialize weights and apply final processing
self.post_init()
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Qwen3MoeForCausalLM
>>> model = Qwen3MoeForCausalLM.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
class Qwen3MoeForSequenceClassification(GenericForSequenceClassification, Qwen3MoePreTrainedModel):
pass
class Qwen3MoeForTokenClassification(GenericForTokenClassification, Qwen3MoePreTrainedModel):
pass
class Qwen3MoeForQuestionAnswering(GenericForQuestionAnswering, Qwen3MoePreTrainedModel):
base_model_prefix = "transformer" # For BC, where `transformer` was used instead of `model`
__all__ = [
"Qwen3MoeForCausalLM",
"Qwen3MoeForQuestionAnswering",
"Qwen3MoeModel",
"Qwen3MoePreTrainedModel",
"Qwen3MoeForSequenceClassification",
"Qwen3MoeForTokenClassification",
]
| transformers/src/transformers/models/qwen3_moe/modeling_qwen3_moe.py/0 | {
"file_path": "transformers/src/transformers/models/qwen3_moe/modeling_qwen3_moe.py",
"repo_id": "transformers",
"token_count": 13750
} | 473 |
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Any, Optional
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
@requires(backends=("sentencepiece",))
class ReformerTokenizer(PreTrainedTokenizer):
"""
Construct a Reformer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
additional_special_tokens (`list[str]`, *optional*, defaults to `[]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
eos_token="</s>",
unk_token="<unk>",
additional_special_tokens=[],
sp_model_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
super().__init__(
eos_token=eos_token,
unk_token=unk_token,
additional_special_tokens=additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
@property
def vocab_size(self):
return self.sp_model.get_piece_size()
def get_vocab(self) -> dict[str, int]:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _tokenize(self, text: str) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index < self.sp_model.get_piece_size():
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
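# A minimal usage sketch (illustrative; the checkpoint name and exact pieces are assumptions):
# >>> tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# >>> tokenizer.tokenize("Hello world")  # SentencePiece sub-word pieces, e.g. ['▁Hello', '▁world']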
__all__ = ["ReformerTokenizer"]
| transformers/src/transformers/models/reformer/tokenization_reformer.py/0 | {
"file_path": "transformers/src/transformers/models/reformer/tokenization_reformer.py",
"repo_id": "transformers",
"token_count": 2825
} | 474 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RoBERTa-PreLayerNorm checkpoint."""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
"""
Copy/paste/tweak roberta_prelayernorm's weights to our BERT structure.
"""
# convert configuration
config = RobertaPreLayerNormConfig.from_pretrained(
checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
)
# convert state_dict
original_state_dict = torch.load(
hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"), weights_only=True
)
state_dict = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta."):
tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
continue
state_dict[tensor_key] = tensor_value
model = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=None, config=config, state_dict=state_dict
)
model.save_pretrained(pytorch_dump_folder_path)
# convert tokenizer
tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
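# Example invocation (paths are illustrative; the repo id comes from the help string above):
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_converted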
| transformers/src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 1081
} | 475 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""
from tokenizers import NormalizedString, PreTokenizedString, normalizers
class JiebaPreTokenizer:
def __init__(self, vocab) -> None:
self.vocab = vocab
self.normalizers = normalizers.BertNormalizer(
clean_text=False,
handle_chinese_chars=True,
strip_accents=False,
lowercase=False,
)
try:
import rjieba
except ImportError:
raise ImportError(
"You need to install rjieba to use RoFormerTokenizer. "
"See https://pypi.org/project/rjieba/ for installation."
)
self.jieba = rjieba
def jieba_split(self, i: int, normalized_string: NormalizedString) -> list[NormalizedString]:
splits = []
# slicing normalized_string here is slow (~6s), but test_alignment_methods passes
for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
if token in self.vocab:
splits.append(normalized_string[start:end])
else:
token_list = self.normalizers.normalize_str(token).split()
for token in token_list:
if token:
end = start + len(token)
splits.append(normalized_string[start:end])
start = end
# the commented-out alternative below is fast (~300ms), but test_alignment_methods does not pass
# for token in self.jieba.cut(str(normalized_string), False):
# if token in self.vocab:
# splits.append(NormalizedString(token))
# else:
# token_list = self.normalizers.normalize_str(token).split()
# for token in token_list:
# if token:
# splits.append(NormalizedString(token))
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
pretok.split(self.jieba_split)
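# A minimal wiring sketch (illustrative; the `vocab` dict and `backend_tokenizer` object are assumptions):
# >>> from tokenizers import pre_tokenizers
# >>> jieba_pre_tok = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))
# >>> backend_tokenizer.pre_tokenizer = jieba_pre_tok  # plug into a `tokenizers.Tokenizer` instance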
| transformers/src/transformers/models/roformer/tokenization_utils.py/0 | {
"file_path": "transformers/src/transformers/models/roformer/tokenization_utils.py",
"repo_id": "transformers",
"token_count": 1134
} | 476 |
# coding=utf-8
# Copyright 2023 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RWKV configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class RwkvConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`RwkvModel`]. It is used to instantiate a RWKV
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the RWKV-4
[RWKV/rwkv-4-169m-pile](https://huggingface.co/RWKV/rwkv-4-169m-pile) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50277):
Vocabulary size of the RWKV model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`RwkvModel`].
context_length (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model can be used with in a single forward (using it in RNN mode
lets you use any sequence length).
hidden_size (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the model.
attention_hidden_size (`int`, *optional*):
Dimensionality of the attention hidden states. Will default to `hidden_size` if unset.
intermediate_size (`int`, *optional*):
Dimensionality of the inner feed-forward layers. Will default to 4 times `hidden_size` if unset.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
bos_token_id (`int`, *optional*, defaults to 0):
The id of the beginning of sentence token in the vocabulary. Defaults to 0 as RWKV uses the same tokenizer
as GPTNeoX.
eos_token_id (`int`, *optional*, defaults to 0):
The id of the end of sentence token in the vocabulary. Defaults to 0 as RWKV uses the same tokenizer as
GPTNeoX.
rescale_every (`int`, *optional*, defaults to 6):
At inference, the hidden states (and weights of the corresponding output layers) are divided by 2 every
`rescale_every` layers. If set to 0 or a negative number, no rescaling is done.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the word embeddings with the input token embeddings.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last state.
Example:
```python
>>> from transformers import RwkvConfig, RwkvModel
>>> # Initializing a Rwkv configuration
>>> configuration = RwkvConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = RwkvModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "rwkv"
attribute_map = {"max_position_embeddings": "context_length"}
def __init__(
self,
vocab_size=50277,
context_length=1024,
hidden_size=4096,
num_hidden_layers=32,
attention_hidden_size=None,
intermediate_size=None,
layer_norm_epsilon=1e-5,
bos_token_id=0,
eos_token_id=0,
rescale_every=6,
tie_word_embeddings=False,
use_cache=True,
**kwargs,
):
self.vocab_size = vocab_size
self.context_length = context_length
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
self.layer_norm_epsilon = layer_norm_epsilon
self.rescale_every = rescale_every
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
)
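# Note (illustrative): thanks to `attribute_map`, `max_position_embeddings` is an alias for
# `context_length`, e.g. RwkvConfig(context_length=2048).max_position_embeddings == 2048.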
__all__ = ["RwkvConfig"]
| transformers/src/transformers/models/rwkv/configuration_rwkv.py/0 | {
"file_path": "transformers/src/transformers/models/rwkv/configuration_rwkv.py",
"repo_id": "transformers",
"token_count": 1911
} | 477 |
# coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SAM 2 model."""
from dataclasses import dataclass
from typing import Callable, Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from transformers.models.maskformer.modeling_maskformer import MaskFormerSinePositionEmbedding
from transformers.models.sam.image_processing_sam_fast import SamImageProcessorFast
from transformers.models.sam.modeling_sam import (
SamLayerNorm,
SamMaskDecoder,
SamMaskEmbedding,
SamModel,
SamPromptEncoder,
SamTwoWayAttentionBlock,
SamTwoWayTransformer,
eager_attention_forward,
)
from transformers.models.vitdet.modeling_vitdet import window_partition, window_unpartition
from transformers.utils.generic import TransformersKwargs, check_model_inputs
from ...activations import ACT2FN
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import (
DefaultFastImageProcessorKwargs,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
pil_torch_interpolation_mapping,
)
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
ModelOutput,
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
logging,
)
from ..auto import AutoModel
from .configuration_sam2 import (
Sam2Config,
Sam2HieraDetConfig,
Sam2MaskDecoderConfig,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
)
if is_torch_available():
import torch
from torch.nn import functional as F
if is_torchvision_v2_available():
pass
elif is_torchvision_available():
pass
logger = logging.get_logger(__name__)
class Sam2FastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
r"""
mask_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to resize the segmentation maps to.
"""
mask_size: Optional[dict[str, int]]
@auto_docstring
class Sam2ImageProcessorFast(SamImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 1024, "width": 1024}
mask_size = {"height": 256, "width": 256}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = Sam2FastImageProcessorKwargs
# modular artefacts
do_pad = None
pad_size = None
mask_pad_size = None
def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
SamImageProcessorFast().__init__(**kwargs)
def pad_image():
raise NotImplementedError("No pad_image for SAM 2.")
def _get_preprocess_shape():
raise NotImplementedError("No _get_preprocess_shape for SAM 2.")
def resize():
raise NotImplementedError("No need to override resize for SAM 2.")
def _preprocess(
self,
images: list["torch.Tensor"],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> "torch.Tensor":
return SamImageProcessorFast()._preprocess(images, return_tensors=return_tensors, **kwargs).pixel_values
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
pixel_values = self._preprocess(images, **images_kwargs)
reshaped_input_sizes = [image.shape[-2:] for image in images]
data = {
"pixel_values": pixel_values,
"original_sizes": original_sizes,
"reshaped_input_sizes": reshaped_input_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
"size": segmentation_maps_kwargs.pop("mask_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
mask_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["mask_size"] = mask_size
kwargs["default_to_square"] = default_to_square
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
return kwargs
def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
# TODO: add connected components kernel for postprocessing
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
if apply_non_overlapping_constraints:
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
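# A minimal post-processing sketch (illustrative shapes; the default mask_threshold=0.0 is used):
# >>> processor = Sam2ImageProcessorFast()
# >>> low_res_masks = torch.randn(1, 3, 256, 256)  # (batch, num_masks, h, w) from the mask decoder
# >>> masks = processor.post_process_masks([low_res_masks], original_sizes=[(480, 640)])
# >>> masks[0].shape  # upscaled to the original image size and binarized
# torch.Size([1, 3, 480, 640])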
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
class Sam2VisionEncoderOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
fpn_hidden_states (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck.
fpn_position_encoding (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the
model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
fpn_hidden_states: Optional[torch.FloatTensor] = None
fpn_position_encoding: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
class Sam2ImageSegmentationOutput(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
The Intersection over Union (IoU) scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
by the processor to be brought to the original image size.
object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
Logits for the object score, indicating if an object is present.
image_embeddings (`tuple(torch.FloatTensor)`):
The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
tensor has shape `(batch_size, channels, height, width)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
Hidden-states of the vision model at the output of each stage.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the vision model.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the mask decoder.
"""
iou_scores: torch.FloatTensor = None
pred_masks: torch.FloatTensor = None
object_score_logits: torch.FloatTensor = None
image_embeddings: tuple[torch.FloatTensor, ...] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class Sam2PatchEmbeddings(nn.Module):
r"""
Turns pixel values into patch embeddings for transformer consumption.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details.
Returns:
embeddings (`torch.FloatTensor`):
Patch embeddings depend on image_size, patch_kernel_size, patch_stride and patch_padding
"""
def __init__(self, config: Sam2HieraDetConfig):
super().__init__()
num_channels = config.num_channels
hidden_size = config.hidden_size
self.projection = nn.Conv2d(
num_channels,
hidden_size,
kernel_size=config.patch_kernel_size,
stride=config.patch_stride,
padding=config.patch_padding,
)
def forward(self, pixel_values):
_, num_channels, height, width = pixel_values.shape
embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
return embeddings
class Sam2SinePositionEmbedding(MaskFormerSinePositionEmbedding):
pass
class Sam2VisionNeck(nn.Module):
def __init__(self, config: Sam2VisionConfig):
super().__init__()
self.config = config
self.position_encoding = Sam2SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True)
self.convs = nn.ModuleList()
for in_channels in config.backbone_channel_list:
self.convs.append(
nn.Conv2d(
in_channels=in_channels,
out_channels=config.fpn_hidden_size,
kernel_size=config.fpn_kernel_size,
stride=config.fpn_stride,
padding=config.fpn_padding,
),
)
self.fpn_top_down_levels = config.fpn_top_down_levels
def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]:
fpn_hidden_states = ()
fpn_position_encoding = ()
# forward in top-down order (from low to high resolution)
n = len(self.convs) - 1
for i in range(n, -1, -1):
lateral_features = hidden_states[i].permute(0, 3, 1, 2)
lateral_features = self.convs[n - i](lateral_features)
if i not in self.fpn_top_down_levels or i == n:
prev_features = lateral_features
else:
top_down_features = F.interpolate(
prev_features.to(dtype=torch.float32),
scale_factor=2.0,
mode="nearest",
align_corners=None,
antialias=False,
).to(lateral_features.dtype)
prev_features = lateral_features + top_down_features
prev_position_encoding = self.position_encoding(
prev_features.shape, prev_features.device, prev_features.dtype
).to(prev_features.dtype)
fpn_hidden_states += (prev_features,)
fpn_position_encoding += (prev_position_encoding,)
return fpn_hidden_states, fpn_position_encoding
def do_pool(x: torch.Tensor, query_stride: Optional[int] = None) -> torch.Tensor:
if query_stride is None:
return x
# (B, H, W, C) -> (B, C, H, W)
x = x.permute(0, 3, 1, 2)
x = nn.functional.max_pool2d(x, kernel_size=query_stride, stride=query_stride, ceil_mode=False)
# (B, C, H', W') -> (B, H', W', C)
x = x.permute(0, 2, 3, 1)
return x
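# Shape sketch (illustrative): with query_stride=2, do_pool max-pools a (B, H, W, C) tensor such as
# (1, 8, 8, 96) down to (1, 4, 4, 96); with query_stride=None the input is returned unchanged.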
class Sam2MultiScaleAttention(nn.Module):
def __init__(
self,
config: Sam2HieraDetConfig,
dim: int,
dim_out: int,
num_attention_heads: int,
query_stride: Optional[tuple[int, int]] = None,
):
super().__init__()
self.config = config
self.dim = dim
self.dim_out = dim_out
self.query_stride = query_stride
self.num_attention_heads = num_attention_heads
head_dim = dim_out // num_attention_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim_out * 3)
self.proj = nn.Linear(dim_out, dim_out)
self.is_causal = False
def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
batch_size, height, width, _ = hidden_states.shape
# qkv with shape (B, H * W, 3, nHead, C)
qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
# q, k, v with shape (B, H * W, nheads, C)
query, key, value = torch.unbind(qkv, 2)
# Q pooling (for downsample at stage changes)
if self.query_stride:
query = do_pool(query.reshape(batch_size, height, width, -1), self.query_stride)
height, width = query.shape[1:3] # downsampled shape
query = query.reshape(batch_size, height * width, self.num_attention_heads, -1)
# transpose query, key, value to (B, nHead, H * W, C)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, _ = attention_interface(
self,
query,
key,
value,
attention_mask=None,
is_causal=self.is_causal,
scaling=self.scale,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return attn_output
class Sam2FeedForward(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
activation: str = "relu",
sigmoid_output: bool = False,
):
super().__init__()
self.num_layers = num_layers
self.activation = ACT2FN[activation]
self.proj_in = nn.Linear(input_dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, output_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
self.sigmoid_output = sigmoid_output
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
hidden_states = self.activation(hidden_states)
for layer in self.layers:
hidden_states = self.activation(layer(hidden_states))
hidden_states = self.proj_out(hidden_states)
if self.sigmoid_output:
hidden_states = F.sigmoid(hidden_states)
return hidden_states
class Sam2MultiScaleBlock(GradientCheckpointingLayer):
def __init__(
self,
config: Sam2HieraDetConfig,
stage_idx: int,
block_idx: int,
total_block_idx: int,
):
super().__init__()
# take embed dim from previous stage if first block of stage
self.dim = (
config.embed_dim_per_stage[stage_idx - 1]
if stage_idx > 0 and block_idx == 0
else config.embed_dim_per_stage[stage_idx]
)
self.dim_out = config.embed_dim_per_stage[stage_idx]
self.layer_norm1 = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
# take window size from previous stage if first block of stage
self.window_size = (
config.window_size_per_stage[stage_idx - 1]
if stage_idx > 0 and block_idx == 0
else config.window_size_per_stage[stage_idx]
)
self.window_size = 0 if total_block_idx in config.global_attention_blocks else self.window_size
# use query stride for first block of stage if stage is a query pool stage
self.query_stride = (
config.query_stride if 0 < stage_idx <= config.num_query_pool_stages and block_idx == 0 else None
)
self.attn = Sam2MultiScaleAttention(
config,
self.dim,
self.dim_out,
num_attention_heads=config.num_attention_heads_per_stage[stage_idx],
query_stride=self.query_stride,
)
self.layer_norm2 = nn.LayerNorm(self.dim_out, eps=config.layer_norm_eps)
self.mlp = Sam2FeedForward(
self.dim_out,
int(self.dim_out * config.mlp_ratio),
self.dim_out,
num_layers=2,
activation=config.hidden_act,
)
if self.dim != self.dim_out:
self.proj = nn.Linear(self.dim, self.dim_out)
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states # batch_size, height, width, channel
hidden_states = self.layer_norm1(hidden_states)
# Skip connection
if self.dim != self.dim_out:
residual = do_pool(self.proj(hidden_states), self.query_stride)
# Window partition
window_size = self.window_size
if self.window_size > 0:
H, W = hidden_states.shape[1], hidden_states.shape[2]
hidden_states, pad_hw = window_partition(hidden_states, window_size)
# Window Attention + Q Pooling (if stage change)
attn_output = self.attn(
hidden_states=hidden_states,
**kwargs,
)
hidden_states = attn_output
if self.query_stride:
# Shapes have changed due to Q pooling
window_size = self.window_size // self.query_stride[0]
H, W = residual.shape[1:3]
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
pad_hw = (H + pad_h, W + pad_w)
# Reverse window partition
if self.window_size > 0:
hidden_states = window_unpartition(hidden_states, window_size, pad_hw, (H, W))
hidden_states = residual + hidden_states
layernorm_output = self.layer_norm2(hidden_states)
hidden_states = hidden_states + self.mlp(layernorm_output)
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Hiera model's outputs that also contains a pooling of the last hidden states.
"""
)
class Sam2HieraDetModelOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        Hidden-states at the output of the last layer of the model.
intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the intermediate layers of the model.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
class Sam2PreTrainedModel(PreTrainedModel):
config_class = Sam2Config
base_model_prefix = "sam2"
main_input_name = "pixel_values"
_supports_sdpa = True
_supports_flash_attn_2 = True
_supports_attention_backend = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (nn.LayerNorm, Sam2LayerNorm)):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
if isinstance(module, Sam2HieraDetModel):
if module.pos_embed is not None:
module.pos_embed.data.zero_()
if module.pos_embed_window is not None:
module.pos_embed_window.data.zero_()
if isinstance(module, Sam2Model):
if module.no_memory_embedding is not None:
module.no_memory_embedding.data.zero_()
class Sam2HieraDetModel(Sam2PreTrainedModel):
config_class = Sam2HieraDetConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2HieraDetConfig):
super().__init__(config)
self.patch_embed = Sam2PatchEmbeddings(config)
# Windowed positional embedding (https://arxiv.org/abs/2311.05613)
self.pos_embed = nn.Parameter(
torch.zeros(1, config.hidden_size, *config.window_positional_embedding_background_size)
)
self.pos_embed_window = nn.Parameter(
torch.zeros(1, config.hidden_size, config.window_size_per_stage[0], config.window_size_per_stage[0])
)
self.stage_ends = (np.cumsum(config.blocks_per_stage) - 1).tolist()
self.blocks = nn.ModuleList()
total_block_idx = 0
for stage_idx, blocks_per_stage in enumerate(config.blocks_per_stage):
for block_idx in range(blocks_per_stage):
block = Sam2MultiScaleBlock(
config=config, stage_idx=stage_idx, block_idx=block_idx, total_block_idx=total_block_idx
)
self.blocks.append(block)
total_block_idx += 1
def get_input_embeddings(self):
return self.patch_embed
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
h, w = hw
window_embed = self.pos_embed_window
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
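        # tile the small window embedding across the interpolated global embedding so every patch
        # receives both a global and a within-window positional signal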
pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
pos_embed = pos_embed.permute(0, 2, 3, 1)
return pos_embed
@check_model_inputs
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2HieraDetModelOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
hidden_states = hidden_states + self._get_pos_embed(hidden_states.shape[1:3])
intermediate_hidden_states = ()
for i, block_module in enumerate(self.blocks):
hidden_states = block_module(hidden_states, **kwargs)
if i in self.stage_ends:
intermediate_hidden_states = intermediate_hidden_states + (hidden_states,)
return Sam2HieraDetModelOutput(
last_hidden_state=hidden_states,
intermediate_hidden_states=intermediate_hidden_states,
)
@auto_docstring(
custom_intro="""
The vision model from Sam without any head or projection on top.
"""
)
class Sam2VisionModel(Sam2PreTrainedModel):
config_class = Sam2VisionConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2VisionConfig):
super().__init__(config)
self.config = config
self.backbone = AutoModel.from_config(config.backbone_config)
self.neck = Sam2VisionNeck(config)
self.num_feature_levels = config.num_feature_levels
self.post_init()
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
@check_model_inputs
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2VisionEncoderOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Forward through backbone
backbone_output = self.backbone(pixel_values, **kwargs)
hidden_states = backbone_output.last_hidden_state
intermediate_hidden_states = backbone_output.intermediate_hidden_states
fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)
# Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]
return Sam2VisionEncoderOutput(
last_hidden_state=hidden_states,
fpn_hidden_states=fpn_hidden_states,
fpn_position_encoding=fpn_position_encoding,
)
class Sam2PositionalEmbedding(nn.Module):
def __init__(self, config: Sam2PromptEncoderConfig):
super().__init__()
self.scale = config.scale
positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2))
self.register_buffer("positional_embedding", positional_embedding)
def forward(self, input_coords, input_shape=None):
"""Positionally encode points that are normalized to [0,1]."""
coordinates = input_coords.clone()
if input_shape is not None:
coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
        coordinates = coordinates.to(torch.float32)
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coordinates = 2 * coordinates - 1
coordinates = coordinates.to(self.positional_embedding.dtype)
coordinates = coordinates @ self.positional_embedding
coordinates = 2 * np.pi * coordinates
# outputs d_1 x ... x d_n x channel shape
return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
class Sam2MaskEmbedding(SamMaskEmbedding):
pass
class Sam2PromptEncoder(SamPromptEncoder):
def __init__(self, config: Sam2PromptEncoderConfig):
        nn.Module.__init__(self)
self.shared_embedding = Sam2PositionalEmbedding(config)
self.mask_embed = Sam2MaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
self.input_image_size = config.image_size
self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(
labels[..., None] != -10,
point_embedding,
torch.zeros_like(point_embedding),
)
# Add point embeddings for labels >= 0
point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
batch_size, nb_boxes = boxes.shape[:2]
coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
input_shape = (self.input_image_size, self.input_image_size)
corner_embedding = self.shared_embedding(coords, input_shape)
corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
return corner_embedding
class Sam2Attention(nn.Module):
"""
SAM2's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
values.
"""
def __init__(self, config, downsample_rate=None):
super().__init__()
downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
self.config = config
self.hidden_size = config.hidden_size
self.internal_dim = config.hidden_size // downsample_rate
self.num_attention_heads = config.num_attention_heads
self.head_dim = self.internal_dim // config.num_attention_heads
self.scaling = self.head_dim**-0.5
        self.is_causal = False
        self.dropout_p = 0.0  # referenced in forward(); defined here so training mode does not fail
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_similarity: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
# Input projections
batch_size, point_batch_size = query.shape[:2]
new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
query = self.q_proj(query).view(*new_shape).transpose(1, 2)
key = self.k_proj(key).view(*new_shape).transpose(1, 2)
value = self.v_proj(value).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=attention_similarity,
dropout=0.0 if not self.training else self.dropout_p,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(
batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Sam2TwoWayAttentionBlock(SamTwoWayAttentionBlock, GradientCheckpointingLayer):
def __init__(self, config: Sam2MaskDecoderConfig, skip_first_layer_pe: bool = False):
        nn.Module.__init__(self)
self.self_attn = Sam2Attention(config, downsample_rate=1)
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.cross_attn_token_to_image = Sam2Attention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
self.mlp = Sam2FeedForward(
config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers
)
self.layer_norm3 = nn.LayerNorm(config.hidden_size)
self.layer_norm4 = nn.LayerNorm(config.hidden_size)
self.cross_attn_image_to_token = Sam2Attention(config)
self.skip_first_layer_pe = skip_first_layer_pe
class Sam2TwoWayTransformer(SamTwoWayTransformer):
pass
class Sam2LayerNorm(SamLayerNorm):
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_first"):
        super().__init__(normalized_shape, eps=eps, data_format=data_format)
class Sam2MaskDecoder(SamMaskDecoder):
def __init__(self, config: Sam2MaskDecoderConfig):
super().__init__(config)
del self.iou_prediction_head
self.iou_prediction_head = Sam2FeedForward(
self.hidden_size,
config.iou_head_hidden_dim,
self.num_mask_tokens,
config.iou_head_depth,
sigmoid_output=True,
)
self.conv_s0 = nn.Conv2d(config.hidden_size, config.hidden_size // 8, kernel_size=1, stride=1)
self.conv_s1 = nn.Conv2d(config.hidden_size, config.hidden_size // 4, kernel_size=1, stride=1)
self.obj_score_token = nn.Embedding(1, self.hidden_size)
self.pred_obj_score_head = Sam2FeedForward(self.hidden_size, self.hidden_size, 1, 3)
self.dynamic_multimask_via_stability = config.dynamic_multimask_via_stability
self.dynamic_multimask_stability_delta = config.dynamic_multimask_stability_delta
self.dynamic_multimask_stability_thresh = config.dynamic_multimask_stability_thresh
def _get_stability_scores(self, mask_logits):
"""
Compute stability scores of the mask logits based on the IoU between upper and
lower thresholds.
"""
mask_logits = mask_logits.flatten(-2)
stability_delta = self.dynamic_multimask_stability_delta
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
return stability_scores
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
"""
When outputting a single mask, if the stability score from the current single-mask
output (based on output token 0) falls below a threshold, we instead select from
multi-mask outputs (based on output token 1~3) the mask with the highest predicted
IoU score. This is intended to ensure a valid mask for both clicking and tracking.
"""
# The best mask from multimask output tokens (1~3)
multimask_logits = all_mask_logits[:, :, 1:, :, :]
multimask_iou_scores = all_iou_scores[:, :, 1:]
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) # [B, P]
best_scores_inds_expanded = best_scores_inds.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
best_scores_inds_expanded = best_scores_inds_expanded.expand(
-1, -1, 1, multimask_logits.size(-2), multimask_logits.size(-1)
)
best_multimask_logits = torch.gather(multimask_logits, 2, best_scores_inds_expanded) # [B, P, 1, H, W]
best_multimask_iou_scores = torch.gather(multimask_iou_scores, 2, best_scores_inds.unsqueeze(-1)) # [B, P, 1]
# The mask from singlemask output token 0 and its stability score
singlemask_logits = all_mask_logits[:, :, 0:1, :, :]
singlemask_iou_scores = all_iou_scores[:, :, 0:1]
stability_scores = self._get_stability_scores(singlemask_logits)
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
# Dynamically fall back to best multimask output upon low stability scores.
mask_logits_out = torch.where(
is_stable[..., None, None].expand_as(singlemask_logits),
singlemask_logits,
best_multimask_logits,
)
iou_scores_out = torch.where(
is_stable.expand_as(singlemask_iou_scores),
singlemask_iou_scores,
best_multimask_iou_scores,
)
return mask_logits_out, iou_scores_out
def forward(
self,
image_embeddings: torch.Tensor,
image_positional_embeddings: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
high_resolution_features: list[torch.Tensor],
attention_similarity: Optional[torch.Tensor] = None,
target_embedding: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Args:
image_embeddings (`torch.Tensor`):
The embeddings from the image encoder.
image_positional_embeddings (`torch.Tensor`):
Positional encoding with the shape of image_embeddings.
sparse_prompt_embeddings (`torch.Tensor`):
The embeddings of the points and boxes.
dense_prompt_embeddings (`torch.Tensor`):
The embeddings of the mask inputs.
multimask_output (`bool`):
Whether to return multiple masks or a single mask.
high_resolution_features (`list[torch.Tensor]`, *optional*):
The high-resolution features from the vision encoder.
attention_similarity (`torch.Tensor`, *optional*):
The attention similarity tensor.
target_embedding (`torch.Tensor`, *optional*):
The target embedding.
"""
batch_size, num_channels, height, width = image_embeddings.shape
point_batch_size = sparse_prompt_embeddings.shape[1]
# Concatenate output tokens
output_tokens = torch.cat(
[
self.obj_score_token.weight,
self.iou_token.weight,
self.mask_tokens.weight,
],
dim=0,
)
output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
if sparse_prompt_embeddings.shape[0] != 0:
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)
else:
tokens = output_tokens
point_embeddings = tokens.to(self.iou_token.weight.dtype)
# Expand per-image data in batch direction to be per-mask
image_embeddings = image_embeddings + dense_prompt_embeddings
image_embeddings = image_embeddings.repeat_interleave(point_batch_size, dim=0)
image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
# Run the transformer
point_embeddings, image_embeddings = self.transformer(
point_embeddings=point_embeddings,
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
iou_token_out = point_embeddings[:, :, 1, :]
mask_tokens_out = point_embeddings[:, :, 2 : (2 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
image_embeddings = image_embeddings.transpose(2, 3).view(
batch_size * point_batch_size, num_channels, height, width
)
feat_s0, feat_s1 = high_resolution_features
feat_s0 = feat_s0.repeat_interleave(point_batch_size, dim=0)
feat_s1 = feat_s1.repeat_interleave(point_batch_size, dim=0)
upscaled_embedding = self.upscale_conv1(image_embeddings) + feat_s1
upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding) + feat_s0)
hyper_in_list: list[torch.Tensor] = []
for i in range(self.num_mask_tokens):
current_mlp = self.output_hypernetworks_mlps[i]
hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
hyper_in = torch.stack(hyper_in_list, dim=2)
_, num_channels, height, width = upscaled_embedding.shape
upscaled_embedding = upscaled_embedding.view(batch_size, point_batch_size, num_channels, height * width)
masks = (hyper_in @ upscaled_embedding).view(batch_size, point_batch_size, -1, height, width)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
object_score_logits = self.pred_obj_score_head(point_embeddings[:, :, 0, :])
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
elif self.dynamic_multimask_via_stability and not self.training:
mask_slice = slice(0, 1)
masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
else:
mask_slice = slice(0, 1)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
sam_tokens_out = mask_tokens_out[:, :, mask_slice] # [b, 3, c] shape
return masks, iou_pred, sam_tokens_out, object_score_logits
@auto_docstring(
custom_intro="""
Segment Anything Model 2 (SAM 2) for generating segmentation masks, given an input image and
input points and labels, boxes, or masks.
"""
)
class Sam2Model(SamModel):
_keys_to_ignore_on_load_unexpected = [
r"^memory_.*",
r"^mask_downsample.*",
r"^object_pointer_proj.*",
r"^temporal_positional_encoding_projection_layer.*",
"no_memory_positional_encoding",
"no_object_pointer",
"occlusion_spatial_embedding_parameter",
]
def __init__(self, config: Sam2Config):
        Sam2PreTrainedModel.__init__(self, config)
self.shared_image_embedding = Sam2PositionalEmbedding(config.prompt_encoder_config)
self.vision_encoder = AutoModel.from_config(config.vision_config)
self.prompt_encoder = Sam2PromptEncoder(config.prompt_encoder_config)
# The module using it is not a PreTrainedModel subclass so we need this
config.mask_decoder_config._attn_implementation = config._attn_implementation
self.mask_decoder = Sam2MaskDecoder(config.mask_decoder_config)
self.num_feature_levels = config.vision_config.num_feature_levels
self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes
# a single token to indicate no memory embedding from previous frames
self.hidden_dim = config.vision_config.fpn_hidden_size
self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
self.post_init()
def get_image_wide_positional_embeddings(self) -> torch.Tensor:
size = self.prompt_encoder.image_embedding_size
target_device = self.shared_image_embedding.positional_embedding.device
target_dtype = self.shared_image_embedding.positional_embedding.dtype
grid = torch.ones(size, device=target_device, dtype=target_dtype)
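        # cumulative sums over a grid of ones give (row, column) indices at pixel centers,
        # which are normalized to [0, 1] before being Fourier-encoded by the shared embedding below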
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / size[0]
x_embed = x_embed / size[1]
positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))
return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width
@torch.no_grad()
def get_image_embeddings(
self,
pixel_values: torch.FloatTensor,
**kwargs: Unpack[TransformersKwargs],
) -> list[torch.Tensor]:
r"""
Returns the image embeddings by passing the pixel values through the vision encoder.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Input pixel values
"""
batch_size = pixel_values.shape[0]
feature_maps, _, _, _ = self.get_image_features(pixel_values, **kwargs)
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
return image_embeddings
def get_image_features(
self,
pixel_values: torch.FloatTensor,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[
list[torch.Tensor],
list[torch.Tensor],
Optional[tuple[torch.FloatTensor, ...]],
Optional[tuple[torch.FloatTensor, ...]],
]:
r"""
Extract and preprocess image features using the vision encoder.
Args:
pixel_values (`torch.FloatTensor`):
Input pixel values of shape `(batch_size, num_channels, height, width)`.
Returns:
`tuple`: A tuple containing:
- feature_maps (`list[torch.Tensor]`): List of feature maps from different levels.
- feature_maps_position_embeddings (`list[torch.Tensor]`): List of positional embeddings for each feature level.
- vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Hidden states from the vision encoder.
- vision_attentions (`tuple[torch.FloatTensor]`, *optional*): Attention weights from the vision encoder.
"""
vision_outputs: Sam2VisionEncoderOutput = self.vision_encoder(
pixel_values,
**kwargs,
)
feature_maps = vision_outputs.fpn_hidden_states
feature_maps_position_embeddings = vision_outputs.fpn_position_encoding
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
feature_maps = list(feature_maps)
feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0])
feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1])
# flatten NxCxHxW to HWxNxC
feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
feature_maps_position_embeddings = [
feature_map_position_embedding.flatten(2).permute(2, 0, 1)
for feature_map_position_embedding in feature_maps_position_embeddings
]
return feature_maps, feature_maps_position_embeddings, vision_outputs.hidden_states, vision_outputs.attentions
@check_model_inputs
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
multimask_output: bool = True,
attention_similarity: Optional[torch.FloatTensor] = None,
target_embedding: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Sam2ImageSegmentationOutput:
r"""
        input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
            Input 2D spatial points; these are used by the prompt encoder to encode the prompt and generally yield
            much better results. The points can be obtained by passing a list of list of list to the processor,
            which will create the corresponding `torch` tensors of dimension 4. The first dimension is the image
            batch size, the second dimension is the point batch size (i.e. how many segmentation masks we want the
            model to predict per input point), the third dimension is the number of points per segmentation mask
            (it is possible to pass multiple points for a single mask), and the last dimension is the x (horizontal)
            and y (vertical) coordinates of the point. If a different number of points is passed either for each
            image, or for each mask, the processor will create "PAD" points that will correspond to the (0, 0)
            coordinate, and the computation of the embedding will be skipped for these points using the labels.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
official implementation, there are 3 types of labels
- `1`: the point is a point that contains the object of interest
- `0`: the point is a point that does not contain the object of interest
- `-1`: the point corresponds to the background
We added the label:
- `-10`: the point is a padding point, thus should be ignored by the prompt encoder
The padding labels should be automatically done by the processor.
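            As an illustration (values are hypothetical), a single image with one mask prompt made of one positive
            and one negative point would use `input_points=[[[[400, 650], [800, 650]]]]` with `input_labels=[[[1, 0]]]`.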
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
            Input boxes for the points; these are used by the prompt encoder to encode the prompt and generally
            yield much better generated masks. The boxes can be obtained by passing a list of list of list to the
            processor, which will generate a `torch` tensor whose dimensions correspond respectively to the image
            batch size, the number of boxes per image, and the coordinates of the top left and bottom right point of the box.
In the order (`x1`, `y1`, `x2`, `y2`):
- `x1`: the x coordinate of the top left point of the input box
- `y1`: the y coordinate of the top left point of the input box
- `x2`: the x coordinate of the bottom right point of the input box
- `y2`: the y coordinate of the bottom right point of the input box
input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
            SAM models also accept segmentation masks as input. The mask will be embedded by the prompt encoder to
            generate a corresponding embedding that will later be fed to the mask decoder. These masks need to be
            manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
multimask_output (`bool`, *optional*):
In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
"best" mask, by specifying `multimask_output=False`.
attention_similarity (`torch.FloatTensor`, *optional*):
Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
target_embedding (`torch.FloatTensor`, *optional*):
Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoModel, AutoProcessor
>>> model = AutoModel.from_pretrained("danelcsb/sam2.1_hiera_tiny")
>>> processor = AutoProcessor.from_pretrained("danelcsb/sam2.1_hiera_tiny")
>>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
>>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
>>> input_points = [[[400, 650]]] # 2D location of a window on the car
>>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")
>>> # Get segmentation mask
>>> outputs = model(**inputs)
>>> # Postprocess masks
>>> masks = processor.post_process_masks(
... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
... )
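        >>> scores = outputs.iou_scores  # one predicted IoU per returned mask (three masks when multimask_output=True)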
```
"""
if not ((pixel_values is None) ^ (image_embeddings is None)):
raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")
if input_points is not None and input_boxes is not None:
if input_points.shape[1] != input_boxes.shape[1]:
raise ValueError(
"You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
input_points.shape[1], input_boxes.shape[1]
)
)
image_positional_embeddings = self.get_image_wide_positional_embeddings()
# repeat with batch size
batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
vision_attentions = None
vision_hidden_states = None
if pixel_values is not None:
feature_maps, _, vision_hidden_states, vision_attentions = self.get_image_features(
pixel_values,
**kwargs,
)
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
if input_points is not None and input_labels is None:
input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
if input_points is None and input_boxes is None:
            # If no points are provided, pad with an empty point (with label -1)
input_points = torch.zeros(
batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
)
input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)
if input_masks is not None:
# If mask_inputs is provided, downsize it into low-res mask input if needed
# and feed it as a dense mask prompt into the SAM mask encoder
if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
input_masks = F.interpolate(
input_masks.float(),
size=self.prompt_encoder.mask_input_size,
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(input_masks.dtype)
sparse_embeddings, dense_embeddings = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
low_res_multimasks, iou_scores, _, object_score_logits = self.mask_decoder(
image_embeddings=image_embeddings[-1],
image_positional_embeddings=image_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
high_resolution_features=image_embeddings[:-1],
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
return Sam2ImageSegmentationOutput(
iou_scores=iou_scores,
pred_masks=low_res_multimasks,
object_score_logits=object_score_logits,
image_embeddings=image_embeddings,
vision_hidden_states=vision_hidden_states,
vision_attentions=vision_attentions,
)
__all__ = [
"Sam2Model",
"Sam2VisionModel",
"Sam2PreTrainedModel",
"Sam2ImageProcessorFast",
"Sam2HieraDetModel",
]
| transformers/src/transformers/models/sam2/modular_sam2.py/0 | {
"file_path": "transformers/src/transformers/models/sam2/modular_sam2.py",
"repo_id": "transformers",
"token_count": 27676
} | 478 |
# coding=utf-8
# Copyright 2021 NVIDIA and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SegFormer model configuration"""
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class SegformerConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`SegformerModel`]. It is used to instantiate a
    SegFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SegFormer
[nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability before the classification head.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 256):
The dimension of the all-MLP decode head.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import SegformerModel, SegformerConfig
>>> # Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> configuration = SegformerConfig()
>>> # Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> model = SegformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "segformer"
def __init__(
self,
num_channels=3,
num_encoder_blocks=4,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[32, 64, 160, 256],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
num_attention_heads=[1, 2, 5, 8],
mlp_ratios=[4, 4, 4, 4],
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
classifier_dropout_prob=0.1,
initializer_range=0.02,
drop_path_rate=0.1,
layer_norm_eps=1e-6,
decoder_hidden_size=256,
semantic_loss_ignore_index=255,
**kwargs,
):
super().__init__(**kwargs)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True.",
FutureWarning,
)
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-4
@property
def default_onnx_opset(self) -> int:
return 12
__all__ = ["SegformerConfig", "SegformerOnnxConfig"]
| transformers/src/transformers/models/segformer/configuration_segformer.py/0 | {
"file_path": "transformers/src/transformers/models/segformer/configuration_segformer.py",
"repo_id": "transformers",
"token_count": 2903
} | 479 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/sew/modular_sew.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_sew.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from .modeling_sew import SEWFeatureEncoder
class SEWFeatureExtractor(SEWFeatureEncoder):
def __init__(self, config):
super().__init__(config)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
| transformers/src/transformers/models/sew/feature_extraction_sew.py/0 | {
"file_path": "transformers/src/transformers/models/sew/feature_extraction_sew.py",
"repo_id": "transformers",
"token_count": 831
} | 480 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SigLIP."""
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import auto_docstring
@auto_docstring
class SiglipImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
default_to_square = False
do_resize = True
do_rescale = True
do_normalize = True
__all__ = ["SiglipImageProcessorFast"]
| transformers/src/transformers/models/siglip/image_processing_siglip_fast.py/0 | {
"file_path": "transformers/src/transformers/models/siglip/image_processing_siglip_fast.py",
"repo_id": "transformers",
"token_count": 404
} | 481 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(k, None)
def rename_keys(s_dict):
keys = list(s_dict.keys())
for key in keys:
if "transformer_layers" in key:
s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
elif "subsample" in key:
s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
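    # the Linear below is only a container: its weight is overwritten with the embedding matrix
    # (shape vocab_size x emb_size), so the resulting layer maps emb_size -> vocab_size logits,
    # i.e. the LM head is tied to the input embeddings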
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
m2m_100 = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
args = m2m_100["args"]
state_dict = m2m_100["model"]
lm_head_weights = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(state_dict)
rename_keys(state_dict)
vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
tie_embeds = args.share_decoder_input_output_embed
conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
config = Speech2TextConfig(
vocab_size=vocab_size,
max_source_positions=args.max_source_positions,
max_target_positions=args.max_target_positions,
encoder_layers=args.encoder_layers,
decoder_layers=args.decoder_layers,
encoder_attention_heads=args.encoder_attention_heads,
decoder_attention_heads=args.decoder_attention_heads,
encoder_ffn_dim=args.encoder_ffn_embed_dim,
decoder_ffn_dim=args.decoder_ffn_embed_dim,
d_model=args.encoder_embed_dim,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_function="relu",
num_conv_layers=len(conv_kernel_sizes),
conv_channels=args.conv_channels,
conv_kernel_sizes=conv_kernel_sizes,
input_feat_per_channel=args.input_feat_per_channel,
input_channels=args.input_channels,
tie_word_embeddings=tie_embeds,
num_beams=5,
max_length=200,
use_cache=True,
decoder_start_token_id=2,
early_stopping=True,
)
model = Speech2TextForConditionalGeneration(config)
missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
if len(missing) > 0 and not set(missing) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f" but all the following weights are missing {missing}"
)
if tie_embeds:
model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
model.lm_head.weight.data = lm_head_weights
model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
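# Example invocation (paths are illustrative):
#   python convert_s2t_fairseq_to_tfms.py --fairseq_path /path/to/fairseq_s2t.pt --pytorch_dump_folder_path ./s2t_converted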
| transformers/src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py/0 | {
"file_path": "transformers/src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py",
"repo_id": "transformers",
"token_count": 1830
} | 482 |
# coding=utf-8
# Copyright 2024 the Fast authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TextNet model configuration"""
from transformers import PretrainedConfig
from transformers.utils import logging
from transformers.utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class TextNetConfig(BackboneConfigMixin, PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`TextNetModel`]. It is used to instantiate a
    TextNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [czczup/textnet-base](https://huggingface.co/czczup/textnet-base) architecture. Configuration objects inherit from
    [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.
Args:
stem_kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the initial convolution layer.
stem_stride (`int`, *optional*, defaults to 2):
The stride for the initial convolution layer.
stem_num_channels (`int`, *optional*, defaults to 3):
The num of channels in input for the initial convolution layer.
stem_out_channels (`int`, *optional*, defaults to 64):
The num of channels in out for the initial convolution layer.
stem_act_func (`str`, *optional*, defaults to `"relu"`):
The activation function for the initial convolution layer.
image_size (`tuple[int, int]`, *optional*, defaults to `[640, 640]`):
The size (resolution) of each image.
conv_layer_kernel_sizes (`list[list[list[int]]]`, *optional*):
A list of stage-wise kernel sizes. If `None`, defaults to:
`[[[3, 3], [3, 3], [3, 3]], [[3, 3], [1, 3], [3, 3], [3, 1]], [[3, 3], [3, 3], [3, 1], [1, 3]], [[3, 3], [3, 1], [1, 3], [3, 3]]]`.
conv_layer_strides (`list[list[int]]`, *optional*):
A list of stage-wise strides. If `None`, defaults to:
`[[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]]`.
hidden_sizes (`list[int]`, *optional*, defaults to `[64, 64, 128, 256, 512]`):
Dimensionality (hidden size) at each stage.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage.
Examples:
```python
>>> from transformers import TextNetConfig, TextNetBackbone
>>> # Initializing a TextNetConfig
>>> configuration = TextNetConfig()
>>> # Initializing a model (with random weights)
>>> model = TextNetBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "textnet"
def __init__(
self,
stem_kernel_size=3,
stem_stride=2,
stem_num_channels=3,
stem_out_channels=64,
stem_act_func="relu",
image_size=[640, 640],
conv_layer_kernel_sizes=None,
conv_layer_strides=None,
hidden_sizes=[64, 64, 128, 256, 512],
batch_norm_eps=1e-5,
initializer_range=0.02,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
if conv_layer_kernel_sizes is None:
conv_layer_kernel_sizes = [
[[3, 3], [3, 3], [3, 3]],
[[3, 3], [1, 3], [3, 3], [3, 1]],
[[3, 3], [3, 3], [3, 1], [1, 3]],
[[3, 3], [3, 1], [1, 3], [3, 3]],
]
if conv_layer_strides is None:
conv_layer_strides = [[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]]
self.stem_kernel_size = stem_kernel_size
self.stem_stride = stem_stride
self.stem_num_channels = stem_num_channels
self.stem_out_channels = stem_out_channels
self.stem_act_func = stem_act_func
self.image_size = image_size
self.conv_layer_kernel_sizes = conv_layer_kernel_sizes
self.conv_layer_strides = conv_layer_strides
self.initializer_range = initializer_range
self.hidden_sizes = hidden_sizes
self.batch_norm_eps = batch_norm_eps
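        # one depth entry per stage: the number of conv blocks in a stage equals the number of kernel specs it lists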
self.depths = [len(layer) for layer in self.conv_layer_kernel_sizes]
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, 5)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
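# Example (illustrative, values follow directly from the defaults above): the derived attributes for a
# default configuration are
#   >>> config = TextNetConfig()
#   >>> config.depths        # one entry per stage, taken from conv_layer_kernel_sizes
#   [3, 4, 4, 4]
#   >>> config.stage_names
#   ['stem', 'stage1', 'stage2', 'stage3', 'stage4']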
__all__ = ["TextNetConfig"]
| transformers/src/transformers/models/textnet/configuration_textnet.py/0 | {
"file_path": "transformers/src/transformers/models/textnet/configuration_textnet.py",
"repo_id": "transformers",
"token_count": 2473
} | 483 |
# coding=utf-8
# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch TimeSformer model."""
import collections
from typing import Optional, Union
import torch
import torch.nn.functional
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...utils import (
auto_docstring,
logging,
)
from .configuration_timesformer import TimesformerConfig
logger = logging.get_logger(__name__)
# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L155
class TimesformerPatchEmbeddings(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, config):
super().__init__()
image_size = config.image_size
patch_size = config.patch_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values):
batch_size, num_frames, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width)
embeddings = self.projection(pixel_values)
patch_width = embeddings.size(-1)
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings, num_frames, patch_width
class TimesformerEmbeddings(nn.Module):
"""
Construct the patch and position embeddings.
"""
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
num_frames = config.num_frames
drop_rate = config.hidden_dropout_prob
attention_type = config.attention_type
self.attention_type = attention_type
self.patch_embeddings = TimesformerPatchEmbeddings(config)
self.num_patches = self.patch_embeddings.num_patches
# Positional Embeddings
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if attention_type != "space_only":
self.time_embeddings = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
self.time_drop = nn.Dropout(p=drop_rate)
def forward(self, pixel_values):
batch_size = pixel_values.shape[0]
# create patch embeddings
embeddings, num_frames, patch_width = self.patch_embeddings(pixel_values)
cls_tokens = self.cls_token.expand(embeddings.size(0), -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# resizing the positional embeddings in case they don't match the input at inference
if embeddings.size(1) != self.position_embeddings.size(1):
position_embeddings = self.position_embeddings
cls_pos_embed = position_embeddings[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = position_embeddings[0, 1:, :].unsqueeze(0).transpose(1, 2)
patch_num = int(other_pos_embed.size(2) ** 0.5)
patch_height = embeddings.size(1) // patch_width
other_pos_embed = other_pos_embed.reshape(1, embeddings.size(2), patch_num, patch_num)
new_pos_embed = nn.functional.interpolate(
other_pos_embed, size=(patch_height, patch_width), mode="nearest"
)
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
embeddings = embeddings + new_pos_embed
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.pos_drop(embeddings)
# Time Embeddings
if self.attention_type != "space_only":
cls_tokens = embeddings[:batch_size, 0, :].unsqueeze(1)
embeddings = embeddings[:, 1:]
_, patch_height, patch_width = embeddings.shape
embeddings = (
embeddings.reshape(batch_size, num_frames, patch_height, patch_width)
.permute(0, 2, 1, 3)
.reshape(batch_size * patch_height, num_frames, patch_width)
)
# Resizing time embeddings in case they don't match
if num_frames != self.time_embeddings.size(1):
time_embeddings = self.time_embeddings.transpose(1, 2)
new_time_embeddings = nn.functional.interpolate(time_embeddings, size=(num_frames), mode="nearest")
new_time_embeddings = new_time_embeddings.transpose(1, 2)
embeddings = embeddings + new_time_embeddings
else:
embeddings = embeddings + self.time_embeddings
embeddings = self.time_drop(embeddings)
embeddings = embeddings.view(batch_size, patch_height, num_frames, patch_width).reshape(
batch_size, patch_height * num_frames, patch_width
)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
return embeddings
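# Shape sketch for the forward pass above (illustrative, using the defaults image_size=224, patch_size=16,
# num_frames=8): pixel_values of shape (batch_size, 8, 3, 224, 224) yield 14 * 14 = 196 patches per frame,
# so with "divided_space_time" attention the returned embeddings have shape (batch_size, 8 * 196 + 1, hidden_size),
# i.e. (batch_size, 1569, hidden_size), which matches the [1, 1569, 768] example in TimesformerModel.forward below.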
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->TimeSformer
class TimeSformerDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L57
class TimesformerSelfAttention(nn.Module):
def __init__(self, config: TimesformerConfig):
super().__init__()
num_heads = config.num_attention_heads
qkv_bias = config.qkv_bias
attention_dropout_prob = config.attention_probs_dropout_prob
self.num_heads = num_heads
head_dim = config.hidden_size // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attention_dropout_prob)
def forward(self, hidden_states, output_attentions: bool = False):
batch_size, hidden_size, num_channels = hidden_states.shape
qkv = (
self.qkv(hidden_states)
.reshape(batch_size, hidden_size, 3, self.num_heads, num_channels // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
query, key, value = qkv[0], qkv[1], qkv[2]
attention_probs = (query @ key.transpose(-2, -1)) * self.scale
attention_probs = attention_probs.softmax(dim=-1)
attention_probs = self.attn_drop(attention_probs)
context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, hidden_size, num_channels)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
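# Shape sketch for the qkv projection above (illustrative): with batch size B, sequence length N, hidden size C
# and H attention heads, self.qkv maps (B, N, C) to (B, N, 3 * C), which is reshaped to (B, N, 3, H, C // H) and
# permuted to (3, B, H, N, C // H) so that query, key and value can be read off along the first axis.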
class TimesformerSelfOutput(nn.Module):
"""
The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to
the layernorm applied before each block.
"""
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class TimeSformerAttention(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.attention = TimesformerSelfAttention(config)
self.output = TimesformerSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
self_outputs = self.attention(hidden_states, output_attentions)
attention_output = self.output(self_outputs[0])
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L39
class TimesformerIntermediate(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class TimesformerOutput(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L89
class TimesformerLayer(GradientCheckpointingLayer):
def __init__(self, config: TimesformerConfig, layer_index: int) -> None:
super().__init__()
attention_type = config.attention_type
drop_path_rates = [
x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device="cpu")
] # stochastic depth decay rule
drop_path_rate = drop_path_rates[layer_index]
self.drop_path = TimeSformerDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.attention = TimeSformerAttention(config)
self.intermediate = TimesformerIntermediate(config)
self.output = TimesformerOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.config = config
self.attention_type = attention_type
if attention_type not in ["divided_space_time", "space_only", "joint_space_time"]:
raise ValueError(f"Unknown attention type: {attention_type}")
# Temporal Attention Parameters
if self.attention_type == "divided_space_time":
self.temporal_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.temporal_attention = TimeSformerAttention(config)
self.temporal_dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False):
num_frames = self.config.num_frames
num_patch_width = self.config.image_size // self.config.patch_size
batch_size = hidden_states.shape[0]
num_spatial_tokens = (hidden_states.size(1) - 1) // num_frames
num_patch_height = num_spatial_tokens // num_patch_width
if self.attention_type in ["space_only", "joint_space_time"]:
self_attention_outputs = self.attention(
self.layernorm_before(hidden_states), output_attentions=output_attentions
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
hidden_states = hidden_states + self.drop_path(attention_output)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(layer_output)
outputs = (layer_output,) + outputs
return outputs
elif self.attention_type == "divided_space_time":
# Temporal
temporal_embedding = hidden_states[:, 1:, :]
temporal_embedding = temporal_embedding.reshape(
batch_size, num_patch_height, num_patch_width, num_frames, temporal_embedding.shape[2]
).reshape(batch_size * num_patch_height * num_patch_width, num_frames, temporal_embedding.shape[2])
temporal_attention_outputs = self.temporal_attention(
self.temporal_layernorm(temporal_embedding),
)
attention_output = temporal_attention_outputs[0]
residual_temporal = self.drop_path(attention_output)
residual_temporal = residual_temporal.reshape(
batch_size, num_patch_height, num_patch_width, num_frames, residual_temporal.shape[2]
).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_temporal.shape[2])
residual_temporal = self.temporal_dense(residual_temporal)
temporal_embedding = hidden_states[:, 1:, :] + residual_temporal
# Spatial
init_cls_token = hidden_states[:, 0, :].unsqueeze(1)
cls_token = init_cls_token.repeat(1, num_frames, 1)
cls_token = cls_token.reshape(batch_size * num_frames, 1, cls_token.shape[2])
spatial_embedding = temporal_embedding
spatial_embedding = (
spatial_embedding.reshape(
batch_size, num_patch_height, num_patch_width, num_frames, spatial_embedding.shape[2]
)
.permute(0, 3, 1, 2, 4)
.reshape(batch_size * num_frames, num_patch_height * num_patch_width, spatial_embedding.shape[2])
)
spatial_embedding = torch.cat((cls_token, spatial_embedding), 1)
spatial_attention_outputs = self.attention(
self.layernorm_before(spatial_embedding), output_attentions=output_attentions
)
attention_output = spatial_attention_outputs[0]
outputs = spatial_attention_outputs[1:] # add self attentions if we output attention weights
residual_spatial = self.drop_path(attention_output)
# Taking care of CLS token
cls_token = residual_spatial[:, 0, :]
cls_token = cls_token.reshape(batch_size, num_frames, cls_token.shape[1])
cls_token = torch.mean(cls_token, 1, True) # averaging for every frame
residual_spatial = residual_spatial[:, 1:, :]
residual_spatial = (
residual_spatial.reshape(
batch_size, num_frames, num_patch_height, num_patch_width, residual_spatial.shape[2]
)
.permute(0, 2, 3, 1, 4)
.reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_spatial.shape[2])
)
residual = residual_spatial
hidden_states = temporal_embedding
# Mlp
hidden_states = torch.cat((init_cls_token, hidden_states), 1) + torch.cat((cls_token, residual), 1)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(layer_output)
outputs = (layer_output,) + outputs
return outputs
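# Summary of the "divided_space_time" branch above: temporal attention runs over frames for each spatial
# location, its projected output is added back as a residual, spatial attention then runs over patches within
# each frame (with the CLS token repeated per frame and averaged back afterwards), and finally the MLP block
# is applied on top of the combined representation.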
class TimesformerEncoder(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([TimesformerLayer(config, ind) for ind in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
class TimesformerPreTrainedModel(PreTrainedModel):
config: TimesformerConfig
base_model_prefix = "timesformer"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["TimesformerLayer"]
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.trunc_normal_(module.weight, std=self.config.initializer_range)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.bias, 0)
nn.init.constant_(module.weight, 1.0)
elif isinstance(module, TimesformerEmbeddings):
nn.init.trunc_normal_(module.cls_token, std=self.config.initializer_range)
nn.init.trunc_normal_(module.position_embeddings, std=self.config.initializer_range)
module.patch_embeddings.apply(self._init_weights)
@auto_docstring
class TimesformerModel(TimesformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = TimesformerEmbeddings(config)
self.encoder = TimesformerEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], BaseModelOutput]:
r"""
Examples:
```python
>>> import av
>>> import numpy as np
>>> from transformers import AutoImageProcessor, TimesformerModel
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
>>> model = TimesformerModel.from_pretrained("facebook/timesformer-base-finetuned-k400")
>>> # prepare video for the model
>>> inputs = image_processor(list(video), return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 1569, 768]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
if self.layernorm is not None:
sequence_output = self.layernorm(sequence_output)
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
TimeSformer Model transformer with a video classification head on top (a linear layer on top of the final hidden state
of the [CLS] token) e.g. for ImageNet.
"""
)
class TimesformerForVideoClassification(TimesformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.timesformer = TimesformerModel(config)
# Classifier head
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import av
>>> import torch
>>> import numpy as np
>>> from transformers import AutoImageProcessor, TimesformerForVideoClassification
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
>>> model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
>>> inputs = image_processor(list(video), return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
... logits = outputs.logits
>>> # model predicts one of the 400 Kinetics-400 classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
eating spaghetti
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.timesformer(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0][:, 0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["TimesformerModel", "TimesformerForVideoClassification", "TimesformerPreTrainedModel"]
| transformers/src/transformers/models/timesformer/modeling_timesformer.py/0 | {
"file_path": "transformers/src/transformers/models/timesformer/modeling_timesformer.py",
"repo_id": "transformers",
"token_count": 14136
} | 484 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for TVP."""
from typing import Optional, Union
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ImageInput,
PILImageResampling,
SizeDict,
make_nested_list_of_images,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
)
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
class TvpFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
r"""
do_flip_channel_order (`bool`, *optional*):
Whether to flip the channel order of the image from RGB to BGR.
do_pad (`bool`, *optional*):
Whether to pad the image.
pad_size (`Dict[str, int]` or `SizeDict`, *optional*):
Size dictionary specifying the desired height and width for padding.
constant_values (`float` or `List[float]`, *optional*):
Value used to fill the padding area when `pad_mode` is `'constant'`.
pad_mode (`str`, *optional*):
Padding mode to use — `'constant'`, `'edge'`, `'reflect'`, or `'symmetric'`.
"""
do_flip_channel_order: Optional[bool]
do_pad: Optional[bool]
pad_size: Optional[SizeDict]
constant_values: Optional[Union[float, list[float]]]
pad_mode: Optional[str]
@auto_docstring
class TvpImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"longest_edge": 448}
default_to_square = False
crop_size = {"height": 448, "width": 448}
do_resize = True
do_center_crop = True
do_rescale = True
rescale_factor = 1 / 255
do_pad = True
pad_size = {"height": 448, "width": 448}
constant_values = 0
pad_mode = "constant"
do_normalize = True
do_flip_channel_order = True
valid_kwargs = TvpFastImageProcessorKwargs
def __init__(self, **kwargs: Unpack[TvpFastImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(
self,
videos: Union[ImageInput, list[ImageInput], list[list[ImageInput]]],
**kwargs: Unpack[TvpFastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess videos using the fast image processor.
"""
return super().preprocess(videos, **kwargs)
def _further_process_kwargs(
self,
pad_size: Optional[SizeDict] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if pad_size is not None:
pad_size = SizeDict(**get_size_dict(pad_size, param_name="pad_size"))
kwargs["pad_size"] = pad_size
return super()._further_process_kwargs(**kwargs)
def _prepare_images_structure(
self,
images: ImageInput,
**kwargs,
) -> ImageInput:
"""
Prepare the images structure for processing.
Args:
images (`ImageInput`):
The input images to process.
Returns:
`ImageInput`: The images with a valid nesting.
"""
return make_nested_list_of_images(images, **kwargs)
def _pad_frames(
self,
frames: "torch.Tensor",
pad_size: Union[SizeDict, dict],
constant_values: Union[float, list[float]],
pad_mode: str,
) -> "torch.Tensor":
"""Pad frames to the specified size."""
height, width = pad_size.height, pad_size.width
if frames.shape[-2:] == (height, width):
return frames
# Calculate padding
current_height, current_width = frames.shape[-2:]
pad_bottom = height - current_height
pad_right = width - current_width
if pad_bottom < 0 or pad_right < 0:
raise ValueError("The padding size must be greater than frame size")
# Apply padding
padding = [0, 0, pad_right, pad_bottom] # [left, top, right, bottom]
return F.pad(frames, padding, fill=constant_values, padding_mode=pad_mode)
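# Worked example (illustrative): with the default pad_size {"height": 448, "width": 448}, frames of shape
# (..., 3, 448, 336) give pad_right = 448 - 336 = 112 and pad_bottom = 0, so the frames are only padded on
# the right in this case; frames larger than pad_size raise a ValueError.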
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
interpolation: "F.InterpolationMode" = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Resize an image to the specified size.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict` or `dict`):
Size dictionary. If `size` has `longest_edge`, resize the longest edge to that value
while maintaining aspect ratio. Otherwise, use the base class resize method.
interpolation (`F.InterpolationMode`, *optional*):
Interpolation method to use.
antialias (`bool`, *optional*, defaults to `True`):
Whether to use antialiasing.
Returns:
`torch.Tensor`: The resized image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
# Handle longest_edge case (TVP-specific)
if size.longest_edge:
# Get current dimensions
current_height, current_width = image.shape[-2:]
# Calculate new dimensions maintaining aspect ratio
if current_height >= current_width:
ratio = current_width * 1.0 / current_height
new_height = size.longest_edge
new_width = int(new_height * ratio)
else:
ratio = current_height * 1.0 / current_width
new_width = size.longest_edge
new_height = int(new_width * ratio)
return super().resize(
image, SizeDict(height=new_height, width=new_width), interpolation=interpolation, antialias=antialias
)
# Use base class resize method for other cases
return super().resize(image, size, interpolation, antialias, **kwargs)
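# Worked example (illustrative): with size = {"longest_edge": 448}, a frame of height 720 and width 1280 falls
# into the second branch above, so new_width = 448 and new_height = int(448 * 720 / 1280) = 252, preserving the
# aspect ratio before any center cropping or padding.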
def _flip_channel_order(self, frames: "torch.Tensor") -> "torch.Tensor":
"""
Flip the channel order of the frames from RGB to BGR.
This matches the channel order produced by the slow TVP image processor. After flipping,
each channel position holds:
- Channel 0: Blue (this position originally held Red)
- Channel 1: Green (unchanged)
- Channel 2: Red (this position originally held Blue)
"""
# Assuming frames are in channels_first format (..., C, H, W)
frames = frames.flip(-3)
return frames
def _preprocess(
self,
images: list[list["torch.Tensor"]],
do_resize: bool,
size: Union[SizeDict, dict],
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: Union[SizeDict, dict],
do_rescale: bool,
rescale_factor: float,
do_pad: bool,
pad_size: Union[SizeDict, dict],
constant_values: Union[float, list[float]],
pad_mode: str,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_flip_channel_order: bool,
return_tensors: Optional[Union[str, TensorType]],
disable_grouping: Optional[bool],
**kwargs,
) -> BatchFeature:
"""
Preprocess videos using the fast image processor.
This method processes each video frame through the same pipeline as the original
TVP image processor but uses torchvision operations for better performance.
"""
grouped_images, grouped_images_index = group_images_by_shape(
images, disable_grouping=disable_grouping, is_nested=True
)
processed_images_grouped = {}
for shape, stacked_frames in grouped_images.items():
# Resize if needed
if do_resize:
stacked_frames = self.resize(stacked_frames, size, interpolation)
# Center crop if needed
if do_center_crop:
stacked_frames = self.center_crop(stacked_frames, crop_size)
# Rescale and normalize using fused method for consistency
stacked_frames = self.rescale_and_normalize(
stacked_frames, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
# Pad if needed
if do_pad:
stacked_frames = self._pad_frames(stacked_frames, pad_size, constant_values, pad_mode)
# Flip channel order if needed (RGB to BGR)
if do_flip_channel_order:
stacked_frames = self._flip_channel_order(stacked_frames)
processed_images_grouped[shape] = stacked_frames
processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
if return_tensors == "pt":
processed_images = [torch.stack(images, dim=0) for images in processed_images]
processed_images = torch.stack(processed_images, dim=0)
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["TvpImageProcessorFast"]
| transformers/src/transformers/models/tvp/image_processing_tvp_fast.py/0 | {
"file_path": "transformers/src/transformers/models/tvp/image_processing_tvp_fast.py",
"repo_id": "transformers",
"token_count": 4344
} | 485 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert UniSpeech checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
Wav2Vec2FeatureExtractor,
Wav2Vec2PhonemeCTCTokenizer,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
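# Worked example (illustrative, the weight name is hypothetical): a fairseq entry such as
# "w2v_encoder.w2v_model.encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" key above,
# the "*" placeholder is filled with layer index "3", and the value is copied into
# "unispeech.encoder.layers.3.attention.k_proj.weight" (weight_type="weight") by the helpers below.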
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
for attribute in key.split("."):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
attribute = "lm_head"
hf_pointer = getattr(hf_pointer, attribute)
if weight_type is not None:
hf_shape = getattr(hf_pointer, weight_type).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name,
value,
feature_extractor,
unused_weights,
hf_model.config.feat_extract_norm == "group",
)
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key)[0].split(".")[-2]
mapped_key = mapped_key.replace("*", layer_index)
if "weight_g" in name:
weight_type = "weight_g"
elif "weight_v" in name:
weight_type = "weight_v"
elif "bias" in name:
weight_type = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = "weight"
else:
weight_type = None
set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
continue
if not is_used:
unused_weights.append(name)
logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
name = full_name.split("conv_layers.")[-1]
items = name.split(".")
layer_id = int(items[0])
type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if config_path is not None:
config = UniSpeechConfig.from_pretrained(config_path)
else:
config = UniSpeechConfig()
if is_finetuned:
if dict_path:
target_dict = Dictionary.load_from_json(dict_path)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols)
vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
if not os.path.isdir(pytorch_dump_folder_path):
logger.error(f"--pytorch_dump_folder_path ({pytorch_dump_folder_path}) should be a directory")
return
os.makedirs(pytorch_dump_folder_path, exist_ok=True)
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict["<pad>"] = 42
vocab_dict["<s>"] = 43
with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
json.dump(vocab_dict, vocab_handle)
tokenizer = Wav2Vec2PhonemeCTCTokenizer(
vocab_path,
unk_token=target_dict.unk_word,
pad_token=target_dict.pad_word,
bos_token=target_dict.bos_word,
eos_token=target_dict.eos_word,
word_delimiter_token="|",
do_lower_case=False,
)
return_attention_mask = config.feat_extract_norm == "layer"
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1,
sampling_rate=16000,
padding_value=0,
do_normalize=True,
return_attention_mask=return_attention_mask,
)
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(pytorch_dump_folder_path)
hf_unispeech = UniSpeechForCTC(config)
else:
hf_unispeech = UniSpeechForPreTraining(config)
if is_finetuned:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
)
else:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
model = model[0].eval()
recursively_load_weights(model, hf_unispeech, is_finetuned)
hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
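# Example invocation (illustrative, paths are placeholders):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted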
| transformers/src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 5195
} | 486 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvNext + UperNet checkpoints from mmsegmentation."""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
auxiliary_in_channels = 384
if "tiny" in model_name:
depths = [3, 3, 9, 3]
hidden_sizes = [96, 192, 384, 768]
if "small" in model_name:
depths = [3, 3, 27, 3]
hidden_sizes = [96, 192, 384, 768]
if "base" in model_name:
depths = [3, 3, 27, 3]
hidden_sizes = [128, 256, 512, 1024]
auxiliary_in_channels = 512
if "large" in model_name:
depths = [3, 3, 27, 3]
hidden_sizes = [192, 384, 768, 1536]
auxiliary_in_channels = 768
if "xlarge" in model_name:
depths = [3, 3, 27, 3]
hidden_sizes = [256, 512, 1024, 2048]
auxiliary_in_channels = 1024
# set label information
num_labels = 150
repo_id = "huggingface/label-files"
filename = "ade20k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
backbone_config = ConvNextConfig(
depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
)
config = UperNetConfig(
backbone_config=backbone_config,
auxiliary_in_channels=auxiliary_in_channels,
num_labels=num_labels,
id2label=id2label,
label2id=label2id,
)
return config
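# Worked example (illustrative): for model_name "upernet-convnext-base" the function above returns a config with
# backbone depths [3, 3, 27, 3], hidden sizes [128, 256, 512, 1024], auxiliary_in_channels=512 and the 150
# ADE20k labels attached via id2label / label2id.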
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
]
)
# fmt: on
return rename_keys
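# Worked example (illustrative): for the tiny model (depths [3, 3, 9, 3]) the loop above produces pairs such as
# ("backbone.stages.0.0.depthwise_conv.weight", "backbone.encoder.stages.0.layers.0.dwconv.weight"), and for
# i > 0 it also adds the downsampling and norm renames, e.g.
# ("backbone.norm1.weight", "backbone.hidden_states_norms.stage2.weight").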
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
checkpoint_url = model_name_to_url[model_name]
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
config = get_upernet_config(model_name)
model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy():
val = state_dict.pop(key)
if "bn" in key:
key = key.replace("bn", "batch_norm")
state_dict[key] = val
# rename keys
rename_keys = create_rename_keys(config)
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
model.load_state_dict(state_dict)
# verify on image
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
processor = SegformerImageProcessor()
pixel_values = processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
)
elif model_name == "upernet-convnext-small":
expected_slice = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
)
elif model_name == "upernet-convnext-base":
expected_slice = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
)
elif model_name == "upernet-convnext-large":
expected_slice = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
)
elif model_name == "upernet-convnext-xlarge":
expected_slice = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
)
print("Logits:", outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub")
model.push_to_hub(f"openmmlab/{model_name}")
processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
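# Example invocation (illustrative, the output path is a placeholder):
#   python convert_convnext_upernet_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny-converted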
| transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py",
"repo_id": "transformers",
"token_count": 4519
} | 487 |
# coding=utf-8
# Copyright 2022 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to support TF Vision-Encoder-Text-Decoder architectures"""
from __future__ import annotations
import re
import warnings
import numpy as np
import tensorflow as tf
from ...configuration_utils import PretrainedConfig
from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, keras, unpack_inputs
from ...tf_utils import shape_list
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "VisionEncoderDecoderConfig"
DEPRECATION_WARNING = (
"Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
" encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
" fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the"
" labels, no need to pass them yourself anymore."
)
VISION_ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model
as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via the
[`~TFAutoModel.from_pretrained`] function and the decoder is loaded via the [`~TFAutoModelForCausalLM.from_pretrained`]
function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
generative task, like image captioning.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://huggingface.co/papers/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
Additionally, in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained
Models](https://huggingface.co/papers/2109.10282) it is shown how leveraging large pretrained vision models for optical
character recognition (OCR) yields a significant performance improvement.
After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any
other model (see the examples for more information).
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
Parameters:
config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]`, `dict[str, tf.Tensor]` or `dict[str, np.ndarray]`, each example of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using the vision model's image processor. For example, using
[`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details.
decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
Provide for sequence to sequence training to the decoder. Indices can be obtained using
[`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `({0})`.
decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
"""
# Copied from transformers.models.encoder_decoder.modeling_tf_encoder_decoder.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
if pad_token_id is None:
raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
if decoder_start_token_id is None:
raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
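# A small worked example (illustrative only, not part of the original module): with labels
# [[5, 6, -100]], pad_token_id=0 and decoder_start_token_id=2, the function drops the last
# position, prepends the start token, and maps any remaining -100 values to the pad id.
#
#   labels = tf.constant([[5, 6, -100]])
#   shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
#   # -> tf.Tensor of shape (1, 3) with values [[2, 5, 6]]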
@add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING)
class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
r"""
[`TFVisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture
with one of the base vision model classes of the library as encoder and another one of the base model classes as
decoder when created with the [`~TFAutoModel.from_pretrained`] class method for the encoder and
[`~TFAutoModelForCausalLM.from_pretrained`] class method for the decoder.
"""
config_class = VisionEncoderDecoderConfig
base_model_prefix = "vision_encoder_decoder"
load_weight_prefix = "tf_vision_encoder_decoder_model"
main_input_name = "pixel_values"
def __init__(
self,
config: PretrainedConfig | None = None,
encoder: TFPreTrainedModel | None = None,
decoder: TFPreTrainedModel | None = None,
):
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
" `config.encoder.hidden_size`."
)
# initialize with config
super().__init__(config)
if encoder is None:
encoder = TFAutoModel.from_config(config.encoder, name="encoder")
if decoder is None:
decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
f" {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
f" {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# encoder outputs might need to be projected to a different dimension for the decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = keras.layers.Dense(
units=self.decoder.config.hidden_size,
kernel_initializer=get_initializer(config.encoder.initializer_range),
name="enc_to_dec_proj",
)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
@property
def input_signature(self):
vision_config = self.config.encoder
if hasattr(vision_config, "vision_config"):
vision_config = vision_config.vision_config
if hasattr(vision_config, "image_size"):
image_size = vision_config.image_size
else:
image_size = vision_config.input_size
return {
"pixel_values": tf.TensorSpec(
shape=(
None,
vision_config.num_channels,
image_size,
image_size,
),
dtype=tf.float32,
),
"decoder_input_ids": tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="decoder_input_ids"),
}
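# Illustrative note (an assumption, not from the original source): for a ViT-style encoder
# config with num_channels=3 and image_size=224, the signature above resolves to
#   {"pixel_values": tf.TensorSpec((None, 3, 224, 224), tf.float32),
#    "decoder_input_ids": tf.TensorSpec((None, None), tf.int32)}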
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
def tf_to_pt_weight_rename(self, tf_weight):
# Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models
# (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal.
# However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption
# here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's
# not the case, and I wasn't sure how else to go from the config to the correct MainLayer name!
# This override is only needed in the case where we're crossloading weights from PT. However, since weights are
# often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file.
# Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it
# or not.
encoder_model_type = self.config.encoder.model_type
if "encoder" in tf_weight and "decoder" not in tf_weight:
return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),)
else:
return (tf_weight,)
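# Illustrative example (hypothetical weight name): with an encoder whose model_type is "vit",
# a TF weight such as "encoder.vit.embeddings.cls_token" is renamed to
# "encoder.embeddings.cls_token" so it lines up with the PyTorch naming; decoder weights are
# returned unchanged.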
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str | None = None,
decoder_pretrained_model_name_or_path: str | None = None,
*model_args,
**kwargs,
) -> TFPreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An
example is `google/vit-base-patch16-224-in21k`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch index checkpoint file* (e.g., `./pt_model/`). In this case,
`encoder_from_pt` should be set to `True`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to *None*):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch checkpoint file* (e.g., `./pt_model/`). In this case,
`decoder_from_pt` should be set to `True`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import TFVisionEncoderDecoderModel
>>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
>>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
... "google/vit-base-patch16-224-in21k", "google-bert/bert-base-uncased"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./vit-bert")
>>> # load fine-tuned model
>>> model = TFVisionEncoderDecoderModel.from_pretrained("./vit-bert")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder:
del kwargs["encoder_" + key]
for key in kwargs_decoder:
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
kwargs_encoder["name"] = "encoder"
kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
kwargs_decoder["name"] = "decoder"
kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# Make sure these 2 `keras.Model` have fixed names so `from_pretrained` could load model weights correctly.
if encoder.name != "encoder":
raise ValueError("encoder model must be created with the name `encoder`.")
if decoder.name != "decoder":
raise ValueError("decoder model must be created with the name `decoder`.")
# instantiate config with corresponding kwargs
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@unpack_inputs
@add_start_docstrings_to_model_forward(
VISION_ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
pixel_values: np.ndarray | tf.Tensor | None = None,
decoder_input_ids: np.ndarray | tf.Tensor | None = None,
decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
encoder_outputs: tuple | TFBaseModelOutput | None = None,
past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None,
decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
labels: np.ndarray | tf.Tensor | None = None,
use_cache: bool | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool = False,
**kwargs,
) -> TFSeq2SeqLMOutput | tuple[tf.Tensor]:
r"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoTokenizer, TFVisionEncoderDecoderModel
>>> from PIL import Image
>>> import requests
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
>>> decoder_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> # initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized
>>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
... "google/vit-base-patch16-224-in21k", "openai-community/gpt2"
... )
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> img = Image.open(requests.get(url, stream=True).raw)
>>> # forward
>>> pixel_values = image_processor(images=img, return_tensors="tf").pixel_values # Batch size 1
>>> decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids # Batch size 1
>>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
>>> # training
>>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("vit-gpt2")
>>> model = TFVisionEncoderDecoderModel.from_pretrained("vit-gpt2")
>>> # generation
>>> generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# Let the user be responsible for the expected format.
if encoder_outputs is not None:
if return_dict and not isinstance(encoder_outputs, ModelOutput):
raise ValueError(
"If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
)
if encoder_outputs is None:
encoder_inputs = {
"input_ids": pixel_values,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"training": training,
}
# Add arguments to encoder from `kwargs_encoder`
encoder_inputs.update(kwargs_encoder)
if "input_ids" in encoder_inputs:
encoder_inputs["pixel_values"] = encoder_inputs.pop("input_ids")
if encoder_inputs["pixel_values"] is None:
raise ValueError("You have to specify pixel_values")
# Handle the case where the inputs are passed as a single dict which contains `labels`.
# The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
# parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
if "labels" in encoder_inputs:
labels = encoder_inputs.pop("labels")
# handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
if "decoder_input_ids" in encoder_inputs:
decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
# handle the init case where `dummy_inputs` returns a dict containing `decoder_attention_mask`.
if "decoder_attention_mask" in encoder_inputs:
decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
encoder_outputs = self.encoder(**encoder_inputs)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
batch_size, sequence_length = shape_list(encoder_hidden_states)[:2]
encoder_attention_mask = tf.ones(shape=(batch_size, sequence_length), dtype=tf.int32)
decoder_inputs = {
"input_ids": decoder_input_ids,
"attention_mask": decoder_attention_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
"inputs_embeds": decoder_inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"use_cache": use_cache,
"past_key_values": past_key_values,
"return_dict": return_dict,
"training": training,
}
# Add arguments to decoder from `kwargs_decoder`
decoder_inputs.update(kwargs_decoder)
decoder_outputs = self.decoder(**decoder_inputs)
logits = decoder_outputs[0]
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
warnings.warn(DEPRECATION_WARNING, FutureWarning)
loss = self.hf_compute_loss(labels, logits)
if not return_dict:
past_key_values = None
if use_cache:
past_key_values = decoder_outputs[1]
# The starting index of the remaining elements in `decoder_outputs`
start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
if not isinstance(encoder_outputs, tuple):
encoder_outputs = encoder_outputs.to_tuple()
output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
output = tuple(x for x in output if x is not None)
return output
return TFSeq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.decoder.use_cache else None
dec_hs = (
tf.convert_to_tensor(output.decoder_hidden_states) if self.config.decoder.output_hidden_states else None
)
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.decoder.output_attentions else None
enc_hs = (
tf.convert_to_tensor(output.encoder_hidden_states) if self.config.encoder.output_hidden_states else None
)
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.encoder.output_attentions else None
cross_attns = (
tf.convert_to_tensor(output.cross_attentions)
if self.config.decoder.output_attentions and output.cross_attentions is not None
else None
)
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
cross_attentions=cross_attns,
)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
decoder_attention_mask = decoder_inputs.get("attention_mask", None)
past_key_values = decoder_inputs.get("past_key_values")
input_dict = {
"pixel_values": None, # needs to be passed to make Keras.layer.__call__ happy
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
# TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
"encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
"past_key_values": past_key_values,
"use_cache": use_cache,
}
return input_dict
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError(
"Resizing the embedding layers via the TFVisionEncoderDecoderModel directly is not supported. "
"Please use the respective methods of the wrapped objects (model.decoder.resize_token_embeddings(...))"
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "enc_to_dec_proj", None) is not None:
with tf.name_scope(self.enc_to_dec_proj.name):
self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size])
if getattr(self, "encoder", None) is not None:
with tf.name_scope(self.encoder.name):
self.encoder.build(None)
if getattr(self, "decoder", None) is not None:
with tf.name_scope(self.decoder.name):
self.decoder.build(None)
__all__ = ["TFVisionEncoderDecoderModel"]
| transformers/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py/0 | {
"file_path": "transformers/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py",
"repo_id": "transformers",
"token_count": 14853
} | 488 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VitDet model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class VitDetConfig(BackboneConfigMixin, PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VitDetModel`]. It is used to instantiate a
VitDet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VitDet
[google/vitdet-base-patch16-224](https://huggingface.co/google/vitdet-base-patch16-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of mlp hidden dim to embedding dim.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
pretrain_image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image during pretraining.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate.
window_block_indices (`list[int]`, *optional*, defaults to `[]`):
List of indices of blocks that should have window attention instead of regular global self-attention.
residual_block_indices (`list[int]`, *optional*, defaults to `[]`):
List of indices of blocks that should have an extra residual block after the MLP.
use_absolute_position_embeddings (`bool`, *optional*, defaults to `True`):
Whether to add absolute position embeddings to the patch embeddings.
use_relative_position_embeddings (`bool`, *optional*, defaults to `False`):
Whether to add relative position embeddings to the attention maps.
window_size (`int`, *optional*, defaults to 0):
The size of the attention window.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import VitDetConfig, VitDetModel
>>> # Initializing a VitDet configuration
>>> configuration = VitDetConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = VitDetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vitdet"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
mlp_ratio=4,
hidden_act="gelu",
dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
image_size=224,
pretrain_image_size=224,
patch_size=16,
num_channels=3,
qkv_bias=True,
drop_path_rate=0.0,
window_block_indices=[],
residual_block_indices=[],
use_absolute_position_embeddings=True,
use_relative_position_embeddings=False,
window_size=0,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.dropout_prob = dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.pretrain_image_size = pretrain_image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.drop_path_rate = drop_path_rate
self.window_block_indices = window_block_indices
self.residual_block_indices = residual_block_indices
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_embeddings = use_relative_position_embeddings
self.window_size = window_size
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
__all__ = ["VitDetConfig"]
| transformers/src/transformers/models/vitdet/configuration_vitdet.py/0 | {
"file_path": "transformers/src/transformers/models/vitdet/configuration_vitdet.py",
"repo_id": "transformers",
"token_count": 2839
} | 489 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Whisper."""
import json
import os
import re
import warnings
from functools import lru_cache
from typing import Optional
import numpy as np
from tokenizers import AddedToken, processors
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from .tokenization_whisper import LANGUAGES, TASK_IDS, TO_LANGUAGE_CODE, WhisperTokenizer, _decode_asr
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_file": "tokenizer.json",
"merges_file": "merges.txt",
"normalizer_file": "normalizer.json",
}
class WhisperTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Whisper tokenizer (backed by HuggingFace's *tokenizers* library).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
normalizer_file (`str`, *optional*):
Path to the normalizer file.
tokenizer_file (`str`, *optional*):
Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
contains everything needed to load the tokenizer.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token. The `decoder_start_token_id` is used to set the first token as
`"<|startoftranscript|>"` when generating.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like any
other word. (The Whisper tokenizer detects the beginning of words by the preceding space.)
language (`str`, *optional*):
The language of the transcription text. The corresponding language id token is appended to the start of the
sequence for multilingual speech recognition and speech translation tasks, e.g. for Spanish the token
`"<|es|>"` is appended to the start of sequence. This should be used for multilingual fine-tuning only.
task (`str`, *optional*):
Task identifier to append at the start of sequence (if any). This should be used for multilingual
fine-tuning, with `"transcribe"` for speech recognition and `"translate"` for speech translation.
predict_timestamps (`bool`, *optional*, defaults to `False`):
Whether to omit the `<|notimestamps|>` token at the start of the sequence.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = WhisperTokenizer
def __init__(
self,
vocab_file=None,
merges_file=None,
normalizer_file=None,
tokenizer_file=None,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
add_prefix_space=False,
language=None,
task=None,
predict_timestamps=False,
**kwargs,
):
bos_token = (
AddedToken(bos_token, lstrip=False, rstrip=False, normalized=False, special=True)
if isinstance(bos_token, str)
else bos_token
)
eos_token = (
AddedToken(eos_token, lstrip=False, rstrip=False, normalized=False, special=True)
if isinstance(eos_token, str)
else eos_token
)
unk_token = (
AddedToken(unk_token, lstrip=False, rstrip=False, normalized=False, special=True)
if isinstance(unk_token, str)
else unk_token
)
super().__init__(
vocab_file,
merges_file,
tokenizer_file=tokenizer_file,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
self.add_bos_token = kwargs.pop("add_bos_token", False)
if normalizer_file is not None:
with open(normalizer_file, encoding="utf-8") as vocab_handle:
self.english_spelling_normalizer = json.load(vocab_handle)
else:
self.english_spelling_normalizer = None
self.timestamp_pat = re.compile(r"<\|(\d+\.\d+)\|>")
self.language = language
self.task = task
self.predict_timestamps = predict_timestamps
# Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._batch_encode_plus
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get("is_split_into_words", False)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*args, **kwargs)
# Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._encode_plus
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get("is_split_into_words", False)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*args, **kwargs)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._decode_with_timestamps
def _decode_with_timestamps(
self, token_ids, skip_special_tokens=False, time_precision=0.02, segment_size=1500
) -> str:
"""
Timestamp tokens are above the special tokens' id range and are ignored by `decode()`. This method decodes
given tokens with timestamp tokens annotated, e.g. "<|1.08|>".
"""
timestamp_begin = self.all_special_ids[-1] + 1
outputs = [[]]
cur_max_timestamp = 0.0
prev_segments_len = 0.0
penultimate_timestamp = 0.0
for i, token in enumerate(token_ids):
if token >= timestamp_begin:
timestamp = float((token - timestamp_begin) * time_precision)
if timestamp < cur_max_timestamp:
# next segment has started
last_was_single_ending = i >= 2 and not (
token_ids[i - 1] >= timestamp_begin and token_ids[i - 2] >= timestamp_begin
)
if last_was_single_ending:
prev_segments_len += time_precision * segment_size
else:
cur_max_timestamp = penultimate_timestamp
prev_segments_len += penultimate_timestamp
outputs = outputs[:-2]
penultimate_timestamp = cur_max_timestamp
cur_max_timestamp = timestamp
outputs.append(f"<|{(timestamp + prev_segments_len):.2f}|>")
outputs.append([])
else:
outputs[-1].append(token)
outputs = [
s if isinstance(s, str) else self.decode(s, skip_special_tokens=skip_special_tokens) for s in outputs
]
return "".join(outputs)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._compute_offsets
def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500):
"""
Compute offsets for a given tokenized input
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
time_precision (`float`, *optional*, defaults to 0.02):
The time ratio to convert from token to time.
segment_size (`int`, *optional*, defaults to 1500):
The number of features in the input mel spectrogram.
"""
offsets = []
# ensure torch tensor of token ids is placed on cpu
if "torch" in str(type(token_ids)) and (hasattr(token_ids, "cpu") and callable(token_ids.cpu)):
token_ids = token_ids.cpu()
token_ids = np.array(token_ids)
if token_ids.shape[0] > 1 and len(token_ids.shape) > 1:
raise ValueError("Can only process a single input at a time")
timestamp_begin = self.all_special_ids[-1] + 1
timestamp_tokens = token_ids >= timestamp_begin
consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1:
# either there are no timestamps or there are no consecutive ones
return []
elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive:
# we add the final timestamp if it is not already in the list
consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1)
last_slice = np.where(timestamp_tokens)[0][0]
cur_max_timestamp = 0
prev_segments_len = 0
for current_slice in consecutive:
sliced_tokens = token_ids[last_slice:current_slice]
if len(sliced_tokens) > 1:
start_timestamp_position = sliced_tokens[0].item() - timestamp_begin
end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin
if start_timestamp_position < cur_max_timestamp:
# next segment has started
is_single_ending = last_slice >= 2 and not (
token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin
)
if is_single_ending:
prev_segments_len += segment_size
else:
prev_segments_len += cur_max_timestamp
cur_max_timestamp = end_timestamp_position
# strip timestamp tokens from the text output
sliced_tokens = self._preprocess_token_ids(sliced_tokens)
text = self._decode(sliced_tokens)
text = self._filter_timestamp_ids(text)
offsets.append(
{
"text": text,
"timestamp": (
start_timestamp_position * time_precision + prev_segments_len * time_precision,
end_timestamp_position * time_precision + prev_segments_len * time_precision,
),
}
)
last_slice = current_slice
return offsets
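# Illustrative output shape (hypothetical values): for a single segment spanning 0.00-5.44
# seconds the method returns something like
#   [{"text": " Hello world.", "timestamp": (0.0, 5.44)}]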
@lru_cache
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.timestamp_ids
def timestamp_ids(self, time_precision=0.02):
"""
Compute the timestamp token ids for a given precision and save them to the least-recently used (LRU) cache.
Args:
time_precision (`float`, *optional*, defaults to 0.02):
The time ratio to convert from token to time.
"""
return self.convert_tokens_to_ids([("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)])
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._preprocess_token_ids
def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool = False):
"""
Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids.
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Typically obtained using the `__call__` method of the tokenizer.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be
removed.
"""
if skip_special_tokens:
prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>")
decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>")
token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)
return token_ids
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._filter_timestamp_ids
def _filter_timestamp_ids(self, token_ids):
return re.sub(self.timestamp_pat, "", token_ids)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.decode
def decode(
self,
token_ids,
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
output_offsets: bool = False,
time_precision: float = 0.02,
decode_with_timestamps: bool = False,
normalize: bool = False,
basic_normalize: bool = False,
remove_diacritics: bool = False,
**kwargs,
) -> str:
"""
Converts a sequence of ids to a string, using the tokenizer and vocabulary, with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding. Will remove the previous tokens (pre-prompt)
if present.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
output_offsets (`bool`, *optional*, defaults to `False`):
Whether or not to output the offsets of the tokens. This should only be set if the model predicted
timestamps. If there are previous tokens (pre-prompt) to decode, they will only appear in the decoded
text if they contain timestamp tokens.
time_precision (`float`, *optional*, defaults to 0.02):
The time ratio to convert from token to time.
decode_with_timestamps (`bool`, *optional*, defaults to `False`):
Whether or not to decode with timestamps included in the raw text.
normalize (`bool`, *optional*, defaults to `False`):
Whether or not to apply the English text normalizer to the decoded text. Only applicable when the
target text is in English. Otherwise, the basic text normalizer should be applied.
basic_normalize (`bool`, *optional*, defaults to `False`):
Whether or not to apply the Basic text normalizer to the decoded text. Applicable to multilingual
target text.
remove_diacritics (`bool`, *optional*, defaults to `False`):
Whether or not to remove diacritics when applying the Basic text normalizer. Removing diacritics may
destroy information in the decoded text, hence it should be used with caution.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
filtered_ids = self._preprocess_token_ids(
token_ids,
skip_special_tokens=skip_special_tokens,
)
text = super().decode(
filtered_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
normalize=normalize,
basic_normalize=basic_normalize,
remove_diacritics=remove_diacritics,
**kwargs,
)
if decode_with_timestamps:
# legacy method to decode timestamps when not included in the tokenizer vocabulary
text = self._decode_with_timestamps(
filtered_ids, time_precision=time_precision, skip_special_tokens=skip_special_tokens
)
else:
text = self._filter_timestamp_ids(text)
# retrieve offsets
if output_offsets:
offsets = self._compute_offsets(token_ids, time_precision=time_precision)
return {"text": text, "offsets": offsets}
return text
def _decode(
self, *args, normalize: bool = False, basic_normalize: bool = False, remove_diacritics: bool = False, **kwargs
) -> str:
text = super()._decode(*args, **kwargs)
if normalize:
clean_text = self._normalize(text)
return clean_text
elif basic_normalize:
clean_text = self._basic_normalize(text, remove_diacritics=remove_diacritics)
return clean_text
else:
return text
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._normalize
def _normalize(self, text):
warnings.warn(
"The private method `_normalize` is deprecated and will be removed in v5 of Transformers."
"You can normalize an input string using the Whisper English normalizer using the `normalize` method."
)
return self.normalize(text)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._basic_normalize
def _basic_normalize(self, text, remove_diacritics=False):
warnings.warn(
"The private method `_basic_normalize` is deprecated and will be removed in v5 of Transformers."
"You can normalize an input string using the Whisper basic normalizer using the `basic_normalize` method."
)
return self.basic_normalize(text, remove_diacritics=remove_diacritics)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.normalize
def normalize(self, text):
"""
Normalize a given string using the `EnglishTextNormalizer` class, which performs common transformations on
English text.
"""
normalizer = EnglishTextNormalizer(self.english_spelling_normalizer)
return normalizer(text)
@staticmethod
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.basic_normalize
def basic_normalize(text, remove_diacritics=False):
"""
Normalize a given string using the `BasicTextNormalizer` class, which performs common transformations on
multilingual text.
"""
normalizer = BasicTextNormalizer(remove_diacritics=remove_diacritics)
return normalizer(text)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
normalizer_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["normalizer_file"]
)
if self.english_spelling_normalizer is not None:
with open(normalizer_file, "w", encoding="utf-8") as f:
f.write(
json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
)
return tuple(files) + (normalizer_file,)
def set_prefix_tokens(
self, language: Optional[str] = None, task: Optional[str] = None, predict_timestamps: Optional[bool] = None
):
"""
Override the prefix tokens appended to the start of the label sequence. This method can be used standalone to
update the prefix tokens as required when fine-tuning. Example:
```python
>>> # instantiate the tokenizer and set the prefix token to Spanish
>>> tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny", language="spanish")
>>> # now switch the prefix token from Spanish to French
>>> tokenizer.set_prefix_tokens(language="french")
```
Args:
language (`str`, *optional*, defaults to `None`):
The language of the transcription text.
task (`str`, *optional*, defaults to `None`):
Task identifier to append at the start of sequence (if any).
predict_timestamps (`bool`, *optional*, defaults to `None`):
Whether to omit the `<|notimestamps|>` token at the start of the sequence.
"""
self.language = language if language is not None else self.language
self.task = task if task is not None else self.task
self.predict_timestamps = predict_timestamps if predict_timestamps is not None else self.predict_timestamps
prefix_token_ids = self.prefix_tokens
prefixes = self.convert_ids_to_tokens(prefix_token_ids)
eos = self.eos_token
eos_token_id = self.eos_token_id
prefix_template = " ".join([f"{token}:0" for token in prefixes])
self.backend_tokenizer.post_processor = processors.TemplateProcessing(
single=f"{prefix_template} $A:0 {eos}:0",
pair=f"{prefix_template} $A:0 $B:1 {eos}:1",
special_tokens=[
(eos, eos_token_id),
*zip(prefixes, prefix_token_ids),
],
)
@property
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.prefix_tokens
def prefix_tokens(self) -> list[int]:
bos_token_id = self.convert_tokens_to_ids("<|startoftranscript|>")
translate_token_id = self.convert_tokens_to_ids("<|translate|>")
transcribe_token_id = self.convert_tokens_to_ids("<|transcribe|>")
notimestamps_token_id = self.convert_tokens_to_ids("<|notimestamps|>")
langs = tuple(LANGUAGES.keys())
if self.language is not None:
self.language = self.language.lower()
if self.language in TO_LANGUAGE_CODE:
language_id = TO_LANGUAGE_CODE[self.language]
elif self.language in TO_LANGUAGE_CODE.values():
language_id = self.language
else:
is_language_code = len(self.language) == 2
raise ValueError(
f"Unsupported language: {self.language}. Language should be one of:"
f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}."
)
if self.task is not None:
if self.task not in TASK_IDS:
raise ValueError(f"Unsupported task: {self.task}. Task should be in: {TASK_IDS}")
bos_sequence = [bos_token_id]
if self.language is not None:
bos_sequence.append(bos_token_id + 1 + langs.index(language_id))
if self.task is not None:
bos_sequence.append(transcribe_token_id if self.task == "transcribe" else translate_token_id)
if not self.predict_timestamps:
bos_sequence.append(notimestamps_token_id)
return bos_sequence
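# Illustrative example (token ids depend on the loaded vocabulary): with language="english",
# task="transcribe" and predict_timestamps=False the property returns the ids of
# <|startoftranscript|> <|en|> <|transcribe|> <|notimestamps|>, in that order.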
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
"""Build model inputs from a sequence by appending eos_token_id."""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1]
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_decoder_prompt_ids
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
self.set_prefix_tokens(task=task, language=language, predict_timestamps=not no_timestamps)
# prefix tokens are of the form: <|startoftranscript|> <|lang_id|> <|task|> <|notimestamps|>
# we don't want to force the bos token at position 1, as this is the starting token
# when we generate, so we slice the prefix tokens to: <|lang_id|> <|task|> <|notimestamps|>
# to get the forced tokens
forced_tokens = self.prefix_tokens[1:]
forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_tokens)]
return forced_decoder_ids
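# --- Illustrative sketch (editorial addition, kept as comments) ---
# A hedged example of how the forced ids returned above are typically consumed. The
# checkpoint name comes from the docstring example earlier in this class; the
# language/task choices are assumptions for illustration only:
#
#   tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny")
#   forced_ids = tokenizer.get_decoder_prompt_ids(language="french", task="transcribe")
#   # -> [(1, <lang id>), (2, <task id>), (3, <notimestamps id>)], which can be passed
#   #    to `model.generate(..., forced_decoder_ids=forced_ids)`.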
def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time_precision):
return _decode_asr(
self,
model_outputs,
return_timestamps=return_timestamps,
return_language=return_language,
time_precision=time_precision,
)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_prompt_ids
def get_prompt_ids(self, text: str, return_tensors="np"):
"""Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`]."""
batch_encoding = self("<|startofprev|>", " " + text.strip(), add_special_tokens=False)
# Check for special tokens
prompt_text_ids = batch_encoding["input_ids"][1:]
special_token_id = next((x for x in prompt_text_ids if x >= self.all_special_ids[0]), None)
if special_token_id is not None:
token = self.convert_ids_to_tokens(special_token_id)
raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.")
batch_encoding.convert_to_tensors(tensor_type=return_tensors)
return batch_encoding["input_ids"]
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._strip_prompt
def _strip_prompt(self, token_ids: list[int], prompt_token_id: int, decoder_start_token_id: int):
if not isinstance(token_ids, list):
token_ids = self._convert_to_list(token_ids)
# handle case of empty token_ids for decoding with timestamps.
# at this point token_ids is a list, so it is safe to use if not check.
if not token_ids:
return token_ids
has_prompt = token_ids[0] == prompt_token_id
if has_prompt:
if decoder_start_token_id in token_ids:
return token_ids[token_ids.index(decoder_start_token_id) :]
else:
return []
return token_ids
@staticmethod
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._convert_to_list
def _convert_to_list(token_ids):
# convert type to ndarray if necessary
if hasattr(token_ids, "numpy"):
if "torch" in str(type(token_ids)):
token_ids = token_ids.cpu().numpy()
elif "tensorflow" in str(type(token_ids)):
token_ids = token_ids.numpy()
elif "jaxlib" in str(type(token_ids)):
token_ids = token_ids.tolist()
# now the token ids are either a numpy array, or a list of lists
if isinstance(token_ids, np.ndarray):
token_ids = token_ids.tolist()
return token_ids
__all__ = ["WhisperTokenizerFast"]
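# --- Illustrative sketch (editorial addition, not exported) ---
# A minimal example of the prompting workflow implemented by `get_prompt_ids` and
# `_strip_prompt` above. The checkpoint name is taken from the docstring examples in
# this module; the prompt text is an arbitrary assumption.
def _example_whisper_prompting():
    tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny")
    prompt_ids = tokenizer.get_prompt_ids("glossary: Hugging Face, Transformers", return_tensors="np")
    # `prompt_ids` is encoded behind `<|startofprev|>` and can be passed to
    # `WhisperForConditionalGeneration.generate(prompt_ids=...)`; during decoding,
    # `_strip_prompt` later removes everything before `<|startoftranscript|>`.
    return prompt_ids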
| transformers/src/transformers/models/whisper/tokenization_whisper_fast.py/0 | {
"file_path": "transformers/src/transformers/models/whisper/tokenization_whisper_fast.py",
"repo_id": "transformers",
"token_count": 13026
} | 490 |
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import warnings
from functools import partial
from typing import Optional, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau
from .trainer_pt_utils import LayerWiseDummyOptimizer, LayerWiseDummyScheduler
from .trainer_utils import SchedulerType
from .utils import logging
logger = logging.get_logger(__name__)
def _get_constant_lambda(_=None):
return 1
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch)
def get_reduce_on_plateau_schedule(optimizer: Optimizer, **kwargs):
"""
Create a schedule with a constant learning rate that decreases when a metric has stopped improving.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
kwargs (`dict`, *optional*):
Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau`
for possible parameters.
Return:
`torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule.
"""
return ReduceLROnPlateau(optimizer, **kwargs)
def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(
_get_linear_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
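# --- Illustrative sketch (editorial addition, not part of the public API) ---
# A minimal example of driving the linear warmup/decay schedule. The tiny model,
# base learning rate and step counts are arbitrary assumptions chosen only to make
# the shape of the schedule visible.
def _example_linear_schedule_with_warmup():
    import torch

    model = torch.nn.Linear(4, 2)  # hypothetical tiny model
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
    lrs = []
    for _ in range(100):
        optimizer.step()
        scheduler.step()
        lrs.append(scheduler.get_last_lr()[0])
    # the lr ramps linearly from 0 to 1e-3 over the first 10 steps, then decays linearly towards 0
    return lrs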
def _get_cosine_schedule_with_warmup_lr_lambda(
current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float
):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
def get_cosine_schedule_with_warmup(
optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
num_cycles (`float`, *optional*, defaults to 0.5):
The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(
_get_cosine_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_cycles=num_cycles,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
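# --- Illustrative sketch (editorial addition) ---
# The cosine schedule multiplies the base lr by a factor that warms up linearly and then
# follows half a cosine wave down to 0 (for the default num_cycles=0.5). The probe below
# evaluates the lambda directly; the step counts are arbitrary assumptions. (The lambda is
# redefined later in this module with an optional `min_lr_rate`, which defaults to 0 and
# does not change these values.)
def _example_cosine_schedule_factors():
    factors = [
        _get_cosine_schedule_with_warmup_lr_lambda(
            step, num_warmup_steps=10, num_training_steps=100, num_cycles=0.5
        )
        for step in (0, 5, 10, 55, 100)
    ]
    # approximately: 0.0 (start), 0.5 (mid-warmup), 1.0 (end of warmup), 0.5 (mid-decay), 0.0 (end)
    return factors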
def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda(
current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int
):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
num_cycles (`int`, *optional*, defaults to 1):
The number of hard restarts to use.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(
_get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_cycles=num_cycles,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
def _get_polynomial_decay_schedule_with_warmup_lr_lambda(
current_step: int,
*,
num_warmup_steps: int,
num_training_steps: int,
lr_end: float,
power: float,
lr_init: int,
):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lr_range = lr_init - lr_end
decay_steps = num_training_steps - num_warmup_steps
pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
decay = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
def get_polynomial_decay_schedule_with_warmup(
optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
"""
Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
lr_end (`float`, *optional*, defaults to 1e-7):
The end LR.
power (`float`, *optional*, defaults to 1.0):
Power factor.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
implementation at
https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_init = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
lr_lambda = partial(
_get_polynomial_decay_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
lr_end=lr_end,
power=power,
lr_init=lr_init,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
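# --- Illustrative sketch (editorial addition) ---
# A minimal check that the polynomial schedule ends at `lr_end`. The parameter, base lr
# and step counts are arbitrary assumptions.
def _example_polynomial_decay_schedule():
    import torch

    param = torch.nn.Parameter(torch.zeros(2))
    optimizer = torch.optim.AdamW([param], lr=1e-3)
    scheduler = get_polynomial_decay_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100, lr_end=1e-5, power=2.0
    )
    for _ in range(100):
        optimizer.step()
        scheduler.step()
    # after the decay phase the lr has settled at (approximately) lr_end
    return scheduler.get_last_lr()[0]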
def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: Optional[int] = None):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
shift = timescale - num_warmup_steps
decay = 1.0 / math.sqrt((current_step + shift) / timescale)
return decay
def get_inverse_sqrt_schedule(
optimizer: Optimizer, num_warmup_steps: int, timescale: Optional[int] = None, last_epoch: int = -1
):
"""
Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a
warmup period during which the lr increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
timescale (`int`, *optional*, defaults to `num_warmup_steps`):
Time scale.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
# Note: this implementation is adapted from
# https://github.com/google-research/big_vision/blob/f071ce68852d56099437004fd70057597a95f6ef/big_vision/utils.py#L930
if timescale is None:
timescale = num_warmup_steps or 10_000
lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale)
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
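# --- Illustrative sketch (editorial addition) ---
# With `timescale == num_warmup_steps`, the multiplier is exactly 1.0 at the end of warmup
# and then decays as 1/sqrt(step / timescale). The probe below evaluates the lambda
# directly; the step values are arbitrary assumptions.
def _example_inverse_sqrt_factors():
    factors = [
        _get_inverse_sqrt_schedule_lr_lambda(step, num_warmup_steps=100, timescale=100)
        for step in (0, 50, 100, 400, 10_000)
    ]
    # approximately: 0.0, 0.5, 1.0, 0.5, 0.1
    return factors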
def _get_cosine_schedule_with_warmup_lr_lambda(
current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float, min_lr_rate: float = 0.0
):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
factor = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
factor = factor * (1 - min_lr_rate) + min_lr_rate
return max(0, factor)
def get_cosine_with_min_lr_schedule_with_warmup(
optimizer: Optimizer,
num_warmup_steps: int,
num_training_steps: int,
num_cycles: float = 0.5,
last_epoch: int = -1,
min_lr: Optional[float] = None,
min_lr_rate: Optional[float] = None,
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to min_lr, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
num_cycles (`float`, *optional*, defaults to 0.5):
The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
min_lr (`float`, *optional*):
The minimum learning rate to reach after the cosine schedule.
min_lr_rate (`float`, *optional*):
The minimum learning rate as a ratio of the initial learning rate. If set, `min_lr` should not be set.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
if min_lr is not None and min_lr_rate is not None:
raise ValueError("Only one of min_lr or min_lr_rate should be set")
elif min_lr is not None:
min_lr_rate = min_lr / optimizer.defaults["lr"]
elif min_lr_rate is None:
raise ValueError("One of min_lr or min_lr_rate should be set through the `lr_scheduler_kwargs`")
lr_lambda = partial(
_get_cosine_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_cycles=num_cycles,
min_lr_rate=min_lr_rate,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
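# --- Illustrative sketch (editorial addition) ---
# Passing `min_lr` makes the cosine decay bottom out at that value instead of 0; internally
# it is converted to `min_lr_rate = min_lr / initial_lr`. The parameter and step counts are
# arbitrary assumptions.
def _example_cosine_with_min_lr_schedule():
    import torch

    param = torch.nn.Parameter(torch.zeros(2))
    optimizer = torch.optim.AdamW([param], lr=1e-3)
    scheduler = get_cosine_with_min_lr_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100, min_lr=1e-4
    )
    for _ in range(100):
        optimizer.step()
        scheduler.step()
    # the lr ends at ~1e-4 (= min_lr) rather than decaying all the way to 0
    return scheduler.get_last_lr()[0]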
def _get_cosine_with_min_lr_schedule_with_warmup_lr_rate_lambda(
current_step: int,
*,
num_warmup_steps: int,
num_training_steps: int,
num_cycles: float,
min_lr_rate: float = 0.0,
warmup_lr_rate: Optional[float] = None,
):
current_step = float(current_step)
num_warmup_steps = float(num_warmup_steps)
num_training_steps = float(num_training_steps)
if current_step < num_warmup_steps:
if warmup_lr_rate is None:
return (current_step + 1.0) / max(1.0, num_warmup_steps)
else:
warmup_lr_rate = float(warmup_lr_rate)
return warmup_lr_rate + (1.0 - warmup_lr_rate) * (current_step) / (max(1, num_warmup_steps - 1))
progress = (current_step - num_warmup_steps + 1.0) / (max(1.0, num_training_steps - num_warmup_steps))
factor = 0.5 * (1.0 + math.cos(math.pi * num_cycles * 2.0 * progress))
factor = factor * (1 - min_lr_rate) + min_lr_rate
return max(0, factor)
def get_cosine_with_min_lr_schedule_with_warmup_lr_rate(
optimizer: Optimizer,
num_warmup_steps: int,
num_training_steps: int,
num_cycles: float = 0.5,
last_epoch: int = -1,
min_lr: Optional[float] = None,
min_lr_rate: Optional[float] = None,
warmup_lr_rate: Optional[float] = None,
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to min_lr, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
num_cycles (`float`, *optional*, defaults to 0.5):
The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
min_lr (`float`, *optional*):
The minimum learning rate to reach after the cosine schedule.
min_lr_rate (`float`, *optional*):
The minimum learning rate as a ratio of the initial learning rate. If set, `min_lr` should not be set.
warmup_lr_rate (`float`, *optional*):
The starting learning rate of the warmup phase, expressed as a ratio of the initial learning rate. If not set, the warmup effectively starts at `1 / num_warmup_steps` of the initial learning rate.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
if min_lr is not None and min_lr_rate is not None:
raise ValueError("Only one of min_lr or min_lr_rate should be set")
elif min_lr is not None:
min_lr_rate = min_lr / optimizer.defaults["lr"]
elif min_lr_rate is None:
raise ValueError("One of min_lr or min_lr_rate should be set through the `lr_scheduler_kwargs`")
lr_lambda = partial(
_get_cosine_with_min_lr_schedule_with_warmup_lr_rate_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_cycles=num_cycles,
min_lr_rate=min_lr_rate,
warmup_lr_rate=warmup_lr_rate,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
def _get_wsd_scheduler_lambda(
current_step: int,
*,
num_warmup_steps: int,
num_stable_steps: int,
num_decay_steps: int,
warmup_type: str,
decay_type: str,
min_lr_ratio: float,
num_cycles: float,
):
if current_step < num_warmup_steps:
progress = float(current_step) / float(max(1, num_warmup_steps))
if warmup_type == "linear":
factor = progress
elif warmup_type == "cosine":
factor = 0.5 * (1.0 - math.cos(math.pi * progress))
elif warmup_type == "1-sqrt":
factor = 1.0 - math.sqrt(1.0 - progress)
factor = factor * (1.0 - min_lr_ratio) + min_lr_ratio
return max(0.0, factor)
if current_step < num_warmup_steps + num_stable_steps:
return 1.0
if current_step < num_warmup_steps + num_stable_steps + num_decay_steps:
progress = float(current_step - num_warmup_steps - num_stable_steps) / float(max(1, num_decay_steps))
if decay_type == "linear":
factor = 1.0 - progress
elif decay_type == "cosine":
factor = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
elif decay_type == "1-sqrt":
factor = 1.0 - math.sqrt(progress)
factor = factor * (1.0 - min_lr_ratio) + min_lr_ratio
return max(0.0, factor)
return min_lr_ratio
def get_wsd_schedule(
optimizer: Optimizer,
num_warmup_steps: int,
num_decay_steps: int,
num_training_steps: Optional[int] = None,
num_stable_steps: Optional[int] = None,
warmup_type: str = "linear",
decay_type: str = "cosine",
min_lr_ratio: float = 0,
num_cycles: float = 0.5,
last_epoch: int = -1,
):
"""
Create a schedule with a learning rate that has three stages:
1. warmup: increase from min_lr_ratio times the initial learning rate to the initial learning rate following a warmup_type.
2. stable: constant learning rate.
3. decay: decrease from the initial learning rate to min_lr_ratio times the initial learning rate following a decay_type.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_decay_steps (`int`):
The number of steps for the decay phase.
num_training_steps (`int`, *optional*):
The total number of training steps. This is the sum of the warmup, stable and decay steps. If `num_stable_steps` is not provided, the stable phase will be `num_training_steps - num_warmup_steps - num_decay_steps`.
num_stable_steps (`int`, *optional*):
The number of steps for the stable phase. Please ensure that `num_warmup_steps + num_stable_steps + num_decay_steps` equals `num_training_steps`, otherwise the other steps will default to the minimum learning rate.
warmup_type (`str`, *optional*, defaults to "linear"):
The type of warmup to use. Can be 'linear', 'cosine' or '1-sqrt'.
decay_type (`str`, *optional*, defaults to "cosine"):
The type of decay to use. Can be 'linear', 'cosine' or '1-sqrt'.
min_lr_ratio (`float`, *optional*, defaults to 0):
The minimum learning rate as a ratio of the initial learning rate.
num_cycles (`float`, *optional*, defaults to 0.5):
The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
if num_training_steps is None and num_stable_steps is None:
raise ValueError("Either num_training_steps or num_stable_steps must be specified.")
if num_training_steps is not None and num_stable_steps is not None:
warnings.warn("Both num_training_steps and num_stable_steps are specified. num_stable_steps will be used.")
if warmup_type not in ["linear", "cosine", "1-sqrt"]:
raise ValueError(f"Unknown warmup type: {warmup_type}, expected 'linear', 'cosine' or '1-sqrt'")
if decay_type not in ["linear", "cosine", "1-sqrt"]:
raise ValueError(f"Unknown decay type: {decay_type}, expected 'linear', 'cosine' or '1-sqrt'")
if num_stable_steps is None:
num_stable_steps = num_training_steps - num_warmup_steps - num_decay_steps
lr_lambda = partial(
_get_wsd_scheduler_lambda,
num_warmup_steps=num_warmup_steps,
num_stable_steps=num_stable_steps,
num_decay_steps=num_decay_steps,
warmup_type=warmup_type,
decay_type=decay_type,
min_lr_ratio=min_lr_ratio,
num_cycles=num_cycles,
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
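# --- Illustrative sketch (editorial addition) ---
# The warmup-stable-decay (WSD) schedule in its three phases. The counts, decay type and
# min_lr_ratio below are arbitrary assumptions; the stable phase length is inferred from
# `num_training_steps`.
def _example_wsd_schedule():
    import torch

    param = torch.nn.Parameter(torch.zeros(2))
    optimizer = torch.optim.AdamW([param], lr=1e-3)
    scheduler = get_wsd_schedule(
        optimizer,
        num_warmup_steps=10,
        num_decay_steps=20,
        num_training_steps=100,  # stable phase is inferred as 100 - 10 - 20 = 70 steps
        decay_type="linear",
        min_lr_ratio=0.1,
    )
    lrs = []
    for _ in range(100):
        optimizer.step()
        scheduler.step()
        lrs.append(scheduler.get_last_lr()[0])
    # warm up to 1e-3, hold it for 70 steps, then decay linearly down to ~1e-4
    return lrs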
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule,
SchedulerType.REDUCE_ON_PLATEAU: get_reduce_on_plateau_schedule,
SchedulerType.COSINE_WITH_MIN_LR: get_cosine_with_min_lr_schedule_with_warmup,
SchedulerType.COSINE_WARMUP_WITH_MIN_LR: get_cosine_with_min_lr_schedule_with_warmup_lr_rate,
SchedulerType.WARMUP_STABLE_DECAY: get_wsd_schedule,
}
def get_scheduler(
name: Union[str, SchedulerType],
optimizer: Optimizer,
num_warmup_steps: Optional[int] = None,
num_training_steps: Optional[int] = None,
scheduler_specific_kwargs: Optional[dict] = None,
):
"""
Unified API to get any scheduler from its name.
Args:
name (`str` or `SchedulerType`):
The name of the scheduler to use.
optimizer (`torch.optim.Optimizer`):
The optimizer that will be used during training.
num_warmup_steps (`int`, *optional*):
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
optional); the function will raise an error if it's unset and the scheduler type requires it.
num_training_steps (`int`, *optional*):
The number of training steps to do. This is not required by all schedulers (hence the argument being
optional); the function will raise an error if it's unset and the scheduler type requires it.
scheduler_specific_kwargs (`dict`, *optional*):
Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler
parameters will cause the scheduler function to raise a TypeError.
"""
name = SchedulerType(name)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
# If a `LayerWiseDummyOptimizer` is passed we extract the optimizer dict and
# recursively call `get_scheduler` to get the proper schedulers on each parameter
if optimizer is not None and isinstance(optimizer, LayerWiseDummyOptimizer):
optimizer_dict = optimizer.optimizer_dict
scheduler_dict = {}
for param in optimizer_dict:
scheduler_dict[param] = get_scheduler(
name,
optimizer=optimizer_dict[param],
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
scheduler_specific_kwargs=scheduler_specific_kwargs,
)
def scheduler_hook(param):
# Since the optimizer hook has been already attached we only need to
# attach the scheduler hook, the gradients have been zeroed here
scheduler_dict[param].step()
for param in optimizer_dict:
if param.requires_grad:
param.register_post_accumulate_grad_hook(scheduler_hook)
return LayerWiseDummyScheduler(optimizer_dict=optimizer_dict, lr=optimizer.defaults["lr"])
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer)
if scheduler_specific_kwargs is None:
scheduler_specific_kwargs = {}
if name == SchedulerType.REDUCE_ON_PLATEAU:
return schedule_func(optimizer, **scheduler_specific_kwargs)
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
if name == SchedulerType.INVERSE_SQRT:
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
# wsd scheduler requires either num_training_steps or num_stable_steps
if name == SchedulerType.WARMUP_STABLE_DECAY:
return schedule_func(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
**scheduler_specific_kwargs,
)
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
return schedule_func(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
**scheduler_specific_kwargs,
)
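# --- Illustrative sketch (editorial addition) ---
# The unified entry point resolves a scheduler by name and forwards the common arguments.
# The name, base lr and step counts below are arbitrary assumptions.
def _example_get_scheduler():
    import torch

    param = torch.nn.Parameter(torch.zeros(2))
    optimizer = torch.optim.AdamW([param], lr=5e-4)
    scheduler = get_scheduler(
        "cosine",  # resolved through SchedulerType / TYPE_TO_SCHEDULER_FUNCTION
        optimizer=optimizer,
        num_warmup_steps=10,
        num_training_steps=100,
    )
    optimizer.step()
    scheduler.step()
    return scheduler.get_last_lr()[0]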
class Adafactor(Optimizer):
"""
AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code:
https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://huggingface.co/papers/1804.04235 Note that
this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and
`warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
`relative_step=False`.
Arguments:
params (`Iterable[nn.parameter.Parameter]`):
Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (`float`, *optional*):
The external learning rate.
eps (`tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`):
Regularization constants for square gradient and parameter scale respectively
clip_threshold (`float`, *optional*, defaults to 1.0):
Threshold of root mean square of final gradient update
decay_rate (`float`, *optional*, defaults to -0.8):
Coefficient used to compute running averages of square
beta1 (`float`, *optional*):
Coefficient used for computing running averages of gradient
weight_decay (`float`, *optional*, defaults to 0.0):
Weight decay (L2 penalty)
scale_parameter (`bool`, *optional*, defaults to `True`):
If True, learning rate is scaled by root mean square
relative_step (`bool`, *optional*, defaults to `True`):
If True, time-dependent learning rate is computed instead of external learning rate
warmup_init (`bool`, *optional*, defaults to `False`):
Time-dependent learning rate computation depends on whether warm-up initialization is being used
This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested it.
Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):
- Training without LR warmup or clip_threshold is not recommended.
- use scheduled LR warm-up to fixed LR
- use clip_threshold=1.0 (https://huggingface.co/papers/1804.04235)
- Disable relative updates
- Use scale_parameter=False
- Additional optimizer operations like gradient clipping should not be used alongside Adafactor
Example:
```python
Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
```
Others reported the following combination to work well:
```python
Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
```
When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`]
scheduler as following:
```python
from transformers.optimization import Adafactor, AdafactorSchedule
optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
lr_scheduler = AdafactorSchedule(optimizer)
trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
```
Usage:
```python
# replace AdamW with Adafactor
optimizer = Adafactor(
model.parameters(),
lr=1e-3,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
relative_step=False,
scale_parameter=False,
warmup_init=False,
)
```"""
def __init__(
self,
params,
lr=None,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
scale_parameter=True,
relative_step=True,
warmup_init=False,
):
if lr is not None and relative_step:
raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
if warmup_init and not relative_step:
raise ValueError("`warmup_init=True` requires `relative_step=True`")
defaults = {
"lr": lr,
"eps": eps,
"clip_threshold": clip_threshold,
"decay_rate": decay_rate,
"beta1": beta1,
"weight_decay": weight_decay,
"scale_parameter": scale_parameter,
"relative_step": relative_step,
"warmup_init": warmup_init,
}
super().__init__(params, defaults)
@staticmethod
def _get_lr(param_group, param_state):
rel_step_sz = param_group["lr"]
if param_group["relative_step"]:
min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
param_scale = 1.0
if param_group["scale_parameter"]:
param_scale = max(param_group["eps"][1], param_state["RMS"])
return param_scale * rel_step_sz
@staticmethod
def _get_options(param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group["beta1"] is not None
return factored, use_first_moment
@staticmethod
def _rms(tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
@staticmethod
def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
# copy from fairseq's adafactor implementation:
# https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
@torch.no_grad()
def step(self, closure=None):
"""
Performs a single optimization step
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients.")
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state["step"] = 0
if use_first_moment:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(grad)
if factored:
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
else:
state["exp_avg_sq"] = torch.zeros_like(grad)
state["RMS"] = 0
else:
if use_first_moment:
state["exp_avg"] = state["exp_avg"].to(grad)
if factored:
state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
else:
state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
p_data_fp32 = p
if p.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state["step"] += 1
state["RMS"] = self._rms(p_data_fp32)
lr = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
update = (grad**2) + group["eps"][0]
if factored:
exp_avg_sq_row = state["exp_avg_sq_row"]
exp_avg_sq_col = state["exp_avg_sq_col"]
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
update.mul_(lr)
if use_first_moment:
exp_avg = state["exp_avg"]
exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
update = exp_avg
if group["weight_decay"] != 0:
p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr))
p_data_fp32.add_(-update)
if p.dtype in {torch.float16, torch.bfloat16}:
p.copy_(p_data_fp32)
return loss
class AdafactorSchedule(LambdaLR):
"""
Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.
It returns `initial_lr` during startup and the actual `lr` during stepping.
"""
def __init__(self, optimizer, initial_lr=0.0):
def lr_lambda(_):
return initial_lr
for group in optimizer.param_groups:
group["initial_lr"] = initial_lr
super().__init__(optimizer, lr_lambda)
for group in optimizer.param_groups:
del group["initial_lr"]
def get_lr(self):
opt = self.optimizer
lrs = [
opt._get_lr(group, opt.state[group["params"][0]])
for group in opt.param_groups
if group["params"][0].grad is not None
]
if len(lrs) == 0:
lrs = self.base_lrs # if called before stepping
return lrs
def get_adafactor_schedule(optimizer, initial_lr=0.0):
"""
Get a proxy schedule for [`~optimization.Adafactor`]
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
initial_lr (`float`, *optional*, defaults to 0.0):
Initial lr
Return:
[`~optimization.Adafactor`] proxy schedule object.
"""
return AdafactorSchedule(optimizer, initial_lr)
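# --- Illustrative sketch (editorial addition) ---
# Wiring Adafactor's internal (relative-step) learning rate to the proxy schedule above.
# The tiny model and dummy loss are arbitrary assumptions for illustration.
def _example_adafactor_with_proxy_schedule():
    import torch

    model = torch.nn.Linear(4, 2)  # hypothetical tiny model
    optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    lr_scheduler = AdafactorSchedule(optimizer)
    loss = model(torch.randn(8, 4)).pow(2).mean()  # dummy loss so the parameters get gradients
    loss.backward()
    optimizer.step()
    # the proxy schedule reports the lr values Adafactor computed internally on this step
    return lr_scheduler.get_lr()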
| transformers/src/transformers/optimization.py/0 | {
"file_path": "transformers/src/transformers/optimization.py",
"repo_id": "transformers",
"token_count": 16831
} | 491 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Union, overload
from ..generation import GenerationConfig
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import Pipeline, build_pipeline_init_args
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
logger = logging.get_logger(__name__)
@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True))
class ImageToTextPipeline(Pipeline):
"""
Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
Example:
```python
>>> from transformers import pipeline
>>> captioner = pipeline(model="ydshieh/vit-gpt2-coco-en")
>>> captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
[{'generated_text': 'two birds are standing next to each other '}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image to text pipeline can currently be loaded from pipeline() using the following task identifier:
"image-to-text".
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text).
"""
_pipeline_calls_generate = True
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
)
def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None, timeout=None):
forward_params = {}
preprocess_params = {}
if prompt is not None:
preprocess_params["prompt"] = prompt
if timeout is not None:
preprocess_params["timeout"] = timeout
if max_new_tokens is not None:
forward_params["max_new_tokens"] = max_new_tokens
if generate_kwargs is not None:
if max_new_tokens is not None and "max_new_tokens" in generate_kwargs:
raise ValueError(
"`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use"
" only 1 version"
)
forward_params.update(generate_kwargs)
if self.assistant_model is not None:
forward_params["assistant_model"] = self.assistant_model
if self.assistant_tokenizer is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
return preprocess_params, forward_params, {}
@overload
def __call__(self, inputs: Union[str, "Image.Image"], **kwargs: Any) -> list[dict[str, Any]]: ...
@overload
def __call__(self, inputs: Union[list[str], list["Image.Image"]], **kwargs: Any) -> list[list[dict[str, Any]]]: ...
def __call__(self, inputs: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs):
"""
Assign labels to the image(s) passed as inputs.
Args:
inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a HTTP(s) link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images.
max_new_tokens (`int`, *optional*):
The maximum number of tokens to generate. By default, the `generate` default is used.
generate_kwargs (`Dict`, *optional*):
Pass it to send all of these arguments directly to `generate` allowing full control of this function.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list or a list of lists of `dict`: Each result comes as a dictionary with the following key:
- **generated_text** (`str`) -- The generated text.
"""
# After deprecation of this is completed, remove the default `None` value for `images`
if "images" in kwargs:
inputs = kwargs.pop("images")
if inputs is None:
raise ValueError("Cannot call the image-to-text pipeline without an inputs argument!")
return super().__call__(inputs, **kwargs)
def preprocess(self, image, prompt=None, timeout=None):
image = load_image(image, timeout=timeout)
if prompt is not None:
logger.warning_once(
"Passing `prompt` to the `image-to-text` pipeline is deprecated and will be removed in version 4.48"
" of 🤗 Transformers. Use the `image-text-to-text` pipeline instead",
)
if not isinstance(prompt, str):
raise ValueError(
f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
"Note also that one single text can be provided for conditional image to text generation."
)
model_type = self.model.config.model_type
if model_type == "git":
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.dtype)
input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
input_ids = [self.tokenizer.cls_token_id] + input_ids
input_ids = torch.tensor(input_ids).unsqueeze(0)
model_inputs.update({"input_ids": input_ids})
elif model_type == "pix2struct":
model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.dtype)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.dtype)
text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
model_inputs.update(text_inputs)
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation")
else:
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.dtype)
if self.model.config.model_type == "git" and prompt is None:
model_inputs["input_ids"] = None
return model_inputs
def _forward(self, model_inputs, **generate_kwargs):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"], list)
and all(x is None for x in model_inputs["input_ids"])
):
model_inputs["input_ids"] = None
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
inputs = model_inputs.pop(self.model.main_input_name)
model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
return model_outputs
def postprocess(self, model_outputs):
records = []
for output_ids in model_outputs:
record = {
"generated_text": self.tokenizer.decode(
output_ids,
skip_special_tokens=True,
)
}
records.append(record)
return records
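# --- Illustrative sketch (editorial addition, not exported) ---
# Combining the call-time knobs documented above. The checkpoint name is the one used in
# the class docstring example; the image URL and generation settings are assumptions for
# illustration only.
def _example_image_to_text_usage():
    from transformers import pipeline

    captioner = pipeline(task="image-to-text", model="ydshieh/vit-gpt2-coco-en")
    return captioner(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        max_new_tokens=20,
        generate_kwargs={"num_beams": 2},
    )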
| transformers/src/transformers/pipelines/image_to_text.py/0 | {
"file_path": "transformers/src/transformers/pipelines/image_to_text.py",
"repo_id": "transformers",
"token_count": 4223
} | 492 |
from typing import Any, Optional, Union, overload
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import ChunkPipeline, build_pipeline_init_args
if is_vision_available():
from PIL import Image
from ..image_utils import load_image, valid_images
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES
logger = logging.get_logger(__name__)
@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
"""
Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of
objects when you provide an image and a set of `candidate_labels`.
Example:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... candidate_labels=["cat", "couch"],
... )
[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]
>>> detector(
... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
... candidate_labels=["head", "bird"],
... )
[{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-object-detection"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES)
@overload
def __call__(
self, image: Union[str, "Image.Image"], candidate_labels: Union[str, list[str]], **kwargs: Any
) -> list[dict[str, Any]]: ...
@overload
def __call__(self, image: list[dict[str, Any]], **kwargs: Any) -> list[list[dict[str, Any]]]: ...
def __call__(
self,
image: Union[str, "Image.Image", list[dict[str, Any]]],
candidate_labels: Optional[Union[str, list[str]]] = None,
**kwargs: Any,
) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]:
"""
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
Args:
image (`str`, `PIL.Image` or `list[dict[str, Any]]`):
The pipeline handles three types of images:
- A string containing an http url pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
You can use this parameter to send directly a list of images, or a dataset or a generator like so:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... [
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... ]
... )
[[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
```
candidate_labels (`str` or `list[str]` or `list[list[str]]`):
What the model should recognize in the image.
threshold (`float`, *optional*, defaults to 0.1):
The probability necessary to make a prediction.
top_k (`int`, *optional*, defaults to None):
The number of top predictions that will be returned by the pipeline. If the provided number is `None`
or higher than the number of predictions available, it will default to the number of predictions.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of lists containing prediction results, one list per input image. Each list contains dictionaries
with the following keys:
- **label** (`str`) -- Text query corresponding to the found object.
- **score** (`float`) -- Score corresponding to the object (between 0 and 1).
- **box** (`dict[str,int]`) -- Bounding box of the detected object in the image's original size. It is a
dictionary with the `xmin`, `ymin`, `xmax`, `ymax` keys.
"""
if "text_queries" in kwargs:
candidate_labels = kwargs.pop("text_queries")
if isinstance(image, (str, Image.Image)):
inputs = {"image": image, "candidate_labels": candidate_labels}
elif isinstance(image, (list, tuple)) and valid_images(image):
return list(
super().__call__(
({"image": img, "candidate_labels": labels} for img, labels in zip(image, candidate_labels)),
**kwargs,
)
)
else:
"""
Supports the following format
- {"image": image, "candidate_labels": candidate_labels}
- [{"image": image, "candidate_labels": candidate_labels}]
- Generator and datasets
This is a common pattern in other multimodal pipelines, so we support it here as well.
"""
inputs = image
results = super().__call__(inputs, **kwargs)
return results
def _sanitize_parameters(self, **kwargs):
preprocess_params = {}
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
postprocess_params = {}
if "threshold" in kwargs:
postprocess_params["threshold"] = kwargs["threshold"]
if "top_k" in kwargs:
postprocess_params["top_k"] = kwargs["top_k"]
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, timeout=None):
image = load_image(inputs["image"], timeout=timeout)
candidate_labels = inputs["candidate_labels"]
if isinstance(candidate_labels, str):
candidate_labels = candidate_labels.split(",")
target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
for i, candidate_label in enumerate(candidate_labels):
text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
image_features = self.image_processor(image, return_tensors=self.framework)
if self.framework == "pt":
image_features = image_features.to(self.dtype)
yield {
"is_last": i == len(candidate_labels) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
candidate_label = model_inputs.pop("candidate_label")
is_last = model_inputs.pop("is_last")
outputs = self.model(**model_inputs)
model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def postprocess(self, model_outputs, threshold=0.1, top_k=None):
results = []
for model_output in model_outputs:
label = model_output["candidate_label"]
model_output = BaseModelOutput(model_output)
outputs = self.image_processor.post_process_object_detection(
outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
)[0]
for index in outputs["scores"].nonzero():
score = outputs["scores"][index].item()
box = self._get_bounding_box(outputs["boxes"][index][0])
result = {"score": score, "label": label, "box": box}
results.append(result)
results = sorted(results, key=lambda x: x["score"], reverse=True)
if top_k:
results = results[:top_k]
return results
def _get_bounding_box(self, box: "torch.Tensor") -> dict[str, int]:
"""
Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`dict[str, int]`): Dict containing the coordinates in corners format.
"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
xmin, ymin, xmax, ymax = box.int().tolist()
bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
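# --- Illustrative sketch (editorial addition, not exported) ---
# Using the post-processing knobs (`threshold`, `top_k`) documented above. The checkpoint
# and image URL come from the class docstring example; the label set and thresholds are
# arbitrary assumptions.
def _example_zero_shot_detection_usage():
    from transformers import pipeline

    detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
    return detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "couch", "remote control"],
        threshold=0.2,  # drop low-confidence boxes
        top_k=3,  # keep at most the three highest-scoring detections
    )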
| transformers/src/transformers/pipelines/zero_shot_object_detection.py/0 | {
"file_path": "transformers/src/transformers/pipelines/zero_shot_object_detection.py",
"repo_id": "transformers",
"token_count": 4764
} | 493 |
from typing import TYPE_CHECKING, Any, Optional
from ..utils import is_accelerate_available, is_torch_available, is_torch_xpu_available, logging
from .base import HfQuantizer
from .quantizers_utils import get_module_from_name
if is_torch_available():
import torch
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
class FineGrainedFP8HfQuantizer(HfQuantizer):
"""
FP8 quantization implementation supporting both standard and MoE models.
Supports both e4m3fn formats based on platform.
"""
requires_parameters_quantization = True
requires_calibration = False
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_torch_available():
raise ImportError(
"Using fp8 quantization requires torch >= 2.1.0"
"Please install the latest version of torch ( pip install --upgrade torch )"
)
if not is_accelerate_available():
raise ImportError("Loading an FP8 quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Converting into FP8 weights from tf/flax weights is currently not supported, "
"please make sure the weights are in PyTorch format."
)
if not (torch.cuda.is_available() or is_torch_xpu_available()):
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
if torch.cuda.is_available():
compute_capability = torch.cuda.get_device_capability()
major, minor = compute_capability
if (major < 8) or (major == 8 and minor < 9):
raise ValueError(
"FP8 quantized models is only supported on GPUs with compute capability >= 8.9 (e.g 4090/H100)"
f", actual = `{major}.{minor}`"
)
device_map = kwargs.get("device_map")
if device_map is None:
logger.warning_once(
"You have loaded an FP8 model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model. To remove this warning, pass device_map = 'cuda'. "
)
else:
if (
not self.pre_quantized
and isinstance(device_map, dict)
and ("cpu" in device_map.values() or "disk" in device_map.values())
):
raise ValueError(
"You are attempting to load an FP8 model with a device_map that contains a cpu/disk device."
"This is not supported when the model is quantized on the fly. "
"Please use a quantized checkpoint or remove the cpu/disk device from the device_map."
)
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
logger.info("Setting dtype to torch.float32 as no dtype was specified in from_pretrained")
dtype = torch.float32
return dtype
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: dict[str, Any],
unexpected_keys: Optional[list[str]] = None,
):
"""
Quantizes weights to FP8 format using Block-wise quantization
"""
from ..modeling_utils import _load_parameter_into_model
param_value = param_value.to(target_device)
# Get FP8 min/max values
fp8_min = torch.finfo(torch.float8_e4m3fn).min
fp8_max = torch.finfo(torch.float8_e4m3fn).max
block_size_m, block_size_n = self.quantization_config.weight_block_size
rows, cols = param_value.shape[-2:]
if rows % block_size_m != 0 or cols % block_size_n != 0:
raise ValueError(
f"Matrix dimensions ({rows}, {cols}) must be divisible by block sizes ({block_size_m}, {block_size_n})"
)
param_value_orig_shape = param_value.shape
param_value = param_value.reshape(
-1, rows // block_size_m, block_size_m, cols // block_size_n, block_size_n
).permute(0, 1, 3, 2, 4)
# Calculate scaling factor for each block
max_abs = torch.amax(torch.abs(param_value), dim=(-1, -2))
scale = fp8_max / max_abs
scale_orig_shape = scale.shape
scale = scale.unsqueeze(-1).unsqueeze(-1)
# Quantize the weights
quantized_param = torch.clamp(param_value * scale, min=fp8_min, max=fp8_max).to(torch.float8_e4m3fn)
quantized_param = quantized_param.permute(0, 1, 3, 2, 4)
# Reshape back to matrix shape
quantized_param = quantized_param.reshape(param_value_orig_shape)
# Reshape scale to match the number of blocks
scale = scale.reshape(scale_orig_shape).squeeze().reciprocal()
# Load into the model
_load_parameter_into_model(model, param_name, quantized_param)
_load_parameter_into_model(model, param_name.rsplit(".", 1)[0] + ".weight_scale_inv", scale)
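# Standalone sketch (toy 2-D weight, hypothetical helper name) mirroring the block-wise math
# above: each (block_m x block_n) tile gets its own scale so its largest magnitude maps to the
# float8_e4m3fn maximum, and the reciprocal scale is what is stored as `weight_scale_inv`.
# Needs torch >= 2.1 for the float8 dtype.
def _blockwise_fp8_sketch(weight: "torch.Tensor", block_m: int = 2, block_n: int = 2):
    fp8_max = torch.finfo(torch.float8_e4m3fn).max
    rows, cols = weight.shape
    # split the matrix into (rows // block_m, cols // block_n) tiles of shape (block_m, block_n)
    blocks = weight.reshape(rows // block_m, block_m, cols // block_n, block_n).permute(0, 2, 1, 3)
    scale = fp8_max / blocks.abs().amax(dim=(-1, -2), keepdim=True)
    quantized = (blocks * scale).clamp(-fp8_max, fp8_max).to(torch.float8_e4m3fn)
    scale_inv = scale.squeeze(-1).squeeze(-1).reciprocal()  # analogous to `weight_scale_inv`
    dequantized = quantized.float() * scale_inv[:, :, None, None]  # broadcast each block's scale back
    return quantized, scale_inv, dequantized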
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: dict[str, Any],
**kwargs,
):
from ..integrations.finegrained_fp8 import FP8Linear
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module, FP8Linear):
if self.pre_quantized or tensor_name == "bias":
if tensor_name == "weight" and param_value.dtype != torch.float8_e4m3fn:
raise ValueError("Expect quantized weights but got an unquantized weight")
return False
else:
if tensor_name == "weight_scale_inv":
raise ValueError("Expect unquantized weights but got a quantized weight_scale")
return True
return False
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[list[str]] = None,
**kwargs,
):
from ..integrations.finegrained_fp8 import replace_with_fp8_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_fp8_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
from ..integrations import FP8Linear
not_missing_keys = []
for name, module in model.named_modules():
if isinstance(module, FP8Linear):
for missing in missing_keys:
if (
(name in missing or name in f"{prefix}.{missing}")
and not missing.endswith(".weight")
and not missing.endswith(".bias")
):
not_missing_keys.append(missing)
return [k for k in missing_keys if k not in not_missing_keys]
def update_tp_plan(self, config):
if "Qwen3" in config.__class__.__name__:
text_plan = {
"layers.*.self_attn.q_proj.weight": "local_colwise",
"layers.*.self_attn.q_proj.weight_scale_inv": "local_colwise",
"layers.*.self_attn.k_proj.weight": "local_colwise",
"layers.*.self_attn.k_proj.weight_scale_inv": "local_colwise",
"layers.*.self_attn.v_proj.weight": "local_colwise",
"layers.*.self_attn.v_proj.weight_scale_inv": "local_colwise",
"layers.*.self_attn.o_proj.weight": "local_rowwise",
"layers.*.self_attn.o_proj.weight_scale_inv": "local_rowwise",
"layers.*.self_attn": "gather",
"layers.*.mlp.gate_proj.weight": "local_colwise",
"layers.*.mlp.gate_proj.weight_scale_inv": "local_colwise",
"layers.*.mlp.up_proj.weight": "local_colwise",
"layers.*.mlp.up_proj.weight_scale_inv": "local_colwise",
"layers.*.mlp.down_proj.weight": "local_rowwise",
"layers.*.mlp.down_proj.weight_scale_inv": "local_rowwise",
"layers.*.mlp": "gather",
}
config.base_model_tp_plan = text_plan
return config
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return False
def get_cuda_warm_up_factor(self):
# Pre-processing is done cleanly, so we can allocate everything here
return 2
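# Hedged usage sketch (the model id is illustrative): this quantizer is selected automatically
# when a `FineGrainedFP8Config` is passed to `from_pretrained`, quantizing the linear weights
# on the fly on a capable GPU/XPU:
#
#     from transformers import AutoModelForCausalLM, FineGrainedFP8Config
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "Qwen/Qwen2.5-0.5B",
#         quantization_config=FineGrainedFP8Config(),  # 128x128 weight blocks by default
#         device_map="cuda",
#     )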
| transformers/src/transformers/quantizers/quantizer_finegrained_fp8.py/0 | {
"file_path": "transformers/src/transformers/quantizers/quantizer_finegrained_fp8.py",
"repo_id": "transformers",
"token_count": 4323
} | 494 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import copy
import doctest
import functools
import gc
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import types
import unittest
from collections import UserDict, defaultdict
from collections.abc import Generator, Iterable, Iterator, Mapping
from dataclasses import MISSING, fields
from functools import cache, wraps
from io import StringIO
from pathlib import Path
from typing import Any, Callable, Optional, Union
from unittest import mock
from unittest.mock import patch
import huggingface_hub.utils
import requests
import urllib3
from huggingface_hub import delete_repo
from packaging import version
from transformers import Trainer
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_swanlab_available,
is_tensorboard_available,
is_trackio_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
TRITON_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_apollo_torch_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_auto_round_available,
is_av_available,
is_bitsandbytes_available,
is_bitsandbytes_multi_backend_available,
is_bs4_available,
is_compressed_tensors_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flash_attn_3_available,
is_flax_available,
is_flute_available,
is_fp_quant_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_gptqmodel_available,
is_grokadamw_available,
is_hadamard_available,
is_hqq_available,
is_huggingface_hub_greater_or_equal,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_kernels_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_mistral_common_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_optimum_quanto_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_psutil_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quark_available,
is_qutlass_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_schedulefree_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_spacy_available,
is_speech_available,
is_spqr_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_greater_or_equal,
is_torch_hpu_available,
is_torch_mlu_available,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_optimi_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchcodec_available,
is_torchdynamo_available,
is_torchvision_available,
is_triton_available,
is_vision_available,
is_vptq_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
from accelerate.utils.imports import is_fp8_available
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
IS_XPU_SYSTEM = False
logger = transformers_logging.get_logger(__name__)
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_flaky_tests = parse_flag_from_env("RUN_FLAKY", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
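# Illustrative sketch (class and method names are hypothetical): a test gated on the RUN_SLOW
# flag parsed above. It is skipped by default and only runs with e.g.
# `RUN_SLOW=yes pytest -k test_long_generation`.
class _ExampleSlowTest(unittest.TestCase):
    @slow
    def test_long_generation(self):
        self.assertTrue(True)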
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def skip_if_not_implemented(test_func):
@functools.wraps(test_func)
def wrapper(*args, **kwargs):
try:
return test_func(*args, **kwargs)
except NotImplementedError as e:
raise unittest.SkipTest(f"Test skipped due to NotImplementedError: {e}")
return wrapper
def apply_skip_if_not_implemented(cls):
"""
Class decorator to apply @skip_if_not_implemented to all test methods.
"""
for attr_name in dir(cls):
if attr_name.startswith("test_"):
attr = getattr(cls, attr_name)
if callable(attr):
setattr(cls, attr_name, skip_if_not_implemented(attr))
return cls
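# Hypothetical sketch of how the two helpers above compose: every `test_*` method that raises
# NotImplementedError is reported as a skip instead of a failure.
@apply_skip_if_not_implemented
class _ExampleNotImplementedTest(unittest.TestCase):
    def test_not_wired_up_yet(self):
        raise NotImplementedError("covered by skip_if_not_implemented")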
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_apollo_torch(test_case):
"""
Decorator marking a test that requires APOLLO. These tests are skipped when APOLLO isn't installed.
https://github.com/zhuhanqing/APOLLO
"""
return unittest.skipUnless(is_apollo_torch_available(), "test requires APOLLO")(test_case)
def require_torch_optimi(test_case):
"""
Decorator marking a test that requires torch-optimi. These tests are skipped when torch-optimi isn't installed.
https://github.com/jxnl/torch-optimi
"""
return unittest.skipUnless(is_torch_optimi_available(), "test requires torch-optimi")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_schedulefree(test_case):
"""
Decorator marking a test that requires schedulefree. These tests are skipped when schedulefree isn't installed.
https://github.com/facebookresearch/schedule_free
"""
return unittest.skipUnless(is_schedulefree_available(), "test requires schedulefree")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_triton(min_version: str = TRITON_MIN_VERSION):
"""
Decorator marking a test that requires triton. These tests are skipped when triton isn't installed.
"""
def decorator(test_case):
return unittest.skipUnless(is_triton_available(min_version), f"test requires triton version >= {min_version}")(
test_case
)
return decorator
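# Illustrative sketch (names and version are hypothetical): unlike `slow` above, `require_triton`
# is a decorator *factory*, so it is applied with parentheses.
class _ExampleTritonTest(unittest.TestCase):
    @require_triton("3.0.0")
    def test_triton_kernel(self):
        self.assertTrue(True)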
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
logger.warning_once(
"TensorFlow test-related code, including `require_tf2onnx`, is deprecated and will be removed in "
"Transformers v4.55"
)
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_greater_or_equal(version: str):
"""
Decorator marking a test that requires PyTorch version >= `version`.
These tests are skipped when PyTorch version is less than `version`.
"""
def decorator(test_case):
return unittest.skipUnless(is_torch_greater_or_equal(version), f"test requires PyTorch version >= {version}")(
test_case
)
return decorator
def require_huggingface_hub_greater_or_equal(version: str):
"""
Decorator marking a test that requires huggingface_hub version >= `version`.
These tests are skipped when huggingface_hub version is less than `version`.
"""
def decorator(test_case):
return unittest.skipUnless(
is_huggingface_hub_greater_or_equal(version), f"test requires huggingface_hub version >= {version}"
)(test_case)
return decorator
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_kernels(test_case):
"""
Decorator marking a test that requires the `kernels` library.
These tests are skipped when `kernels` isn't installed.
"""
return unittest.skipUnless(is_kernels_available(), "test requires kernels")(test_case)
def require_flash_attn_3(test_case):
"""
Decorator marking a test that requires Flash Attention 3.
These tests are skipped when Flash Attention 3 isn't installed.
"""
return unittest.skipUnless(is_flash_attn_3_available(), "test requires Flash Attention 3")(test_case)
def require_read_token(test_case):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
if isinstance(test_case, type):
for attr_name in dir(test_case):
attr = getattr(test_case, attr_name)
if isinstance(attr, types.FunctionType):
if getattr(attr, "__require_read_token__", False):
continue
wrapped = require_read_token(attr)
setattr(test_case, attr_name, wrapped)
return test_case
else:
if getattr(test_case, "__require_read_token__", False):
return test_case
@functools.wraps(test_case)
def wrapper(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return test_case(*args, **kwargs)
else: # Allow running locally with the default token env variable
# dealing with static/class methods and called by `self.xxx`
if "staticmethod" in inspect.getsource(test_case).strip():
if len(args) > 0 and isinstance(args[0], unittest.TestCase):
return test_case(*args[1:], **kwargs)
return test_case(*args, **kwargs)
wrapper.__require_read_token__ = True
return wrapper
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torchcodec(test_case):
"""
Decorator marking a test that requires Torchcodec.
These tests are skipped when Torchcodec isn't installed.
"""
return unittest.skipUnless(is_torchcodec_available(), "test requires Torchcodec")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
logger.warning_once(
"TensorFlow test-related code, including `require_tensorflow_probability`, is deprecated and will be "
"removed in Transformers v4.55"
)
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
logger.warning_once(
"TensorFlow test-related code, including `require_tf`, is deprecated and will be removed in Transformers v4.55"
)
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
logger.warning_once(
"JAX test-related code, including `require_flax`, is deprecated and will be removed in Transformers v4.55"
)
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
logger.warning_once(
"TensorFlow test-related code, including `require_tensorflow_text`, is deprecated and will be "
"removed in Transformers v4.55"
)
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU CUDA setup (in PyTorch). These tests are skipped on a machine without
multiple CUDA GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple CUDA GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_non_hpu(test_case):
"""
Decorator marking a test that should be skipped for HPU.
"""
return unittest.skipUnless(torch_device != "hpu", "test requires a non-HPU")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_non_xpu(test_case):
"""
Decorator marking a test that should be skipped for XPU.
"""
return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_torch_multi_hpu(test_case):
"""
Decorator marking a test that requires a multi-HPU setup (in PyTorch). These tests are skipped on a machine without
multiple HPUs.
To run *only* the multi_hpu tests, assuming all test names contain multi_hpu: $ pytest -sv ./tests -k "multi_hpu"
"""
if not is_torch_hpu_available():
return unittest.skip(reason="test requires PyTorch HPU")(test_case)
return unittest.skipUnless(torch.hpu.device_count() > 1, "test requires multiple HPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
if torch_device == "mlu" and not is_torch_mlu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but MLU is unavailable. Please double-check your testing environment."
)
if torch_device == "hpu" and not is_torch_hpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but HPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif is_torch_npu_available():
torch_device = "npu"
elif is_torch_mlu_available():
torch_device = "mlu"
elif is_torch_hpu_available():
torch_device = "hpu"
elif is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
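# Minimal sketch (not part of the original module): `torch_device` is resolved once at import
# time from the environment variables handled above, so the test device must be pinned *before*
# importing testing_utils, e.g.:
#
#     import os
#     os.environ["TRANSFORMERS_TEST_DEVICE"] = "cpu"
#     from transformers.testing_utils import torch_device
#     assert torch_device == "cpu"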
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torchao_version_greater_or_equal(torchao_version):
def decorator(test_case):
correct_torchao_version = is_torchao_available() and version.parse(
version.parse(importlib.metadata.version("torchao")).base_version
) >= version.parse(torchao_version)
return unittest.skipUnless(
correct_torchao_version, f"Test requires torchao with the version greater than {torchao_version}."
)(test_case)
return decorator
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_mps(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "mps", "test requires MPS")(test_case)
def require_large_cpu_ram(test_case, memory: float = 80):
"""Decorator marking a test that requires a CPU RAM with more than `memory` GiB of memory."""
if not is_psutil_available():
return test_case
import psutil
return unittest.skipUnless(
psutil.virtual_memory().total / 1024**3 > memory,
f"test requires a machine with more than {memory} GiB of CPU RAM memory",
)(test_case)
def require_torch_large_gpu(test_case, memory: float = 20):
"""Decorator marking a test that requires a CUDA GPU with more than `memory` GiB of memory."""
if torch_device != "cuda":
return unittest.skip(reason=f"test requires a CUDA GPU with more than {memory} GiB of memory")(test_case)
return unittest.skipUnless(
torch.cuda.get_device_properties(0).total_memory / 1024**3 > memory,
f"test requires a GPU with more than {memory} GiB of memory",
)(test_case)
def require_torch_large_accelerator(test_case, memory: float = 20):
"""Decorator marking a test that requires an accelerator with more than `memory` GiB of memory."""
if torch_device != "cuda" and torch_device != "xpu":
return unittest.skip(reason=f"test requires a GPU or XPU with more than {memory} GiB of memory")(test_case)
torch_accelerator_module = getattr(torch, torch_device)
return unittest.skipUnless(
torch_accelerator_module.get_device_properties(0).total_memory / 1024**3 > memory,
f"test requires a GPU or XPU with more than {memory} GiB of memory",
)(test_case)
def require_torch_gpu_if_bnb_not_multi_backend_enabled(test_case):
"""
Decorator marking a test that requires a GPU if bitsandbytes multi-backend feature is not enabled.
"""
if is_bitsandbytes_available() and is_bitsandbytes_multi_backend_available():
return test_case
return require_torch_gpu(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_fp8(test_case):
"""Decorator marking a test that requires supports for fp8"""
return unittest.skipUnless(is_accelerate_available() and is_fp8_available(), "test requires fp8 support")(
test_case
)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_deterministic_for_xpu(test_case):
@wraps(test_case)
def wrapper(*args, **kwargs):
if is_torch_xpu_available():
original_state = torch.are_deterministic_algorithms_enabled()
try:
torch.use_deterministic_algorithms(True)
return test_case(*args, **kwargs)
finally:
torch.use_deterministic_algorithms(original_state)
else:
return test_case(*args, **kwargs)
return wrapper
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_swanlab(test_case):
"""
Decorator marking a test that requires swanlab.
These tests are skipped when swanlab isn't installed.
"""
return unittest.skipUnless(is_swanlab_available(), "test requires swanlab")(test_case)
def require_trackio(test_case):
"""
Decorator marking a test that requires trackio.
These tests are skipped when trackio isn't installed.
"""
return unittest.skipUnless(is_trackio_available(), "test requires trackio")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_vptq(test_case):
"""
Decorator marking a test that requires vptq
"""
return unittest.skipUnless(is_vptq_available(), "test requires vptq")(test_case)
def require_spqr(test_case):
"""
Decorator marking a test that requires spqr
"""
return unittest.skipUnless(is_spqr_available(), "test requires spqr")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
eetq_available = is_eetq_available()
if eetq_available:
try:
import eetq # noqa: F401
except ImportError as exc:
if "shard_checkpoint" in str(exc):
# EETQ 1.0.0 is currently broken with the latest transformers because it tries to import the removed
# shard_checkpoint function, see https://github.com/NetEase-FuXi/EETQ/issues/34.
# TODO: Remove once eetq releases a fix and this release is used in CI
eetq_available = False
return unittest.skipUnless(eetq_available, "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_gptq(test_case):
"""
Decorator for gptqmodel or auto_gptq dependency
"""
return unittest.skipUnless(
is_gptqmodel_available() or is_auto_gptq_available(), "test requires gptqmodel or auto-gptq"
)(test_case)
def require_hqq(test_case):
"""
Decorator for hqq dependency
"""
return unittest.skipUnless(is_hqq_available(), "test requires hqq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_auto_round(test_case):
"""
Decorator for auto_round dependency
"""
return unittest.skipUnless(is_auto_round_available(), "test requires autoround")(test_case)
def require_optimum_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_optimum_quanto_available(), "test requires optimum-quanto")(test_case)
def require_compressed_tensors(test_case):
"""
Decorator for compressed_tensors dependency
"""
return unittest.skipUnless(is_compressed_tensors_available(), "test requires compressed_tensors")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_quark(test_case):
"""
Decorator for quark dependency
"""
return unittest.skipUnless(is_quark_available(), "test requires quark")(test_case)
def require_flute_hadamard(test_case):
"""
Decorator marking a test that requires higgs and hadamard
"""
return unittest.skipUnless(
is_flute_available() and is_hadamard_available(), "test requires flute and fast_hadamard_transform"
)(test_case)
def require_fp_quant(test_case):
"""
Decorator marking a test that requires fp_quant
"""
return unittest.skipUnless(is_fp_quant_available(), "test requires fp_quant")(test_case)
def require_qutlass(test_case):
"""
Decorator marking a test that requires qutlass
"""
return unittest.skipUnless(is_qutlass_available(), "test requires qutlass")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def require_speech(test_case):
"""
Decorator marking a test that requires speech. These tests are skipped when speech isn't available.
"""
return unittest.skipUnless(is_speech_available(), "test requires torchaudio")(test_case)
def require_openai(test_case):
"""
Decorator marking a test that requires openai
"""
return unittest.skipUnless(is_openai_available(), "test requires openai")(test_case)
def require_mistral_common(test_case):
"""
Decorator marking a test that requires mistral-common. These tests are skipped when mistral-common isn't available.
"""
return unittest.skipUnless(is_mistral_common_available(), "test requires mistral-common")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
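# Usage sketch (the appended path is illustrative): called from inside any test module,
# `get_tests_dir("fixtures/tests_samples/COCO")` resolves the fixtures folder relative to the
# repository's `tests/` directory no matter where pytest was launched from.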
def get_steps_per_epoch(trainer: Trainer) -> int:
training_args = trainer.args
train_dataloader = trainer.get_train_dataloader()
initial_training_values = trainer.set_initial_training_values(
args=training_args,
dataloader=train_dataloader,
total_train_batch_size=training_args.per_device_train_batch_size,
)
steps_per_epoch = initial_training_values[1]
return steps_per_epoch
def evaluate_side_effect_factory(
side_effect_values: list[dict[str, float]],
) -> Generator[dict[str, float], None, None]:
"""
Function that returns side effects for the _evaluate method.
Used when we're unsure of exactly how many times _evaluate will be called.
"""
yield from side_effect_values
while True:
yield side_effect_values[-1]
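# Hedged consumption sketch (names are illustrative): used as a `unittest.mock` side_effect,
# the generator yields the provided metric dicts in order, then repeats the last one for any
# additional calls.
_fake_evaluate = mock.Mock(side_effect=evaluate_side_effect_factory([{"eval_loss": 0.5}, {"eval_loss": 0.4}]))
assert _fake_evaluate() == {"eval_loss": 0.5}
assert _fake_evaluate() == {"eval_loss": 0.4}
assert _fake_evaluate() == {"eval_loss": 0.4}  # keeps returning the last value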
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
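# Tiny demonstration of `apply_print_resets`: progress-bar style output that rewrites the line
# with "\r" collapses to the final message only, which is what the assertion should see.
assert apply_print_resets("loading 10%\rloading 90%\rall files loaded") == "all files loaded"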
def set_model_tester_for_less_flaky_test(test_case):
target_num_hidden_layers = 1
# TODO (if possible): Avoid exceptional cases
exceptional_classes = [
"ZambaModelTester",
"Zamba2ModelTester",
"RwkvModelTester",
"AriaVisionText2TextModelTester",
"GPTNeoModelTester",
"DPTModelTester",
]
if test_case.model_tester.__class__.__name__ in exceptional_classes:
target_num_hidden_layers = None
if hasattr(test_case.model_tester, "out_features") or hasattr(test_case.model_tester, "out_indices"):
target_num_hidden_layers = None
if hasattr(test_case.model_tester, "num_hidden_layers") and target_num_hidden_layers is not None:
test_case.model_tester.num_hidden_layers = target_num_hidden_layers
if (
hasattr(test_case.model_tester, "vision_config")
and "num_hidden_layers" in test_case.model_tester.vision_config
and target_num_hidden_layers is not None
):
test_case.model_tester.vision_config = copy.deepcopy(test_case.model_tester.vision_config)
if isinstance(test_case.model_tester.vision_config, dict):
test_case.model_tester.vision_config["num_hidden_layers"] = 1
else:
test_case.model_tester.vision_config.num_hidden_layers = 1
if (
hasattr(test_case.model_tester, "text_config")
and "num_hidden_layers" in test_case.model_tester.text_config
and target_num_hidden_layers is not None
):
test_case.model_tester.text_config = copy.deepcopy(test_case.model_tester.text_config)
if isinstance(test_case.model_tester.text_config, dict):
test_case.model_tester.text_config["num_hidden_layers"] = 1
else:
test_case.model_tester.text_config.num_hidden_layers = 1
# A few model class specific handling
# For Albert
if hasattr(test_case.model_tester, "num_hidden_groups"):
test_case.model_tester.num_hidden_groups = test_case.model_tester.num_hidden_layers
def set_config_for_less_flaky_test(config):
target_attrs = [
"rms_norm_eps",
"layer_norm_eps",
"norm_eps",
"norm_epsilon",
"layer_norm_epsilon",
"batch_norm_eps",
]
for target_attr in target_attrs:
setattr(config, target_attr, 1.0)
# norm layers (layer/group norm, etc.) could cause flaky tests when the tensors have very small variance.
# (We don't need the original epsilon values to check eager/sdpa matches)
attrs = ["text_config", "vision_config", "text_encoder", "audio_encoder", "decoder"]
for attr in attrs:
if hasattr(config, attr):
for target_attr in target_attrs:
setattr(getattr(config, attr), target_attr, 1.0)
def set_model_for_less_flaky_test(model):
# Another way to make sure norm layers have desired epsilon. (Some models don't set it from its config.)
target_names = (
"LayerNorm",
"GroupNorm",
"BatchNorm",
"RMSNorm",
"BatchNorm2d",
"BatchNorm1d",
"BitGroupNormActivation",
"WeightStandardizedConv2d",
)
target_attrs = ["eps", "epsilon", "variance_epsilon"]
if is_torch_available() and isinstance(model, torch.nn.Module):
for module in model.modules():
if type(module).__name__.endswith(target_names):
for attr in target_attrs:
if hasattr(module, attr):
setattr(module, attr, 1.0)
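# Hedged usage sketch (the model class is a placeholder): the three `set_*_for_less_flaky_test`
# helpers are typically combined when comparing numerically sensitive code paths (e.g. eager vs
# sdpa attention), so that tiny epsilon values don't dominate the comparison.
#
#     set_model_tester_for_less_flaky_test(self)
#     config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
#     set_config_for_less_flaky_test(config)
#     model = MyModel(config)  # placeholder model class
#     set_model_for_less_flaky_test(model)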
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
        logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
    This is a context manager to temporarily change the transformers modules' logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
class TemporaryHubRepo:
"""Create a temporary Hub repository and return its `RepoUrl` object. This is similar to
`tempfile.TemporaryDirectory` and can be used as a context manager. For example:
with TemporaryHubRepo(token=self._token) as temp_repo:
...
Upon exiting the context, the repository and everything contained in it are removed.
Example:
```python
with TemporaryHubRepo(token=self._token) as temp_repo:
        model.push_to_hub(temp_repo.repo_id, token=self._token)
```
"""
def __init__(self, namespace: Optional[str] = None, token: Optional[str] = None) -> None:
self.token = token
with tempfile.TemporaryDirectory() as tmp_dir:
repo_id = Path(tmp_dir).name
if namespace is not None:
repo_id = f"{namespace}/{repo_id}"
self.repo_url = huggingface_hub.create_repo(repo_id, token=self.token)
def __enter__(self):
return self.repo_url
def __exit__(self, exc, value, tb):
delete_repo(repo_id=self.repo_url.repo_id, token=self.token, missing_ok=True)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
    Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
        the preset `PYTHONPATH` if any (all fully resolved paths).
"""
env = os.environ.copy()
paths = [self.repo_root_dir_str, self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::
        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
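# Usage sketch (variable names are hypothetical): temporarily set and remove environment
# variables for the duration of a block; everything is restored on exit.
#
#     with mockenv_context("OLD_VAR", NEW_VAR="1"):
#         assert os.environ.get("OLD_VAR") is None
#         assert os.environ["NEW_VAR"] == "1"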
# --- pytest conf functions --- #
# to avoid multiple invocations from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
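# Hedged sketch of the `conftest.py` wrapper this docstring refers to (the file location is assumed):
#
#     # tests/conftest.py
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)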
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
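# Illustrative sketch (the launched script is hypothetical): feeding the unique port into a
# `torchrun` command so concurrent pytest-xdist workers don't fight over the same master port.
#
#     cmd = f"torchrun --nproc_per_node 2 --master_port {get_torch_dist_unique_port()} my_script.py".split()
#     execute_subprocess_async(cmd, env=self.get_env())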
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple(nested_simplify(item, decimals) for item in obj)
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)) or obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32, np.float16)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
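# Example sketch (values made up): floats are rounded to `decimals` places and tensors/arrays
# are unwrapped, so equality assertions in tests stay readable.
#
#     nested_simplify({"score": 0.987654, "label": "POSITIVE"})
#     # -> {"score": 0.988, "label": "POSITIVE"}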
def check_json_file_has_correct_format(file_path):
with open(file_path) as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: list[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self._thread_id = threading.get_ident()
self._extra_info = []
def patched_with_thread_info(func):
def wrap(*args, **kwargs):
self._extra_info.append(threading.get_ident())
return func(*args, **kwargs)
return wrap
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", side_effect=patched_with_thread_info(urllib3.connectionpool.log.debug)
)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
assert len(self.mock.call_args_list) == len(self._extra_info)
for thread_id, call in zip(self._extra_info, self.mock.call_args_list):
if thread_id != self._thread_id:
continue
# code 307: the URL being requested by the user has moved to a temporary location
if call.args[-2] == 307:
continue
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Please note that our push tests use `pytest-rerunfailures`, which prompts the CI to rerun certain types of
failed tests. More specifically, if the test exception contains any substring in `FLAKY_TEST_FAILURE_PATTERNS`
(in `.circleci/create_circleci_config.py`), it will be rerun. If you find a recurrent pattern of failures,
expand `FLAKY_TEST_FAILURE_PATTERNS` in our CI configuration instead of using `is_flaky`.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
logger.error(f"Test failed with {err} at try {retry_count}/{max_attempts}.")
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return unittest.skipUnless(_run_flaky_tests, "test is flaky")(wrapper)
return decorator
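# Hedged usage sketch (the test name, arguments and issue reference are hypothetical):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasional numerical jitter, see issue #XXXXX")
#     def test_generation_matches_reference(self):
#         ...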
def hub_retry(max_attempts: int = 5, wait_before_retry: Optional[float] = 2):
"""
To decorate tests that download from the Hub. They can fail due to a
variety of network issues such as timeouts, connection resets, etc.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*, defaults to 2):
If provided, will wait that number of seconds before retrying the test.
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
# We catch all exceptions related to network issues from requests
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.ReadTimeout,
requests.exceptions.HTTPError,
requests.exceptions.RequestException,
) as err:
logger.error(
f"Test failed with {err} at try {retry_count}/{max_attempts} as it couldn't connect to the specified Hub repository."
)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
def run_first(test_case):
"""
Decorator marking a test with order(1). When pytest-order plugin is installed, tests marked with this decorator
are guaranteed to run first.
This is especially useful in some test settings like on a Gaudi instance where a Gaudi device can only be used by a
single process at a time. So we make sure all tests that run in a subprocess are launched first, to avoid device
allocation conflicts.
"""
import pytest
return pytest.mark.order(1)(test_case)
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", "600"))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f"{results['error']}")
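# Hedged sketch of a `target_func` compatible with `run_test_in_subprocess` (the name and body
# are illustrative assumptions, not taken from this file): it must read its inputs from the
# input queue and put a dict containing an "error" key on the output queue.
#
#     import traceback
#
#     def _test_something_target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             # ... run the actual assertions on `inputs` here ...
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()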
def run_test_using_subprocess(func):
"""
To decorate a test to run in a subprocess using the `subprocess` module. This could avoid potential GPU memory
    issues (GPU OOM or a test that causes many subsequent tests to fail with `CUDA error: device-side assert triggered`).
"""
import pytest
@functools.wraps(func)
def wrapper(*args, **kwargs):
if os.getenv("_INSIDE_SUB_PROCESS", None) == "1":
func(*args, **kwargs)
else:
test = " ".join(os.environ.get("PYTEST_CURRENT_TEST").split(" ")[:-1])
try:
import copy
env = copy.deepcopy(os.environ)
env["_INSIDE_SUB_PROCESS"] = "1"
                # This prevents the entries in `short test summary info` given by the subprocess being truncated, so the
# full information can be passed to the parent pytest process.
# See: https://docs.pytest.org/en/stable/explanation/ci.html
env["CI"] = "true"
                # If not a subclass of `unittest.TestCase` and `pytestconfig` is used: try to grab and use the arguments
if "pytestconfig" in kwargs:
command = list(kwargs["pytestconfig"].invocation_params.args)
for idx, x in enumerate(command):
if x in kwargs["pytestconfig"].args:
test = test.split("::")[1:]
command[idx] = "::".join([f"{func.__globals__['__file__']}"] + test)
command = [f"{sys.executable}", "-m", "pytest"] + command
command = [x for x in command if x not in ["--no-summary"]]
# Otherwise, simply run the test with no option at all
else:
command = [f"{sys.executable}", "-m", "pytest", f"{test}"]
subprocess.run(command, env=env, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
exception_message = e.stdout.decode()
lines = exception_message.split("\n")
# Add a first line with more informative information instead of just `= test session starts =`.
# This makes the `short test summary info` section more useful.
if "= test session starts =" in lines[0]:
text = ""
for line in lines[1:]:
if line.startswith("FAILED "):
text = line[len("FAILED ") :]
text = "".join(text.split(" - ")[1:])
elif line.startswith("=") and line.endswith("=") and " failed in " in line:
break
elif len(text) > 0:
text += f"\n{line}"
text = "(subprocess) " + text
lines = [text] + lines
exception_message = "\n".join(lines)
raise pytest.fail(exception_message, pytrace=False)
return wrapper
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )(.*?```)"
codeblocks = re.split(codeblock_pattern, string, flags=re.DOTALL)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", "0"))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
if not callable(dispatch_table["default"]):
return dispatch_table["default"]
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some device agnostic functions return values or None, which will be returned directly.
if not callable(fn):
return fn
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {
"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed,
"default": torch.manual_seed,
}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache,
"cpu": None,
"default": None,
}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count,
"cpu": lambda: 0,
"default": lambda: 1,
}
BACKEND_RESET_MAX_MEMORY_ALLOCATED = {
"cuda": torch.cuda.reset_max_memory_allocated,
"cpu": None,
"default": None,
}
BACKEND_MAX_MEMORY_ALLOCATED = {
"cuda": torch.cuda.max_memory_allocated,
"cpu": 0,
"default": 0,
}
BACKEND_RESET_PEAK_MEMORY_STATS = {
"cuda": torch.cuda.reset_peak_memory_stats,
"cpu": None,
"default": None,
}
BACKEND_MEMORY_ALLOCATED = {
"cuda": torch.cuda.memory_allocated,
"cpu": 0,
"default": 0,
}
BACKEND_SYNCHRONIZE = {
"cuda": torch.cuda.synchronize,
"cpu": None,
"default": None,
}
BACKEND_TORCH_ACCELERATOR_MODULE = {
"cuda": torch.cuda,
"cpu": None,
"default": None,
}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
BACKEND_RESET_MAX_MEMORY_ALLOCATED = {"default": None}
BACKEND_RESET_PEAK_MEMORY_STATS = {"default": None}
BACKEND_MAX_MEMORY_ALLOCATED = {"default": 0}
BACKEND_MEMORY_ALLOCATED = {"default": 0}
BACKEND_SYNCHRONIZE = {"default": None}
BACKEND_TORCH_ACCELERATOR_MODULE = {"default": None}
if is_torch_hpu_available():
BACKEND_MANUAL_SEED["hpu"] = torch.hpu.manual_seed
BACKEND_DEVICE_COUNT["hpu"] = torch.hpu.device_count
BACKEND_TORCH_ACCELERATOR_MODULE["hpu"] = torch.hpu
if is_torch_mlu_available():
BACKEND_EMPTY_CACHE["mlu"] = torch.mlu.empty_cache
BACKEND_MANUAL_SEED["mlu"] = torch.mlu.manual_seed
BACKEND_DEVICE_COUNT["mlu"] = torch.mlu.device_count
BACKEND_TORCH_ACCELERATOR_MODULE["mlu"] = torch.mlu
if is_torch_npu_available():
BACKEND_EMPTY_CACHE["npu"] = torch.npu.empty_cache
BACKEND_MANUAL_SEED["npu"] = torch.npu.manual_seed
BACKEND_DEVICE_COUNT["npu"] = torch.npu.device_count
BACKEND_TORCH_ACCELERATOR_MODULE["npu"] = torch.npu
if is_torch_xpu_available():
BACKEND_EMPTY_CACHE["xpu"] = torch.xpu.empty_cache
BACKEND_MANUAL_SEED["xpu"] = torch.xpu.manual_seed
BACKEND_DEVICE_COUNT["xpu"] = torch.xpu.device_count
BACKEND_RESET_MAX_MEMORY_ALLOCATED["xpu"] = torch.xpu.reset_peak_memory_stats
BACKEND_RESET_PEAK_MEMORY_STATS["xpu"] = torch.xpu.reset_peak_memory_stats
BACKEND_MAX_MEMORY_ALLOCATED["xpu"] = torch.xpu.max_memory_allocated
BACKEND_MEMORY_ALLOCATED["xpu"] = torch.xpu.memory_allocated
BACKEND_SYNCHRONIZE["xpu"] = torch.xpu.synchronize
BACKEND_TORCH_ACCELERATOR_MODULE["xpu"] = torch.xpu
if is_torch_xla_available():
BACKEND_EMPTY_CACHE["xla"] = torch.cuda.empty_cache
BACKEND_MANUAL_SEED["xla"] = torch.cuda.manual_seed
BACKEND_DEVICE_COUNT["xla"] = torch.cuda.device_count
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
def backend_reset_max_memory_allocated(device: str):
return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED)
def backend_reset_peak_memory_stats(device: str):
return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS)
def backend_max_memory_allocated(device: str):
return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED)
def backend_memory_allocated(device: str):
return _device_agnostic_dispatch(device, BACKEND_MEMORY_ALLOCATED)
def backend_synchronize(device: str):
return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE)
def backend_torch_accelerator_module(device: str):
return _device_agnostic_dispatch(device, BACKEND_TORCH_ACCELERATOR_MODULE)
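# Usage sketch (hedged, not part of the original module): these thin wrappers let a test
# seed, synchronize and clean up memory without branching on the accelerator type.
#
#     backend_manual_seed(torch_device, 42)
#     # ... run the model under test ...
#     backend_synchronize(torch_device)
#     backend_empty_cache(torch_device)
#     n_devices = backend_device_count(torch_device)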
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
device_spec_dir, _ = os.path.split(os.path.realpath(device_spec_path))
sys.path.append(device_spec_dir)
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
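# Hedged sketch of what a `TRANSFORMERS_TEST_DEVICE_SPEC` file could contain (the file name and
# function choices are assumptions; only `DEVICE_NAME` and the `*_FN` attribute names come from
# the code above):
#
#     # my_device_spec.py
#     import torch
#     DEVICE_NAME = "privateuse1"
#     MANUAL_SEED_FN = torch.manual_seed
#     EMPTY_CACHE_FN = lambda: None
#     DEVICE_COUNT_FN = lambda: 1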
def compare_pipeline_output_to_hub_spec(output, hub_spec):
missing_keys = []
unexpected_keys = []
all_field_names = {field.name for field in fields(hub_spec)}
matching_keys = sorted([key for key in output if key in all_field_names])
# Fields with a MISSING default are required and must be in the output
for field in fields(hub_spec):
if field.default is MISSING and field.name not in output:
missing_keys.append(field.name)
# All output keys must match either a required or optional field in the Hub spec
for output_key in output:
if output_key not in all_field_names:
unexpected_keys.append(output_key)
if missing_keys or unexpected_keys:
error = ["Pipeline output does not match Hub spec!"]
if matching_keys:
error.append(f"Matching keys: {matching_keys}")
if missing_keys:
error.append(f"Missing required keys in pipeline output: {missing_keys}")
if unexpected_keys:
error.append(f"Keys in pipeline output that are not in Hub spec: {unexpected_keys}")
raise KeyError("\n".join(error))
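# Illustrative sketch (the spec class below is a stand-in defined here, not a real Hub spec):
# the helper raises a KeyError listing missing required keys and unexpected keys when a
# pipeline output drifts from the Hub output specification.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class FakeHubSpec:
#         label: str
#         score: float
#
#     compare_pipeline_output_to_hub_spec({"label": "POSITIVE", "score": 0.99}, FakeHubSpec)  # passes
#     compare_pipeline_output_to_hub_spec({"label": "POSITIVE"}, FakeHubSpec)  # raises KeyError (missing "score")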
@require_torch
def cleanup(device: str, gc_collect=False):
if gc_collect:
gc.collect()
backend_empty_cache(device)
torch._dynamo.reset()
# Type definition of key used in `Expectations` class.
DeviceProperties = tuple[Optional[str], Optional[int], Optional[int]]
# Helper type. Makes creating instances of `Expectations` smoother.
PackedDeviceProperties = tuple[Optional[str], Union[None, int, tuple[int, int]]]
@cache
def get_device_properties() -> DeviceProperties:
"""
Get environment device properties.
"""
if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM:
import torch
major, minor = torch.cuda.get_device_capability()
if IS_ROCM_SYSTEM:
return ("rocm", major, minor)
else:
return ("cuda", major, minor)
elif IS_XPU_SYSTEM:
import torch
# To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def
arch = torch.xpu.get_device_capability()["architecture"]
gen_mask = 0x000000FF00000000
gen = (arch & gen_mask) >> 32
return ("xpu", gen, None)
else:
return (torch_device, None, None)
def unpack_device_properties(
properties: Optional[PackedDeviceProperties] = None,
) -> DeviceProperties:
"""
    Unpack a `PackedDeviceProperties` tuple into a consistently formatted `DeviceProperties` tuple. If `properties` is None, it is fetched.
"""
if properties is None:
return get_device_properties()
device_type, major_minor = properties
if major_minor is None:
major, minor = None, None
elif isinstance(major_minor, int):
major, minor = major_minor, None
else:
major, minor = major_minor
return device_type, major, minor
class Expectations(UserDict[PackedDeviceProperties, Any]):
def get_expectation(self) -> Any:
"""
Find best matching expectation based on environment device properties.
"""
return self.find_expectation(get_device_properties())
def unpacked(self) -> list[tuple[DeviceProperties, Any]]:
return [(unpack_device_properties(k), v) for k, v in self.data.items()]
@staticmethod
def is_default(expectation_key: PackedDeviceProperties) -> bool:
"""
This function returns True if the expectation_key is the Default expectation (None, None).
When an Expectation dict contains a Default value, it is generally because the test existed before Expectations.
        When we modify a test to use Expectations for specific hardware, we don't want to affect the tests on other
        hardware. Thus we set the previous value as the Default expectation with key (None, None) and add a value for
the specific hardware with key (hardware_type, (major, minor)).
"""
return all(p is None for p in expectation_key)
@staticmethod
def score(properties: DeviceProperties, other: DeviceProperties) -> float:
"""
        Returns a score indicating how similar two instances of the `DeviceProperties` tuple are.
Rules are as follows:
* Matching `type` adds one point, semi-matching `type` adds 0.1 point (e.g. cuda and rocm).
* If types match, matching `major` adds another point, and then matching `minor` adds another.
* The Default expectation (None, None) is worth 0.5 point, which is better than semi-matching. More on this
in the `is_default` function.
"""
device_type, major, minor = properties
other_device_type, other_major, other_minor = other
score = 0
# Matching device type, maybe major and minor
if device_type is not None and device_type == other_device_type:
score += 1
if major is not None and major == other_major:
score += 1
if minor is not None and minor == other_minor:
score += 1
# Semi-matching device type, which carries less importance than the default expectation
elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]:
score = 0.1
# Default expectation
if Expectations.is_default(other):
score = 0.5
return score
def find_expectation(self, properties: DeviceProperties = (None, None, None)) -> Any:
"""
Find best matching expectation based on provided device properties. We score each expectation, and to
distinguish between expectations with the same score, we use the major and minor version numbers, prioritizing
most recent versions.
"""
(result_key, result) = max(
self.unpacked(),
key=lambda x: (
Expectations.score(properties, x[0]), # x[0] is a device properties tuple (device_type, major, minor)
x[0][1] if x[0][1] is not None else -1, # This key is the major version, -1 if major is None
x[0][2] if x[0][2] is not None else -1, # This key is the minor version, -1 if minor is None
),
)
if Expectations.score(properties, result_key) == 0:
raise ValueError(f"No matching expectation found for {properties}")
return result
def __repr__(self):
return f"{self.data}"
def patch_torch_compile_force_graph():
"""
Patch `torch.compile` to always use `fullgraph=True`.
This is useful when some `torch.compile` tests are running with `fullgraph=False` and we want to be able to run
them with `fullgraph=True` in some occasion (without introducing new tests) to make sure there is no graph break.
    After PR #40137, `CompileConfig.fullgraph` is `False` by default, so this patch is necessary.
"""
force_fullgraph = os.environ.get("TORCH_COMPILE_FORCE_FULLGRAPH", "")
force_fullgraph = force_fullgraph.lower() in ("yes", "true", "on", "t", "y", "1")
if force_fullgraph:
import torch
orig_method = torch.compile
def patched(*args, **kwargs):
# In `torch_compile`, all arguments except `model` is keyword only argument.
kwargs["fullgraph"] = True
return orig_method(*args, **kwargs)
torch.compile = patched
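# Usage note (an assumption about typical wiring, not stated in this file): this patch is meant
# to be called once at test-session setup and only has an effect when the environment variable
# is set, e.g. `TORCH_COMPILE_FORCE_FULLGRAPH=1 pytest tests/...`.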
def torchrun(script: str, nproc_per_node: int, is_torchrun: bool = True, env: Optional[dict] = None):
"""Run the `script` using `torchrun` command for multi-processing in a subprocess. Captures errors as necessary."""
with tempfile.NamedTemporaryFile(mode="w+", suffix=".py") as tmp:
tmp.write(script)
tmp.flush()
tmp.seek(0)
if is_torchrun:
cmd = (
f"torchrun --nproc_per_node {nproc_per_node} --master_port {get_torch_dist_unique_port()} {tmp.name}"
).split()
else:
cmd = ["python3", tmp.name]
# Note that the subprocess will be waited for here, and raise an error if not successful
try:
_ = subprocess.run(cmd, capture_output=True, env=env, text=True, check=True)
except subprocess.CalledProcessError as e:
raise Exception(f"The following error was captured: {e.stderr}")
| transformers/src/transformers/testing_utils.py/0 | {
"file_path": "transformers/src/transformers/testing_utils.py",
"repo_id": "transformers",
"token_count": 49097
} | 495 |
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from PIL import Image
from ..masking_utils import create_causal_mask
from ..models.auto.auto_factory import _get_model_class
from ..models.auto.configuration_auto import AutoConfig
from ..models.auto.modeling_auto import MODEL_FOR_PRETRAINING_MAPPING, MODEL_MAPPING
from ..models.auto.processing_auto import PROCESSOR_MAPPING_NAMES, AutoProcessor
from ..models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES, AutoTokenizer
from .import_utils import is_torch_available
if is_torch_available():
import torch
import torch.nn as nn
# Print the matrix with words as row labels
GREEN = "\033[92m"
YELLOW = "\033[93m"
RESET = "\033[0m"
BLACK_SQUARE = "■"
WHITE_SQUARE = "⬚"
def generate_attention_matrix_from_mask(
words, mask, img_token="<img>", sliding_window=None, token_type_ids=None, image_seq_length=None
):
"""
Generates an attention matrix from a given attention mask.
Optionally applies a sliding window mask (e.g., for Gemma2/3) and
marks regions where image tokens occur based on the specified `img_token`.
"""
mask = mask.int()
if mask.ndim == 3:
mask = mask[0, :, :]
if mask.ndim == 4:
mask = mask[0, 0, :, :]
n = len(words)
max_word_length = max(len(repr(word)) for word in words)
first_img_idx = 0
output = []
for i, k in enumerate(words):
if k == img_token and not first_img_idx:
first_img_idx = i
mask[i, i] = 2 # Mark yellow regions
if first_img_idx > 0 and (k != img_token or i == n - 1):
if i == n - 1:
i += 1
mask[first_img_idx:i, first_img_idx:i] = 2 # Mark yellow regions
first_img_idx = 0
# Generate sliding window mask (size = 4), excluding img_token
sliding_window_mask = None
if sliding_window is not None:
sliding_window_mask = [[1 if (0 <= i - j < sliding_window) else 0 for j in range(n)] for i in range(n)]
row_dummy = " ".join(
f"{YELLOW}{BLACK_SQUARE}{RESET}"
if mask[0, j]
else f"{GREEN}{BLACK_SQUARE}{RESET}"
if 0 == j
else BLACK_SQUARE
if mask[0, j]
else WHITE_SQUARE
for j in range(n)
)
if token_type_ids is not None:
is_special = token_type_ids == 1
token_type_buckets = torch.where(
(token_type_ids.cumsum(-1) % 5 + is_special).bool(), token_type_ids.cumsum(-1), 0
)
boundaries = torch.arange(0, image_seq_length + 1, image_seq_length)
token_type_buckets = torch.bucketize(token_type_buckets, boundaries=boundaries)
# Print headers
legend = f"{GREEN}{BLACK_SQUARE}{RESET}: i == j (diagonal) {YELLOW}{BLACK_SQUARE}{RESET}: token_type_ids"
output.append(" " + legend)
f_string = " " * (max_word_length + 5) + "Attention Matrix".ljust(len(row_dummy) // 2)
if sliding_window is not None:
f_string += "Sliding Window Mask"
output.append(f_string)
vertical_header = []
for idx, word in enumerate(words):
if mask[idx, idx] == 2:
vertical_header.append([f"{YELLOW}{k}{RESET}" for k in list(str(idx).rjust(len(str(n))))])
else:
vertical_header.append(list(str(idx).rjust(len(str(n)))))
vertical_header = list(map(list, zip(*vertical_header))) # Transpose
for row in vertical_header:
output.append(
(max_word_length + 5) * " " + " ".join(row) + " | " + " ".join(row)
if sliding_window is not None
else ""
)
for i, word in enumerate(words):
word_repr = repr(word).ljust(max_word_length)
colored_word = f"{YELLOW}{word_repr}{RESET}" if img_token in word else word_repr
row_display = " ".join(
f"{YELLOW}{BLACK_SQUARE}{RESET}"
if img_token in words[j] and mask[i, j] and img_token in word
else f"{GREEN}{BLACK_SQUARE}{RESET}"
if i == j
else BLACK_SQUARE
if mask[i, j]
else WHITE_SQUARE
for j in range(n)
)
sliding_window_row = ""
if sliding_window is not None:
sliding_window_row = " ".join(
f"{YELLOW}{BLACK_SQUARE}{RESET}"
if img_token in words[j] and img_token in word and token_type_buckets[0, i] == token_type_buckets[0, j]
else f"{GREEN}{BLACK_SQUARE}{RESET}"
if i == j
else BLACK_SQUARE
if sliding_window_mask[i][j]
else WHITE_SQUARE
for j in range(n)
)
output.append(f"{colored_word}: {str(i).rjust(2)} {row_display} | {sliding_window_row}")
return "\n".join(output)
class AttentionMaskVisualizer:
def __init__(self, model_name: str):
config = AutoConfig.from_pretrained(model_name)
self.image_token = "<img>"
if hasattr(config.get_text_config(), "sliding_window"):
self.sliding_window = getattr(config.get_text_config(), "sliding_window", None)
try:
mapped_cls = _get_model_class(config, MODEL_MAPPING)
except Exception:
mapped_cls = _get_model_class(config, MODEL_FOR_PRETRAINING_MAPPING)
if mapped_cls is None:
raise ValueError(f"Model name {model_name} is not supported for attention visualization")
self.mapped_cls = mapped_cls
class _ModelWrapper(mapped_cls, nn.Module):
def __init__(self, config, model_name):
nn.Module.__init__(self)
self.dummy_module = nn.Linear(1, 1)
self.config = config
self.model = _ModelWrapper(config, model_name)
self.model.to(config.dtype)
self.repo_id = model_name
self.config = config
def __call__(self, input_sentence: str, suffix=""):
self.visualize_attention_mask(input_sentence, suffix=suffix)
def visualize_attention_mask(self, input_sentence: str, suffix=""):
model = self.model
kwargs = {}
image_seq_length = None
if self.config.model_type in PROCESSOR_MAPPING_NAMES:
img = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true"
img = Image.open(requests.get(img, stream=True).raw)
image_seq_length = 5
processor = AutoProcessor.from_pretrained(self.repo_id, image_seq_length=image_seq_length)
if hasattr(processor, "image_token"):
image_token = processor.image_token
else:
image_token = processor.tokenizer.convert_ids_to_tokens([processor.image_token_id])[0]
if image_token:
input_sentence = input_sentence.replace("<img>", image_token)
inputs = processor(images=img, text=input_sentence, suffix=suffix, return_tensors="pt")
self.image_token = processor.tokenizer.convert_ids_to_tokens([processor.image_token_id])[0]
attention_mask = inputs["attention_mask"]
if "token_type_ids" in inputs: # TODO inspect signature of update causal mask
kwargs["token_type_ids"] = inputs["token_type_ids"]
tokens = processor.tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
elif self.config.model_type in TOKENIZER_MAPPING_NAMES:
tokenizer = AutoTokenizer.from_pretrained(self.repo_id)
tokens = tokenizer.tokenize(input_sentence)
attention_mask = tokenizer(input_sentence, return_tensors="pt")["attention_mask"]
else:
raise ValueError(f"Model type {model.config.model_type} does not support attention visualization")
model.config._attn_implementation = "eager"
model.train()
batch_size, seq_length = attention_mask.shape
input_embeds = torch.zeros((batch_size, seq_length, model.config.hidden_size), dtype=self.model.dtype)
cache_position = torch.arange(seq_length)
causal_mask = create_causal_mask(
config=model.config,
input_embeds=input_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=None,
)
if causal_mask is not None:
attention_mask = ~causal_mask.bool()
else:
attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, seq_length, seq_length)
top_bottom_border = "##" * (
len(f"Attention visualization for {self.config.model_type} | {self.mapped_cls}") + 4
) # Box width adjusted to text length
side_border = "##"
print(f"\n{top_bottom_border}")
print(
"##"
+ f" Attention visualization for \033[1m{self.config.model_type}:{self.repo_id}\033[0m {self.mapped_cls.__name__}".center(
len(top_bottom_border)
)
+ " "
+ side_border,
)
print(f"{top_bottom_border}")
f_string = generate_attention_matrix_from_mask(
tokens,
attention_mask,
img_token=self.image_token,
sliding_window=getattr(self.config, "sliding_window", None),
token_type_ids=kwargs.get("token_type_ids"),
image_seq_length=image_seq_length,
)
print(f_string)
print(f"{top_bottom_border}")
| transformers/src/transformers/utils/attention_visualizer.py/0 | {
"file_path": "transformers/src/transformers/utils/attention_visualizer.py",
"repo_id": "transformers",
"token_count": 4526
} | 496 |
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
_backends = ["speech"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["speech"])
class Speech2TextFeatureExtractor(metaclass=DummyObject):
_backends = ["speech"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["speech"])
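# Illustrative note (assumption, not part of the autogenerated file): these dummy classes make
# `from transformers import ASTFeatureExtractor` succeed even when the "speech" extra is not
# installed; instantiating them then raises an informative error instead, e.g.:
#
#   feature_extractor = ASTFeatureExtractor()  # raises if the speech dependencies are missing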
| transformers/src/transformers/utils/dummy_speech_objects.py/0 | {
"file_path": "transformers/src/transformers/utils/dummy_speech_objects.py",
"repo_id": "transformers",
"token_count": 166
} | 497 |
# Copyright 2020 Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
"Format `t` (in seconds) to (h):mm:ss"
t = int(t)
h, m, s = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
# docstyle-ignore
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def text_to_html_table(items):
"Put the texts in `items` in an HTML table."
html_code = """<table border="1" class="dataframe">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar:
"""
A progress bar for display in a notebook.
Class attributes (overridden by derived classes)
- **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`.
- **update_every** (`float`) -- Since calling the time takes some time, we only do it every presumed
`update_every` seconds. The progress bar uses the average time passed up until now to guess the next value
for which it will call the update.
Args:
total (`int`):
The total number of iterations to reach.
prefix (`str`, *optional*):
A prefix to add before the progress bar.
leave (`bool`, *optional*, defaults to `True`):
Whether or not to leave the progress bar once it's completed. You can always call the
[`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear.
parent ([`~notebook.NotebookTrainingTracker`], *optional*):
A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handle
their display. If set, the object passed must have a `display()` method.
width (`int`, *optional*, defaults to 300):
The width (in pixels) that the bar will take.
Example:
```python
import time
pbar = NotebookProgressBar(100)
for val in range(100):
pbar.update(val)
time.sleep(0.07)
pbar.update(100)
```"""
warmup = 5
update_every = 0.2
def __init__(
self,
total: int,
prefix: Optional[str] = None,
leave: bool = True,
parent: Optional["NotebookTrainingTracker"] = None,
width: int = 300,
):
self.total = total
self.prefix = "" if prefix is None else prefix
self.leave = leave
self.parent = parent
self.width = width
self.last_value = None
self.comment = None
self.output = None
self.value = None
self.label = None
if "VSCODE_PID" in os.environ:
self.update_every = 0.5  # Adjusted for smooth updates as HTML rendering is slow on VS Code
# This is the only adjustment required to optimize training HTML rendering
def update(self, value: int, force_update: bool = False, comment: Optional[str] = None):
"""
The main method to update the progress bar to `value`.
Args:
value (`int`):
The value to use. Must be between 0 and `total`.
force_update (`bool`, *optional*, defaults to `False`):
Whether or not to force an update of the internal state and display (by default, the bar will wait for
`value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute
since the last update to avoid adding boilerplate).
comment (`str`, *optional*):
A comment to add on the left of the progress bar.
"""
self.value = value
if comment is not None:
self.comment = comment
if self.last_value is None:
self.start_time = self.last_time = time.time()
self.start_value = self.last_value = value
self.elapsed_time = self.predicted_remaining = None
self.first_calls = self.warmup
self.wait_for = 1
self.update_bar(value)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
current_time = time.time()
self.elapsed_time = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
self.average_time_per_item = self.elapsed_time / (value - self.start_value)
else:
self.average_time_per_item = None
if value >= self.total:
value = self.total
self.predicted_remaining = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
self.predicted_remaining = self.average_time_per_item * (self.total - value)
self.update_bar(value)
self.last_value = value
self.last_time = current_time
if (self.average_time_per_item is None) or (self.average_time_per_item == 0):
self.wait_for = 1
else:
self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
def update_bar(self, value, comment=None):
spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
if self.elapsed_time is None:
self.label = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
self.label = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
if self.average_time_per_item == 0:
self.label += ", +inf it/s"
else:
self.label += f", {1 / self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code))
def close(self):
"Closes the progress bar."
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
"""
An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.
Args:
num_steps (`int`):
The number of steps during training.
column_names (`list[str]`, *optional*):
The list of column names for the metrics table (will be inferred from the first call to
[`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
"""
def __init__(self, num_steps, column_names=None):
super().__init__(num_steps)
self.inner_table = None if column_names is None else [column_names]
self.child_bar = None
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code))
def write_line(self, values):
"""
Write the values in the inner table.
Args:
values (`dict[str, float]`): The values to display.
"""
if self.inner_table is None:
self.inner_table = [list(values.keys()), list(values.values())]
else:
columns = self.inner_table[0]
for key in values:
if key not in columns:
columns.append(key)
self.inner_table[0] = columns
if len(self.inner_table) > 1:
last_values = self.inner_table[-1]
first_column = self.inner_table[0][0]
if last_values[0] != values[first_column]:
# write new line
self.inner_table.append([values.get(c, "No Log") for c in columns])
else:
# update last line
new_values = values
for c in columns:
if c not in new_values:
new_values[c] = last_values[columns.index(c)]
self.inner_table[-1] = [new_values[c] for c in columns]
else:
self.inner_table.append([values[c] for c in columns])
def add_child(self, total, prefix=None, width=300):
"""
Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be
easily updated).
Args:
total (`int`): The number of iterations for the child progress bar.
prefix (`str`, *optional*): A prefix to write on the left of the progress bar.
width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar.
"""
self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
return self.child_bar
def remove_child(self):
"""
Closes the child progress bar.
"""
self.child_bar = None
self.display()
class NotebookProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks or
Google colab.
"""
def __init__(self):
self.training_tracker = None
self.prediction_bar = None
self._force_next_update = False
def on_train_begin(self, args, state, control, **kwargs):
self.first_column = "Epoch" if args.eval_strategy == IntervalStrategy.EPOCH else "Step"
self.training_loss = 0
self.last_log = 0
column_names = [self.first_column] + ["Training Loss"]
if args.eval_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
def on_step_end(self, args, state, control, **kwargs):
epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1,
comment=f"Epoch {epoch}/{state.num_train_epochs}",
force_update=self._force_next_update,
)
self._force_next_update = False
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if not has_length(eval_dataloader):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
else:
self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def on_predict(self, args, state, control, **kwargs):
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
# Only for when there is no evaluation
if args.eval_strategy == IntervalStrategy.NO and "loss" in logs:
values = {"Training Loss": logs["loss"]}
# First column is necessarily Step since we're not in epoch eval strategy
values["Step"] = state.global_step
self.training_tracker.write_line(values)
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
if self.training_tracker is not None:
values = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
values["Training Loss"] = log["loss"]
break
if self.first_column == "Epoch":
values["Epoch"] = int(state.epoch)
else:
values["Step"] = state.global_step
metric_key_prefix = "eval"
for k in metrics:
if k.endswith("_loss"):
metric_key_prefix = re.sub(r"\_loss$", "", k)
_ = metrics.pop("total_flos", None)
_ = metrics.pop("epoch", None)
_ = metrics.pop(f"{metric_key_prefix}_runtime", None)
_ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
for k, v in metrics.items():
splits = k.split("_")
name = " ".join([part.capitalize() for part in splits[1:]])
if name == "Loss":
# Single dataset
name = "Validation Loss"
values[name] = v
self.training_tracker.write_line(values)
self.training_tracker.remove_child()
self.prediction_bar = None
# Evaluation takes a long time so we should force the next update.
self._force_next_update = True
def on_train_end(self, args, state, control, **kwargs):
self.training_tracker.update(
state.global_step,
comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
force_update=True,
)
self.training_tracker = None
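# Illustrative sketch (assumption, not from the source): the callback above is enabled
# automatically when training in a notebook, but it can also be passed explicitly. The model
# and dataset variables are hypothetical.
#
#   from transformers import Trainer, TrainingArguments
#   trainer = Trainer(
#       model=model,
#       args=TrainingArguments(output_dir="out", eval_strategy="epoch"),
#       train_dataset=train_ds,
#       eval_dataset=eval_ds,
#       callbacks=[NotebookProgressCallback()],
#   )
#   trainer.train()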
| transformers/src/transformers/utils/notebook.py/0 | {
"file_path": "transformers/src/transformers/utils/notebook.py",
"repo_id": "transformers",
"token_count": 7036
} | 498 |
# Copyright 2025 Eduard Durech and SGLang team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage:
# RUN_SLOW=1 pytest -s tests/generation/test_flash_attention_parity.py
import unittest
import pytest
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.testing_utils import require_flash_attn, require_flash_attn_3, require_torch_gpu, slow
class FlashAttentionParityTest(unittest.TestCase):
# From https://github.com/sgl-project/sglang/blob/main/python/sglang/test/test_utils.py
def _lcs(self, X, Y):
m = len(X)
n = len(Y)
L = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
L[i][j] = 0
elif X[i - 1] == Y[j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
return L[m][n]
# From https://github.com/sgl-project/sglang/blob/main/python/sglang/test/test_utils.py
def _calculate_rouge_l(self, output_strs_list1, output_strs_list2):
rouge_l_scores = []
for s1, s2 in zip(output_strs_list1, output_strs_list2):
lcs_len = self._lcs(s1, s2)
precision = lcs_len / len(s1) if len(s1) > 0 else 0
recall = lcs_len / len(s2) if len(s2) > 0 else 0
if precision + recall > 0:
fmeasure = (2 * precision * recall) / (precision + recall)
else:
fmeasure = 0.0
rouge_l_scores.append(fmeasure)
return rouge_l_scores
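# Worked example (added for clarity): with sequences X = ["the", "cat", "sat"] and
# Y = ["the", "cat", "ran"], the LCS length is 2, so precision = recall = 2/3 and the
# ROUGE-L F-measure is 2 * (2/3 * 2/3) / (2/3 + 2/3) = 2/3.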
def _benchmark_generation(self, model, inputs, n_warmup=3, n_runs=5):
for _ in range(n_warmup):
model.generate(**inputs, max_new_tokens=20, do_sample=False)
torch.cuda.synchronize()
start_time = torch.cuda.Event(enable_timing=True)
end_time = torch.cuda.Event(enable_timing=True)
start_time.record()
for _ in range(n_runs):
model.generate(**inputs, max_new_tokens=20, do_sample=False)
end_time.record()
torch.cuda.synchronize()
return start_time.elapsed_time(end_time) / n_runs
@pytest.mark.flash_attn_3_test
@require_torch_gpu
@require_flash_attn
@require_flash_attn_3
@slow
def test_flash_attention_2_3_parity(self):
model_id = "meta-llama/Llama-3.2-1B-Instruct"
prompt = "The ETH AI Center is"
# 1. Load FA2 model and tokenizer
model_2 = AutoModelForCausalLM.from_pretrained(
model_id,
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
).to("cuda")
tokenizer = AutoTokenizer.from_pretrained(model_id)
# 2. Load FA3 model
try:
model_3 = AutoModelForCausalLM.from_pretrained(
model_id,
dtype=torch.bfloat16,
attn_implementation="flash_attention_3",
).to("cuda")
except (ValueError, ImportError) as e:
pytest.skip(f"Could not load Flash Attention 3 model, skipping test. Error: {e}")
# 3. Generate with both models
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
with torch.no_grad():
output_2 = model_2.generate(
**inputs, max_new_tokens=20, do_sample=False, output_scores=True, return_dict_in_generate=True
)
output_3 = model_3.generate(
**inputs, max_new_tokens=20, do_sample=False, output_scores=True, return_dict_in_generate=True
)
# 4. Correctness check
# 4a. Logits
logits_2 = torch.stack(output_2.scores)
logits_3 = torch.stack(output_3.scores)
torch.testing.assert_close(logits_2, logits_3, atol=1e-3, rtol=1e-3)
logprobs_2 = torch.nn.functional.log_softmax(logits_2, dim=-1)
logprobs_3 = torch.nn.functional.log_softmax(logits_3, dim=-1)
max_logprob_diff = torch.max(torch.abs(logprobs_2 - logprobs_3)).item()
# 4b. Generated text
text_2 = tokenizer.decode(output_2.sequences[0], skip_special_tokens=True)
text_3 = tokenizer.decode(output_3.sequences[0], skip_special_tokens=True)
rouge_score = self._calculate_rouge_l([text_2], [text_3])[0]
assert rouge_score > 0.99, f"Generated texts do not match (ROUGE-L: {rouge_score})"
# 5. Performance check
with torch.no_grad():
time_2 = self._benchmark_generation(model_2, inputs)
time_3 = self._benchmark_generation(model_3, inputs)
print(f"\n--- Flash Attention {2, 3} Parity Test on {model_id} ---")
print(f"Prompt: '{prompt}'")
print(f"Generated text with Flash Attention 2: {text_2}")
print(f"Generated text with Flash Attention 3: {text_3}")
print(f"ROUGE-L: {rouge_score}")
print(f"Max absolute difference in logprobs: {max_logprob_diff:.5e}")
print(f"Flash Attention 2 latency: {time_2:.2f} ms")
print(f"Flash Attention 3 latency: {time_3:.2f} ms")
print(f"Speed-up: {time_2 / time_3:.2f}x")
print("---")
| transformers/tests/generation/test_flash_attention_parity.py/0 | {
"file_path": "transformers/tests/generation/test_flash_attention_parity.py",
"repo_id": "transformers",
"token_count": 2700
} | 499 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sys
import tempfile
import unittest
from collections import OrderedDict
from pathlib import Path
import pytest
from huggingface_hub import Repository
import transformers
from transformers import BertConfig, GPT2Model, is_safetensors_available, is_torch_available
from transformers.models.auto.configuration_auto import CONFIG_MAPPING
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_torch,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
if is_torch_available():
import torch
from test_module.custom_modeling import CustomModel
from transformers import (
AutoBackbone,
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertModel,
FunnelBaseModel,
FunnelModel,
GenerationMixin,
GPT2Config,
GPT2LMHeadModel,
ResNetBackbone,
RobertaForMaskedLM,
T5Config,
T5ForConditionalGeneration,
TapasConfig,
TapasForQuestionAnswering,
TimmBackbone,
)
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
)
@require_torch
class AutoModelTest(unittest.TestCase):
def setUp(self):
transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def test_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModel.from_pretrained(model_name)
model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertModel)
self.assertEqual(len(loading_info["missing_keys"]), 0)
# When using PyTorch checkpoint, the expected value is `8`. With `safetensors` checkpoint (if it is
# installed), the expected value becomes `7`.
EXPECTED_NUM_OF_UNEXPECTED_KEYS = 7 if is_safetensors_available() else 8
self.assertEqual(len(loading_info["unexpected_keys"]), EXPECTED_NUM_OF_UNEXPECTED_KEYS)
self.assertEqual(len(loading_info["mismatched_keys"]), 0)
self.assertEqual(len(loading_info["error_msgs"]), 0)
@slow
def test_model_for_pretraining_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForPreTraining.from_pretrained(model_name)
model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForPreTraining)
# Only one value should not be initialized and in the missing keys.
for value in loading_info.values():
self.assertEqual(len(value), 0)
@slow
def test_lmhead_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelWithLMHead.from_pretrained(model_name)
model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_causal_lm(self):
model_name = "openai-community/gpt2"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = AutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, GPT2LMHeadModel)
@slow
def test_model_for_masked_lm(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
model_name = "google-t5/t5-base"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model, loading_info = AutoModelForSequenceClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForQuestionAnswering)
@slow
def test_table_question_answering_model_from_pretrained(self):
model_name = "google/tapas-base"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, TapasConfig)
model = AutoModelForTableQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForTableQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TapasForQuestionAnswering)
@slow
def test_token_classification_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForTokenClassification.from_pretrained(model_name)
model, loading_info = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForTokenClassification)
@slow
def test_auto_backbone_timm_model_from_pretrained(self):
# Configs can't be loaded for timm models
model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
with pytest.raises(ValueError):
# We can't pass output_loading_info=True as we're loading from timm
AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TimmBackbone)
# Check kwargs are correctly passed to the backbone
model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(-2, -1))
self.assertEqual(model.out_indices, [-2, -1])
# Check out_features cannot be passed to Timm backbones
with self.assertRaises(ValueError):
_ = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_features=["stage1"])
@slow
def test_auto_backbone_from_pretrained(self):
model = AutoBackbone.from_pretrained("microsoft/resnet-18")
model, loading_info = AutoBackbone.from_pretrained("microsoft/resnet-18", output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, ResNetBackbone)
# Check kwargs are correctly passed to the backbone
model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[-2, -1])
self.assertEqual(model.out_indices, [-2, -1])
self.assertEqual(model.out_features, ["stage3", "stage4"])
model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_features=["stage2", "stage4"])
self.assertEqual(model.out_indices, [2, 4])
self.assertEqual(model.out_features, ["stage2", "stage4"])
def test_from_pretrained_identifier(self):
model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, BertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(model, RobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_pretrained_with_tuple_values(self):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
model = AutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(model, FunnelModel)
config = copy.deepcopy(model.config)
config.architectures = ["FunnelBaseModel"]
model = AutoModel.from_config(config)
self.assertIsInstance(model, FunnelBaseModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model = AutoModel.from_pretrained(tmp_dir)
self.assertIsInstance(model, FunnelBaseModel)
def test_from_pretrained_dynamic_model_local(self):
try:
AutoConfig.register("custom", CustomConfig)
AutoModel.register(CustomConfig, CustomModel)
config = CustomConfig(hidden_size=32)
model = CustomModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in MODEL_MAPPING._extra_content:
del MODEL_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_model_distant(self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test the dynamic module is loaded only once.
reloaded_model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
self.assertIs(model.__class__, reloaded_model.__class__)
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# Test the dynamic module is reloaded if we force it.
reloaded_model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model", trust_remote_code=True, force_download=True
)
self.assertIsNot(model.__class__, reloaded_model.__class__)
# This one uses a relative import to a util file, this checks it is downloaded and used properly.
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test the dynamic module is loaded only once.
reloaded_model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True
)
self.assertIs(model.__class__, reloaded_model.__class__)
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# Test the dynamic module is reloaded if we force it.
reloaded_model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True, force_download=True
)
self.assertIsNot(model.__class__, reloaded_model.__class__)
def test_from_pretrained_dynamic_model_distant_with_ref(self):
model = AutoModel.from_pretrained("hf-internal-testing/ref_to_test_dynamic_model", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# This one uses a relative import to a util file, this checks it is downloaded and used properly.
model = AutoModel.from_pretrained(
"hf-internal-testing/ref_to_test_dynamic_model_with_util", trust_remote_code=True
)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
def test_from_pretrained_dynamic_model_with_period(self):
# We used to have issues where repos with "." in the name would break because the Python
# import machinery would treat that as a directory separator, so we test that case
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_v1.0")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_v1.0", trust_remote_code=False)
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_v1.0", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test that it works with a custom cache dir too
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model_v1.0", trust_remote_code=True, cache_dir=tmp_dir
)
self.assertEqual(model.__class__.__name__, "NewModel")
def test_new_model_registration(self):
AutoConfig.register("custom", CustomConfig)
auto_classes = [
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
]
try:
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(ValueError):
auto_class.register(BertConfig, CustomModel)
auto_class.register(CustomConfig, CustomModel)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
auto_class.register(BertConfig, BertModel)
# Now that the config is registered, it can be used as any other config with the auto-API
tiny_config = BertModelTester(self).get_config()
config = CustomConfig(**tiny_config.to_dict())
model = auto_class.from_config(config)
self.assertIsInstance(model, CustomModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = auto_class.from_pretrained(tmp_dir)
# The model is a CustomModel but from the new dynamically imported class.
self.assertIsInstance(new_model, CustomModel)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
for mapping in (
MODEL_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
):
if CustomConfig in mapping._extra_content:
del mapping._extra_content[CustomConfig]
def test_from_pretrained_dynamic_model_conflict(self):
class NewModelConfigLocal(BertConfig):
model_type = "new-model"
class NewModel(BertModel):
config_class = NewModelConfigLocal
try:
AutoConfig.register("new-model", NewModelConfigLocal)
AutoModel.register(NewModelConfigLocal, NewModel)
# If remote code is not set, the default is to use local
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model")
self.assertEqual(model.config.__class__.__name__, "NewModelConfigLocal")
# If remote code is disabled, we load the local one.
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
self.assertEqual(model.config.__class__.__name__, "NewModelConfigLocal")
# If remote is enabled, we load from the Hub
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
self.assertEqual(model.config.__class__.__name__, "NewModelConfig")
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
if NewModelConfigLocal in MODEL_MAPPING._extra_content:
del MODEL_MAPPING._extra_content[NewModelConfigLocal]
def test_repo_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
):
_ = AutoModel.from_pretrained("bert-base")
def test_revision_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
):
_ = AutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def test_model_file_not_found(self):
with self.assertRaisesRegex(
EnvironmentError,
"hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
):
_ = AutoModel.from_pretrained("hf-internal-testing/config-no-model")
def test_model_from_tf_suggestion(self):
with self.assertRaisesRegex(EnvironmentError, "Use `from_tf=True` to load this model"):
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
def test_model_from_flax_suggestion(self):
with self.assertRaisesRegex(EnvironmentError, "Use `from_flax=True` to load this model"):
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
@unittest.skip("Failing on main")
def test_cached_model_has_minimum_calls_to_head(self):
# Make sure we have cached the model.
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter["GET"], 0)
self.assertEqual(counter["HEAD"], 1)
self.assertEqual(counter.total_calls, 1)
# With a sharded checkpoint
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
with RequestCounter() as counter:
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
self.assertEqual(counter["GET"], 0)
self.assertEqual(counter["HEAD"], 1)
self.assertEqual(counter.total_calls, 1)
def test_attr_not_existing(self):
from transformers.models.auto.auto_factory import _LazyAutoMapping
_CONFIG_MAPPING_NAMES = OrderedDict([("bert", "BertConfig")])
_MODEL_MAPPING_NAMES = OrderedDict([("bert", "GhostModel")])
_MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES)
with pytest.raises(ValueError, match=r"Could not find GhostModel neither in .* nor in .*!"):
_MODEL_MAPPING[BertConfig]
_MODEL_MAPPING_NAMES = OrderedDict([("bert", "BertModel")])
_MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES)
self.assertEqual(_MODEL_MAPPING[BertConfig], BertModel)
_MODEL_MAPPING_NAMES = OrderedDict([("bert", "GPT2Model")])
_MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES)
self.assertEqual(_MODEL_MAPPING[BertConfig], GPT2Model)
def test_dynamic_saving_from_local_repo(self):
with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as tmp_dir_out:
_ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-custom-architecture")
model = AutoModelForCausalLM.from_pretrained(tmp_dir, trust_remote_code=True)
model.save_pretrained(tmp_dir_out)
_ = AutoModelForCausalLM.from_pretrained(tmp_dir_out, trust_remote_code=True)
self.assertTrue((Path(tmp_dir_out) / "modeling_fake_custom.py").is_file())
self.assertTrue((Path(tmp_dir_out) / "configuration_fake_custom.py").is_file())
def test_custom_model_patched_generation_inheritance(self):
"""
Tests that our inheritance patching for generate-compatible models works as expected. Without this feature,
old Hub models lose the ability to call `generate`.
"""
model = AutoModelForCausalLM.from_pretrained(
"hf-internal-testing/test_dynamic_model_generation", trust_remote_code=True
)
self.assertTrue(model.__class__.__name__ == "NewModelForCausalLM")
# It inherits from GenerationMixin. This means it can `generate`. Because `PreTrainedModel` is scheduled to
# stop inheriting from `GenerationMixin` in v4.50, this check will fail if patching is not present.
self.assertTrue(isinstance(model, GenerationMixin))
# More precisely, it directly inherits from GenerationMixin. This check would fail prior to v4.45 (inheritance
# patching was added in v4.45)
self.assertTrue("GenerationMixin" in str(model.__class__.__bases__))
| transformers/tests/models/auto/test_modeling_auto.py/0 | {
"file_path": "transformers/tests/models/auto/test_modeling_auto.py",
"repo_id": "transformers",
"token_count": 11161
} | 500 |
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Blenderbot model."""
import tempfile
import unittest
from transformers import BlenderbotConfig, is_torch_available
from transformers.testing_utils import (
backend_empty_cache,
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_fp16,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotTokenizer
from transformers.models.blenderbot.modeling_blenderbot import (
BlenderbotDecoder,
BlenderbotEncoder,
BlenderbotForCausalLM,
)
def prepare_blenderbot_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class BlenderbotModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=50,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
3,
)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return BlenderbotConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def get_pipeline_config(self):
config = self.get_config()
config.max_position_embeddings = 100
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BlenderbotModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and next attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = BlenderbotModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = BlenderbotEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = BlenderbotDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class BlenderbotModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (BlenderbotModel, BlenderbotForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": BlenderbotModel,
"summarization": BlenderbotForConditionalGeneration,
"text-generation": BlenderbotForCausalLM,
"text2text-generation": BlenderbotForConditionalGeneration,
"translation": BlenderbotForConditionalGeneration,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
fx_compatible = True
test_pruning = False
test_missing_keys = False
def setUp(self):
self.model_tester = BlenderbotModelTester(self)
self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = BlenderbotForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
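# bare `raise` with no active exception raises a RuntimeError, which is caught below
# so that the detailed difference message can be built and reported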
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
@unittest.skipUnless(torch_device != "cpu", "3B test too slow on CPU.")
@require_torch
@require_sentencepiece
@require_tokenizers
class Blenderbot3BIntegrationTests(unittest.TestCase):
ckpt = "facebook/blenderbot-3B"
@cached_property
def tokenizer(self):
return BlenderbotTokenizer.from_pretrained(self.ckpt)
@slow
def test_generation_from_short_input_same_as_parlai_3B(self):
FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
backend_empty_cache(torch_device)
model = BlenderbotForConditionalGeneration.from_pretrained(self.ckpt).half().to(torch_device)
src_text = ["Sam"]
model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device)
generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
generated_txt = self.tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
assert generated_txt[0].strip() == tgt_text
src_text = (
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel"
" like i'm going to throw up.\nand why is that?"
)
model_inputs = self.tokenizer([src_text], return_tensors="pt").to(torch_device)
generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)[0]
reply = self.tokenizer.decode(generated_ids, **TOK_DECODE_KW)
assert "I think it's because we are so worried about what people think of us." == reply.strip()
del model
class BlenderbotStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=50,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = BlenderbotConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = BlenderbotDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
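# caching is enabled via the config, so the default call matches the explicit use_cache=True call;
# disabling the cache drops the past_key_values entry, hence one fewer element in the output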
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
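# the last position of the no-past output corresponds to position 0 of the cached output
# (the single appended token)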
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = BlenderbotDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# past_key_values = model(input_ids, use_cache=True)["past_key_values"]
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
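# the corrupted position lies in the masked-out second half of the sequence,
# so changing it must not affect either forward pass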
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(
next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, use_cache=True
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class BlenderbotStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (BlenderbotDecoder, BlenderbotForCausalLM) if is_torch_available() else ()
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = BlenderbotStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@unittest.skip(reason="decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="Decoder cannot keep gradients")
def test_flex_attention_with_grads(self):
return
| transformers/tests/models/blenderbot/test_modeling_blenderbot.py/0 | {
"file_path": "transformers/tests/models/blenderbot/test_modeling_blenderbot.py",
"repo_id": "transformers",
"token_count": 9721
} | 501 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch chameleon model."""
import copy
import unittest
import requests
from parameterized import parameterized
from transformers import ChameleonConfig, is_torch_available, is_vision_available, set_seed
from transformers.testing_utils import (
Expectations,
require_bitsandbytes,
require_read_token,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_vision_available():
from PIL import Image
if is_torch_available():
import torch
from transformers import (
ChameleonForConditionalGeneration,
ChameleonModel,
ChameleonProcessor,
)
class ChameleonModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=35,
is_training=False,
use_input_mask=True,
use_labels=True,
vocab_size=99,
image_token_id=4,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
vq_num_embeds=5,
vq_embed_dim=5,
vq_channel_multiplier=[1, 4],
vq_img_token_start_id=10, # has to be less than vocab size when added with vq_num_embeds
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.image_token_id = image_token_id
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
self.vq_num_embeds = vq_num_embeds
self.vq_embed_dim = vq_embed_dim
self.vq_channel_multiplier = vq_channel_multiplier
self.vq_img_token_start_id = vq_img_token_start_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
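# a lower-triangular mask over [batch, seq] gives each batch example a different attended prefix length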
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
# create a dummy vocab map for image2bpe mapping in case it needs remapping
# we assume the vocab size is big enough to account for image tokens somewhere near the beginning,
# the same way as in a real checkpoint, where image tokens sit in the first half of the embeddings
# we will need `vq_num_embeds` such tokens
vocab_map = {i: chr(i) for i in range(self.vocab_size)}
vocab_map[self.image_token_id] = "<image>"
start = self.vq_img_token_start_id
end = self.vq_img_token_start_id + self.vq_num_embeds
for i in range(start, end):
image_token_infix = "".join(chr(ord("A") + int(c)) for c in str(i))
# dummy str for each image token, anything starting with IMGIMG
vocab_map[i] = f"IMGIMG{image_token_infix}Z"
return ChameleonConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
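# invert the id -> string map built above so `vocabulary_map` is keyed by token string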
vocabulary_map={v: k for k, v in vocab_map.items()},
vq_config=self.get_vq_config(),
)
def get_vq_config(self):
return {
"embed_dim": self.vq_embed_dim,
"num_embeddings": self.vq_num_embeds,
"latent_channels": self.vq_embed_dim,
"in_channels": 3,
"base_channels": 32, # we have a GroupNorm of 32 groups, so can't do less
"channel_multiplier": self.vq_channel_multiplier,
}
def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = ChameleonModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ChameleonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ChameleonModel, ChameleonForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": ChameleonModel,
"text-generation": ChameleonForConditionalGeneration,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
fx_compatible = False
def setUp(self):
self.model_tester = ChameleonModelTester(self)
self.config_tester = ConfigTester(self, config_class=ChameleonConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@parameterized.expand([("linear",), ("dynamic",)])
def test_model_rope_scaling(self, scaling_type):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
short_input = ids_tensor([1, 10], config.vocab_size)
long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
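# the long input exceeds `max_position_embeddings`, so dynamic RoPE scaling has to kick in for it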
set_seed(42) # Fixed seed at init time so the two models get the same random weights
original_model = ChameleonModel(config)
original_model.to(torch_device)
original_model.eval()
original_short_output = original_model(short_input).last_hidden_state
original_long_output = original_model(long_input).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
config.rope_scaling = {"type": scaling_type, "factor": 10.0}
scaled_model = ChameleonModel(config)
scaled_model.to(torch_device)
scaled_model.eval()
scaled_short_output = scaled_model(short_input).last_hidden_state
scaled_long_output = scaled_model(long_input).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@unittest.skip("Chameleon forces some token ids to be -inf!")
def test_batching_equivalence(self):
pass
@unittest.skip("Chameleon VQ model cannot be squishes more due to hardcoded layer params in model code")
def test_model_is_small(self):
pass
class ChameleonVision2SeqModelTester(ChameleonModelTester):
def __init__(self, parent, image_size=10, **kwargs):
super().__init__(parent, **kwargs)
self.image_size = image_size
self.image_seq_length = 25
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
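# replace any accidental image tokens with padding, then place exactly `image_seq_length`
# image tokens at the start of each sequence so they line up with the image features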
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[:, : self.image_seq_length] = self.image_token_id
attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
pixel_values = floats_tensor([self.batch_size, 3, self.image_size, self.image_size])
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ChameleonVision2SeqModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (ChameleonModel, ChameleonForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"image-text-to-text": ChameleonForConditionalGeneration,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
fx_compatible = False
def setUp(self):
self.model_tester = ChameleonVision2SeqModelTester(self)
self.config_tester = ConfigTester(self, config_class=ChameleonConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip("Chameleon forces some token ids to be -inf!")
def test_batching_equivalence(self):
pass
@unittest.skip("Chameleon cannot do offload because it uses `self.linear.weight` in forward")
def test_cpu_offload(self):
pass
@unittest.skip("Chameleon cannot do offload because it uses `self.linear.weight` in forward")
def test_disk_offload_bin(self):
pass
@unittest.skip("Chameleon cannot do offload because it uses `self.linear.weight` in forward")
def test_disk_offload_safetensors(self):
pass
@unittest.skip("Chameleon VQ model cannot be squishes more due to hardcoded layer params in model code")
def test_model_is_small(self):
pass
@unittest.skip("Chameleon applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Chameleon applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also we need to test multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict) # the below tests modify dict in-place
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
@require_torch
class ChameleonIntegrationTest(unittest.TestCase):
@slow
@require_bitsandbytes
@require_read_token
def test_model_7b(self):
model = ChameleonForConditionalGeneration.from_pretrained(
"facebook/chameleon-7b", load_in_4bit=True, device_map="auto"
)
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
image = Image.open(
requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
)
prompt = "<image>Describe what do you see here and tell me about the history behind it?"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, torch.float16)
# greedy generation outputs
EXPECTED_TEXT_COMPLETIONS = Expectations(
{
("xpu", 3): ['Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot in the center representing the star Altair. The star map is set against a black background, with the constellations visible in the night'],
("cuda", 7): ['Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot in the center representing the star Alpha Centauri. The star map is a representation of the night sky, showing the positions of stars in'],
("cuda", 8): ['Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot representing the position of the star Alpha Centauri. Alpha Centauri is the brightest star in the constellation Centaurus and is located'],
}
) # fmt: skip
EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation()
generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
@require_bitsandbytes
@require_read_token
def test_model_7b_batched(self):
model = ChameleonForConditionalGeneration.from_pretrained(
"facebook/chameleon-7b", load_in_4bit=True, device_map="auto"
)
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
image = Image.open(
requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
)
image_2 = Image.open(
requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw
)
prompts = [
"<image>Describe what do you see here and tell me about the history behind it?",
"What constellation is this image showing?<image>",
]
inputs = processor(images=[image, image_2], text=prompts, padding=True, return_tensors="pt").to(
model.device, torch.float16
)
# greedy generation outputs
EXPECTED_TEXT_COMPLETIONS = Expectations(
{
("xpu", 3): [
'Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot in the center representing the star Alpha Centauri. The star map is a representation of the night sky, showing the positions of stars in',
'What constellation is this image showing?The image shows the constellation of Orion.The image shows the constellation of Orion.The image shows the constellation of Orion.The image shows the constellation of Orion.',
],
("cuda", 7): [
'Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot representing the position of the star Alpha Centauri. Alpha Centauri is the brightest star in the constellation Centaurus and is located',
'What constellation is this image showing?The image shows the constellation of Orion.The image shows the constellation of Orion.The image shows the constellation of Orion.The image shows the constellation of Orion.',
],
("cuda", 8): [
'Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot representing the position of the star Alpha Centauri. Alpha Centauri is the brightest star in the constellation Centaurus and is located',
'What constellation is this image showing?The image shows the constellation of Orion.The image shows the constellation of Orion.The image shows the constellation of Orion.The image shows the constellation of Orion.',
],
}
) # fmt: skip
EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation()
generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
@require_bitsandbytes
@require_read_token
def test_model_7b_multi_image(self):
model = ChameleonForConditionalGeneration.from_pretrained(
"facebook/chameleon-7b", load_in_4bit=True, device_map="auto"
)
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
image = Image.open(
requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
)
image_2 = Image.open(
requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw
)
prompt = "What do these two images have in common?<image><image>"
inputs = processor(images=[image, image_2], text=prompt, return_tensors="pt").to(model.device, torch.float16)
# greedy generation outputs
EXPECTED_TEXT_COMPLETION = ['What do these two images have in common?The two images show a connection between the night sky and the internet. The first image shows a starry night sky, with the stars arranged in a pattern that resembles the structure of the internet. The'] # fmt: skip
generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| transformers/tests/models/chameleon/test_modeling_chameleon.py/0 | {
"file_path": "transformers/tests/models/chameleon/test_modeling_chameleon.py",
"repo_id": "transformers",
"token_count": 8716
} | 502 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Cohere2 model."""
import unittest
import pytest
from packaging import version
from parameterized import parameterized
from pytest import mark
from transformers import AutoModelForCausalLM, AutoTokenizer, Cohere2Config, is_torch_available, pipeline
from transformers.generation.configuration_utils import GenerationConfig
from transformers.testing_utils import (
Expectations,
cleanup,
is_flash_attn_2_available,
require_flash_attn,
require_read_token,
require_torch,
require_torch_large_accelerator,
slow,
torch_device,
)
from ...models.cohere.test_modeling_cohere import CohereModelTest, CohereModelTester
from ...test_configuration_common import ConfigTester
if is_torch_available():
import torch
from transformers import (
Cohere2ForCausalLM,
Cohere2Model,
)
class Cohere2ModelTester(CohereModelTester):
config_class = Cohere2Config
if is_torch_available():
model_class = Cohere2Model
for_causal_lm_class = Cohere2ForCausalLM
@require_torch
class Cohere2ModelTest(CohereModelTest, unittest.TestCase):
all_model_classes = (Cohere2Model, Cohere2ForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": Cohere2Model,
"text-generation": Cohere2ForCausalLM,
}
if is_torch_available()
else {}
)
_is_stateful = True
def setUp(self):
self.model_tester = Cohere2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=Cohere2Config, hidden_size=37)
@unittest.skip("Failing because of unique cache (HybridCache)")
def test_model_outputs_equivalence(self, **kwargs):
pass
@unittest.skip("Cohere2's forcefully disables sdpa due to softcapping")
def test_sdpa_can_dispatch_non_composite_models(self):
pass
@unittest.skip("Cohere2's eager attn/sdpa attn outputs are expected to be different")
def test_eager_matches_sdpa_generate(self):
pass
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip("Cohere2 has HybridCache which is not compatible with assisted decoding")
def test_assisted_decoding_matches_greedy_search(self, assistant_type):
pass
@unittest.skip("Cohere2 has HybridCache which is not compatible with assisted decoding")
def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type):
pass
@pytest.mark.generate
@unittest.skip("Cohere2 has HybridCache which is not compatible with assisted decoding")
def test_assisted_decoding_sample(self):
pass
@unittest.skip("Cohere2 has HybridCache which is not compatible with dola decoding")
def test_dola_decoding_sample(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support continue from past kv")
def test_generate_continue_from_past_key_values(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support contrastive generation")
def test_contrastive_generate(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support contrastive generation")
def test_contrastive_generate_dict_outputs_use_cache(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support contrastive generation")
def test_contrastive_generate_low_memory(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
def test_generate_with_static_cache(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
@unittest.skip("Cohere2 has HybridCache and doesn't support progressive generation using input embeds.")
def test_generate_continue_from_inputs_embeds(self):
pass
@slow
@require_read_token
@require_torch_large_accelerator
class Cohere2IntegrationTest(unittest.TestCase):
input_text = ["Hello I am doing", "Hi today"]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model_bf16(self):
model_id = "CohereForAI/c4ai-command-r7b-12-2024"
EXPECTED_TEXTS = [
"<BOS_TOKEN>Hello I am doing a project for a school assignment and I need to create a website for a fictional company. I have",
"<PAD><PAD><BOS_TOKEN>Hi today I'm going to show you how to make a simple and easy to make a chocolate cake.\n",
]
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, attn_implementation="eager").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=False)
self.assertEqual(output_text, EXPECTED_TEXTS)
def test_model_fp16(self):
model_id = "CohereForAI/c4ai-command-r7b-12-2024"
# fmt: off
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ["<BOS_TOKEN>Hello I am doing a project for my school and I need to create a website for a fictional company. I have the", "<PAD><PAD><BOS_TOKEN>Hi today I'm going to show you how to make a simple and easy to make a chocolate cake.\n"],
(None, None): ["<BOS_TOKEN>Hello I am doing a project for a school assignment and I need to create a website for a fictional company. I have", "<PAD><PAD><BOS_TOKEN>Hi today I'm going to show you how to make a simple and easy to make a chocolate cake.\n"],
("cuda", 8): ['<BOS_TOKEN>Hello I am doing a project for my school and I need to create a website for a fictional company. I have the', "<PAD><PAD><BOS_TOKEN>Hi today I'm going to show you how to make a simple and easy to make a chocolate cake.\n"],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
# fmt: on
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, attn_implementation="eager").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=False)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_pipeline_bf16(self):
# See https://github.com/huggingface/transformers/pull/31747 -- pipeline was broken for Cohere2 before this PR
model_id = "CohereForAI/c4ai-command-r7b-12-2024"
# EXPECTED_TEXTS should match the same non-pipeline test, minus the special tokens
EXPECTED_TEXTS = [
"Hello I am doing a project for a school assignment and I need to create a website for a fictional company. I have",
"Hi today I'm going to show you how to make a simple and easy to make a chocolate cake.\n",
]
model = AutoModelForCausalLM.from_pretrained(
model_id, dtype=torch.bfloat16, attn_implementation="flex_attention"
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
output = pipe(self.input_text, max_new_tokens=20, do_sample=False, padding=True)
self.assertEqual(output[0][0]["generated_text"], EXPECTED_TEXTS[0])
self.assertEqual(output[1][0]["generated_text"], EXPECTED_TEXTS[1])
@require_flash_attn
@mark.flash_attn_test
def test_model_flash_attn(self):
# See https://github.com/huggingface/transformers/issues/31953 --- flash attn was generating garbage for Gemma2, especially in long context
model_id = "CohereForAI/c4ai-command-r7b-12-2024"
EXPECTED_TEXTS = [
'<BOS_TOKEN>Hello I am doing a project for my school and I need to create a website for a fictional company. I have the logo and the name of the company. I need a website that is simple and easy to navigate. I need a home page, about us, services, contact us, and a gallery. I need the website to be responsive and I need it to be able to be hosted on a server. I need the website to be done in a week. I need the website to be done in HTML,',
"<PAD><PAD><BOS_TOKEN>Hi today I'm going to show you how to make a simple and easy to make a chocolate cake.\n\nThis recipe is very simple and easy to make.\n\nYou will need:\n\n* 2 cups of flour\n* 1 cup of sugar\n* 1/2 cup of cocoa powder\n* 1 teaspoon of baking powder\n* 1 teaspoon of baking soda\n* 1/2 teaspoon of salt\n* 2 eggs\n* 1 cup of milk\n",
] # fmt: skip
model = AutoModelForCausalLM.from_pretrained(
model_id, attn_implementation="flash_attention_2", dtype="float16"
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=100, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=False)
self.assertEqual(output_text, EXPECTED_TEXTS)
@pytest.mark.torch_export_test
def test_export_static_cache(self):
if version.parse(torch.__version__) < version.parse("2.5.0"):
self.skipTest(reason="This test requires torch >= 2.5 to run.")
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
convert_and_export_with_cache,
)
model_id = "CohereForAI/c4ai-command-r7b-12-2024"
# fmt: off
EXPECTED_TEXT_COMPLETIONS = Expectations(
{
("xpu", 3): ["Hello I am doing a project for a friend and I am stuck on a few things. I have a 2004 Ford F-"],
(None, None): ["Hello I am doing a project on the effects of social media on mental health. I have a few questions. 1. What is the relationship"],
("cuda", 8): ['Hello I am doing a project for a friend and I am stuck on a few things. I have a 2004 Ford F-'],
}
)
EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation()
# fmt: on
tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="<PAD>", padding_side="right")
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = AutoModelForCausalLM.from_pretrained(
"CohereForAI/c4ai-command-r7b-12-2024",
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=30,
cache_config={
"batch_size": batch_size,
"max_cache_len": 30,
},
),
)
prompts = ["Hello I am doing"]
prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = 30 - prompt_token_ids.shape[-1]
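# the prompt plus the generated tokens must fit within the static cache / max_length of 30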
# Static Cache + export
exported_program = convert_and_export_with_cache(model)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
@parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)])
@require_read_token
def test_generation_beyond_sliding_window(self, attn_implementation: str):
"""Test that we can correctly generate beyond the sliding window. This is non trivial as
we need to correctly slice the attention mask in all cases (because we use a HybridCache).
Outputs for every attention functions should be coherent and identical.
"""
if attn_implementation == "flash_attention_2" and not is_flash_attn_2_available():
self.skipTest("FlashAttention2 is required for this test.")
# TODO: can we specify not to compile when `flex` attention is used?
if attn_implementation == "flex_attention":
self.skipTest(
"Flex attention will compile (see `compile_friendly_flex_attention`) which causes triton issue."
)
if torch_device == "xpu" and attn_implementation == "flash_attention_2":
self.skipTest(reason="Intel XPU doesn't support falsh_attention_2 as of now.")
model_id = "CohereForAI/c4ai-command-r7b-12-2024"
EXPECTED_COMPLETIONS = [
" the mountains, the lakes, the rivers, the forests, the trees, the birds, the animals",
", green, yellow, orange, purple, pink, brown, black, white, grey, silver",
]
input_text = [
"This is a nice place. " * 200 + "I really enjoy the scenery,", # This is larger than 1024 tokens
"A list of colors: red, blue", # This will almost all be padding tokens
]
tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
# We use `sliding_window=1024` instead of the origin value `4096` in the config to avoid GPU OOM
model = AutoModelForCausalLM.from_pretrained(
model_id, attn_implementation=attn_implementation, dtype=torch.float16, sliding_window=1024
).to(torch_device)
# Make sure prefill is larger than sliding window
input_size = inputs.input_ids.shape[-1]
self.assertTrue(input_size > model.config.sliding_window)
out = model.generate(**inputs, max_new_tokens=20)[:, input_size:]
output_text = tokenizer.batch_decode(out)
self.assertEqual(output_text, EXPECTED_COMPLETIONS)
| transformers/tests/models/cohere2/test_modeling_cohere2.py/0 | {
"file_path": "transformers/tests/models/cohere2/test_modeling_cohere2.py",
"repo_id": "transformers",
"token_count": 6032
} | 503 |
# Copyright 2018 Microsoft Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
class DebertaModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
relative_attention=False,
position_biased_input=True,
pos_att_type="None",
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
relative_attention=self.relative_attention,
position_biased_input=self.position_biased_input,
pos_att_type=self.pos_att_type,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
return config
def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size()), [])
def create_and_check_deberta_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaModel(config=config)
model.to(torch_device)
model.eval()
sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
sequence_output = model(input_ids)[0]
self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
def create_and_check_deberta_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_deberta_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = DebertaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_deberta_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = DebertaForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_deberta_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_torchscript = False
test_pruning = False
test_head_masking = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = DebertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_deberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/deberta-base"
model = DebertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker")
def test_torch_fx_output_loss(self):
pass
@unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker")
def test_torch_fx(self):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
@slow
def test_inference_no_head(self):
model = DebertaModel.from_pretrained("microsoft/deberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
)
torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
| transformers/tests/models/deberta/test_modeling_deberta.py/0 | {
"file_path": "transformers/tests/models/deberta/test_modeling_deberta.py",
"repo_id": "transformers",
"token_count": 5349
} | 504 |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeepseekVLHybridImageProcessor
if is_torchvision_available():
from transformers import DeepseekVLHybridImageProcessorFast
class DeepseekVLHybridImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
high_res_size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
high_res_image_mean=[0.5, 0.5, 0.5],
high_res_image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 18, "width": 18}
high_res_size = high_res_size if high_res_size is not None else {"height": 36, "width": 36}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.high_res_size = high_res_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.high_res_image_mean = high_res_image_mean
self.high_res_image_std = high_res_image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"high_res_image_mean": self.high_res_image_mean,
"high_res_image_std": self.high_res_image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"high_res_size": self.high_res_size,
}
def expected_output_image_shape(self, images):
max_size = max(self.size["height"], self.size["width"])
return self.num_channels, max_size, max_size
def expected_output_high_res_image_shape(self, images):
max_size = max(self.high_res_size["height"], self.high_res_size["width"])
return self.num_channels, max_size, max_size
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class DeepseekVLHybridImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DeepseekVLHybridImageProcessor if is_vision_available() else None
fast_image_processing_class = DeepseekVLHybridImageProcessorFast if is_torchvision_available() else None
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.setUp with ViT->DeepseekVLHybrid
def setUp(self):
super().setUp()
self.image_processor_tester = DeepseekVLHybridImageProcessingTester(self)
@property
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.image_processor_dict with ViT->DeepseekVLHybrid
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.test_image_processor_from_dict_with_kwargs
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "high_res_image_mean"))
self.assertTrue(hasattr(image_processing, "high_res_image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "high_res_size"))
def test_call_pil_high_res(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
image_inputs
)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_numpy_high_res(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
image_inputs
)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_pytorch_high_res(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
image_inputs
)
encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(self.image_processor_tester.batch_size, *expected_output_image_shape),
)
@require_vision
@require_torch
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = Image.open(
requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(
encoding_slow.high_res_pixel_values, encoding_fast.high_res_pixel_values
)
@require_vision
@require_torch
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors=None)
encoding_fast = image_processor_fast(dummy_images, return_tensors=None)
# Overwrite as the outputs are not always all of the same shape (kept for BC)
for i in range(len(encoding_slow.pixel_values)):
self._assert_slow_fast_tensors_equivalence(
torch.from_numpy(encoding_slow.pixel_values[i]), encoding_fast.pixel_values[i]
)
for i in range(len(encoding_slow.high_res_pixel_values)):
self._assert_slow_fast_tensors_equivalence(
torch.from_numpy(encoding_slow.high_res_pixel_values[i]), encoding_fast.high_res_pixel_values[i]
)
@unittest.skip(reason="Not supported")
def test_call_numpy_4_channels(self):
pass
| transformers/tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py/0 | {
"file_path": "transformers/tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py",
"repo_id": "transformers",
"token_count": 5347
} | 505 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DETR model."""
import inspect
import math
import unittest
from transformers import DetrConfig, ResNetConfig, is_torch_available, is_vision_available
from transformers.testing_utils import Expectations, require_timm, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DetrForObjectDetection, DetrForSegmentation, DetrModel
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrModelTester:
def __init__(
self,
parent,
batch_size=8,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_queries=12,
num_channels=3,
min_size=200,
max_size=200,
n_targets=8,
num_labels=91,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.n_targets = n_targets
self.num_labels = num_labels
# we also set the expected seq length for both encoder and decoder
self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32)
self.decoder_seq_length = self.num_queries
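# NOTE: the factor of 32 assumes the backbone's overall stride, so the encoder sequence length is the number of
# positions on the final feature map (ceil(H/32) * ceil(W/32)), while the decoder length is simply the number of queries.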
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size])
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, pixel_mask, labels
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=10,
hidden_sizes=[10, 20, 30, 40],
depths=[1, 1, 2, 1],
hidden_act="relu",
num_labels=3,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
)
return DetrConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
num_queries=self.num_queries,
num_labels=self.num_labels,
use_timm_backbone=False,
backbone_config=resnet_config,
backbone=None,
use_pretrained_backbone=False,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def create_and_check_detr_model(self, config, pixel_values, pixel_mask, labels):
model = DetrModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)
)
def create_and_check_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
model = DetrForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
class DetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DetrModel,
DetrForObjectDetection,
DetrForSegmentation,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-feature-extraction": DetrModel,
"image-segmentation": DetrForSegmentation,
"object-detection": DetrForObjectDetection,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_torchscript = False
test_pruning = False
test_head_masking = False
test_missing_keys = False
zero_init_hidden_state = True
test_torch_exportable = True
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ in ["DetrForObjectDetection", "DetrForSegmentation"]:
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
target["masks"] = torch.ones(
self.model_tester.n_targets,
self.model_tester.min_size,
self.model_tester.max_size,
device=torch_device,
dtype=torch.float,
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = DetrModelTester(self)
self.config_tester = ConfigTester(self, config_class=DetrConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_detr_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_detr_model(*config_and_inputs)
def test_detr_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_detr_object_detection_head_model(*config_and_inputs)
# TODO: check if this works again for PyTorch 2.x.y
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="DETR does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="DETR does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="DETR does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="DETR is not a generative model")
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="DETR does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@slow
@unittest.skip(reason="TODO Niels: fix me!")
def test_model_outputs_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = self.model_tester.decoder_seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
decoder_key_length = self.model_tester.decoder_seq_length
encoder_key_length = self.model_tester.encoder_seq_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
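# eager attention is requested so that attention weights are actually materialized; sdpa/flash implementations do not return them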
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes
if model_class.__name__ == "DetrForObjectDetection":
correct_outlen += 2
# Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks
if model_class.__name__ == "DetrForSegmentation":
correct_outlen += 3
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_retain_grad_hidden_states_attentions(self):
# removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_auxiliary_loss(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.auxiliary_loss = True
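# with auxiliary_loss enabled, DETR also returns the intermediate predictions of every decoder layer except the
# final one, which is why num_hidden_layers - 1 auxiliary outputs are expected below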
# only test for object detection and segmentation model
for model_class in self.all_model_classes[1:]:
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
outputs = model(**inputs)
self.assertIsNotNone(outputs.auxiliary_outputs)
self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = ["pixel_values", "pixel_mask"]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else []
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["pixel_values", "pixel_mask"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.backbone = "tf_mobilenetv3_small_075"
config.backbone_config = None
config.use_timm_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
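# out_indices selects which backbone stages are exposed as feature maps; the checks below confirm that the three
# requested stages were propagated through to the conv encoder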
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "DetrForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "DetrForSegmentation":
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
def test_hf_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Load a pretrained HF checkpoint as backbone
config.backbone = "microsoft/resnet-18"
config.backbone_config = None
config.use_timm_backbone = False
config.use_pretrained_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "DetrForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "DetrForSegmentation":
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
def test_greyscale_images(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# use greyscale pixel values
inputs_dict["pixel_values"] = floats_tensor(
[self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size]
)
# let's set num_channels to 1
config.num_channels = 1
config.backbone_config.num_channels = 1
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertTrue(outputs)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
configs_no_init.init_xavier_std = 1e9
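# _config_zero_init zeroes the usual init std, while the huge xavier std should blow the bbox_attention weights
# well past the 1e5 threshold checked below; every other parameter should stay at 0.0 or 1.0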
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if "bbox_attention" in name and "bias" not in name:
self.assertLess(
100000,
abs(param.data.max().item()),
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_timm
@require_vision
@slow
class DetrModelIntegrationTestsTimmBackbone(unittest.TestCase):
@cached_property
def default_image_processor(self):
return DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") if is_vision_available() else None
def test_inference_no_head(self):
model = DetrModel.from_pretrained("facebook/detr-resnet-50").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**encoding)
expected_shape = torch.Size((1, 100, 256))
assert outputs.last_hidden_state.shape == expected_shape
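# Expectations maps (device_type, major version) keys to hardware-specific reference values; get_expectation()
# returns the entry matching the current accelerator, with (None, None) as the generic fallback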
expected_slices = Expectations(
{
(None, None):
[
[0.0616, -0.5146, -0.4032],
[-0.7629, -0.4934, -1.7153],
[-0.4768, -0.6403, -0.7826],
],
("rocm", (9, 5)):
[
[ 0.0616, -0.5146, -0.4032],
[-0.7629, -0.4934, -1.7153],
[-0.4768, -0.6403, -0.7826],
],
}
) # fmt: skip
expected_slice = torch.tensor(expected_slices.get_expectation(), device=torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
def test_inference_object_detection_head(self):
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
pixel_values = encoding["pixel_values"].to(torch_device)
pixel_mask = encoding["pixel_mask"].to(torch_device)
with torch.no_grad():
outputs = model(pixel_values, pixel_mask)
# verify outputs
expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1))
self.assertEqual(outputs.logits.shape, expected_shape_logits)
expected_slices = Expectations(
{
(None, None):
[
[-19.1194, -0.0893, -11.0154],
[-17.3640, -1.8035, -14.0219],
[-20.0461, -0.5837, -11.1060],
],
("rocm", (9, 5)):
[
[-19.1194, -0.0893, -11.0154],
[-17.3640, -1.8035, -14.0219],
[-20.0461, -0.5837, -11.1060],
],
}
) # fmt: skip
expected_slice_logits = torch.tensor(expected_slices.get_expectation(), device=torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=2e-4, atol=2e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
expected_slice_boxes = torch.tensor(
[
[0.4433, 0.5302, 0.8852],
[0.5494, 0.2517, 0.0529],
[0.4998, 0.5360, 0.9955],
]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=2e-4, atol=2e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
expected_scores = torch.tensor([0.9982, 0.9960, 0.9955, 0.9988, 0.9987]).to(torch_device)
expected_labels = [75, 75, 63, 17, 17]
expected_slice_boxes = torch.tensor([40.1615, 70.8090, 175.5476, 117.9810]).to(torch_device)
self.assertEqual(len(results["scores"]), 5)
torch.testing.assert_close(results["scores"], expected_scores, rtol=2e-4, atol=2e-4)
self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=2e-4, atol=2e-4)
def test_inference_panoptic_segmentation_head(self):
model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
pixel_values = encoding["pixel_values"].to(torch_device)
pixel_mask = encoding["pixel_mask"].to(torch_device)
with torch.no_grad():
outputs = model(pixel_values, pixel_mask)
# verify outputs
expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1))
self.assertEqual(outputs.logits.shape, expected_shape_logits)
expected_slices = Expectations(
{
(None, None):
[
[-18.1565, -1.7568, -13.5029],
[-16.8888, -1.4138, -14.1028],
[-17.5709, -2.5080, -11.8654],
],
("rocm", (9, 5)):
[
[-18.1565, -1.7568, -13.5029],
[-16.8888, -1.4138, -14.1028],
[-17.5709, -2.5080, -11.8654],
],
}
) # fmt: skip
expected_slice_logits = torch.tensor(expected_slices.get_expectation(), device=torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=2e-4, atol=2e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
expected_slices = Expectations(
{
(None, None):
[
[0.5344, 0.1789, 0.9285],
[0.4420, 0.0572, 0.0875],
[0.6630, 0.6887, 0.1017],
],
("rocm", (9, 5)):
[
[0.5344, 0.1789, 0.9285],
[0.4420, 0.0572, 0.0875],
[0.6630, 0.6887, 0.1017],
],
}
) # fmt: skip
expected_slice_boxes = torch.tensor(expected_slices.get_expectation(), device=torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=2e-4, atol=2e-4)
expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267))
self.assertEqual(outputs.pred_masks.shape, expected_shape_masks)
expected_slices = Expectations(
{
(None, None):
[
[-7.7557, -10.8788, -11.9797],
[-11.8880, -16.4328, -17.7450],
[-14.7315, -19.7382, -20.3003],
],
("rocm", (9, 5)):
[
[ -7.7558, -10.8789, -11.9798],
[-11.8882, -16.4330, -17.7452],
[-14.7317, -19.7384, -20.3005],
],
}
) # fmt: skip
expected_slice_masks = torch.tensor(expected_slices.get_expectation(), device=torch_device)
torch.testing.assert_close(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, rtol=2e-3, atol=2e-3)
# verify postprocessing
results = image_processor.post_process_panoptic_segmentation(
outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
expected_shape = torch.Size([480, 640])
expected_slice_segmentation = torch.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype=torch.int32).to(
torch_device
)
expected_number_of_segments = 5
expected_first_segment = {"id": 1, "label_id": 17, "was_fused": False, "score": 0.9941}
number_of_unique_segments = len(torch.unique(results["segmentation"]))
self.assertEqual(
number_of_unique_segments, expected_number_of_segments + 1
) # we add 1 for the background class
self.assertEqual(results["segmentation"].shape, expected_shape)
torch.testing.assert_close(results["segmentation"][:3, :3], expected_slice_segmentation, rtol=1e-4, atol=1e-4)
self.assertEqual(len(results["segments_info"]), expected_number_of_segments)
predicted_first_segment = results["segments_info"][0]
self.assertEqual(predicted_first_segment["id"], expected_first_segment["id"])
self.assertEqual(predicted_first_segment["label_id"], expected_first_segment["label_id"])
self.assertEqual(predicted_first_segment["was_fused"], expected_first_segment["was_fused"])
self.assertAlmostEqual(predicted_first_segment["score"], expected_first_segment["score"], places=3)
@require_vision
@require_torch
@slow
class DetrModelIntegrationTests(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
if is_vision_available()
else None
)
def test_inference_no_head(self):
model = DetrModel.from_pretrained("facebook/detr-resnet-50", revision="no_timm").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**encoding)
expected_shape = torch.Size((1, 100, 256))
assert outputs.last_hidden_state.shape == expected_shape
expected_slices = Expectations(
{
(None, None):
[
[0.0616, -0.5146, -0.4032],
[-0.7629, -0.4934, -1.7153],
[-0.4768, -0.6403, -0.7826],
],
("rocm", (9, 5)):
[
[ 0.0616, -0.5146, -0.4032],
[-0.7629, -0.4934, -1.7153],
[-0.4768, -0.6403, -0.7826]
],
}
) # fmt: skip
expected_slice = torch.tensor(expected_slices.get_expectation(), device=torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| transformers/tests/models/detr/test_modeling_detr.py/0 | {
"file_path": "transformers/tests/models/detr/test_modeling_detr.py",
"repo_id": "transformers",
"token_count": 16733
} | 506 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
from unittest.util import safe_repr
import pytest
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, FalconMambaConfig, is_torch_available
from transformers.testing_utils import (
Expectations,
cleanup,
require_bitsandbytes,
require_torch,
require_torch_accelerator,
require_torch_large_accelerator,
require_torch_multi_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconMambaCache,
FalconMambaForCausalLM,
FalconMambaModel,
)
# Copied from transformers.tests.models.mamba.MambaModelTester with Mamba->FalconMamba,mamba->falcon_mamba
class FalconMambaModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
intermediate_size=32,
hidden_act="silu",
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
num_labels=3,
num_choices=4,
scope=None,
tie_word_embeddings=True,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
self.tie_word_embeddings = tie_word_embeddings
# Ignore copy
def get_large_model_config(self):
return FalconMambaConfig.from_pretrained("tiiuae/falcon-mamba-7b")
def prepare_config_and_inputs(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = ids_tensor([self.batch_size, self.seq_length], 1)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config(
gradient_checkpointing=gradient_checkpointing,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
reorder_and_upcast_attn=reorder_and_upcast_attn,
)
return (
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
return FalconMambaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
intermediate_size=self.intermediate_size,
activation_function=self.hidden_act,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
gradient_checkpointing=gradient_checkpointing,
tie_word_embeddings=self.tie_word_embeddings,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
return (
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_falcon_mamba_model(self, config, input_ids, *args):
config.output_hidden_states = True
model = FalconMambaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1)
def create_and_check_causal_lm(self, config, input_ids, *args):
model = FalconMambaForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_state_equivalency(self, config, input_ids, *args):
model = FalconMambaModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
output_whole = outputs.last_hidden_state
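# run the same sequence in two chunks (all but the last token, then the last token re-using the cached states);
# concatenating both chunk outputs must reproduce the single full-sequence forward pass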
outputs = model(
input_ids[:, :-1],
use_cache=True,
cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device),
)
output_one = outputs.last_hidden_state
# Using the state computed on the first inputs, we will get the same output
outputs = model(
input_ids[:, -1:],
use_cache=True,
cache_params=outputs.cache_params,
cache_position=torch.arange(config.conv_kernel, config.conv_kernel + 1, device=input_ids.device),
)
output_two = outputs.last_hidden_state
self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5))
# TODO: the original Mamba implementation does not support decoding more than 1 token at a time, and neither do we
def create_and_check_falcon_mamba_cached_slow_forward_and_backwards(
self, config, input_ids, *args, gradient_checkpointing=False
):
model = FalconMambaModel(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
# create cache
cache = model(input_ids, use_cache=True).cache_params
cache.reset()
# use cache
token_emb = model.embeddings(input_ids)
outputs = model.layers[0].mixer.slow_forward(
token_emb, cache, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device)
)
loss = torch.log1p(torch.abs(outputs.sum()))
self.parent.assertEqual(loss.shape, ())
self.parent.assertEqual(outputs.shape, (self.batch_size, self.seq_length, self.hidden_size))
loss.backward()
def create_and_check_falcon_mamba_lm_head_forward_and_backwards(
self, config, input_ids, *args, gradient_checkpointing=False
):
model = FalconMambaForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def prepare_config_and_inputs_for_common(self):
(
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
# Copied from transformers.tests.models.mamba.MambaModelTest with Mamba->Falcon,mamba->falcon_mamba,FalconMambaCache->MambaCache
class FalconMambaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (FalconMambaModel, FalconMambaForCausalLM) if is_torch_available() else ()
has_attentions = False # FalconMamba does not support attentions
fx_compatible = False # FIXME let's try to support this @ArthurZucker
test_torchscript = False # FIXME let's try to support this @ArthurZucker
test_missing_keys = False
test_model_parallel = False
test_pruning = False
test_head_masking = False # FalconMamba does not have attention heads
pipeline_model_mapping = (
{"feature-extraction": FalconMambaModel, "text-generation": FalconMambaForCausalLM}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = FalconMambaModelTester(self)
self.config_tester = ConfigTester(
self, config_class=FalconMambaConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
)
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, (list, tuple)):
max_value, min_value = max(member), min(member)
if not isinstance(container, list):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
def test_config(self):
self.config_tester.run_common_tests()
def test_falcon_mamba_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_falcon_mamba_model(*config_and_inputs)
def test_falcon_mamba_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm(*config_and_inputs)
def test_state_equivalency(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_state_equivalency(*config_and_inputs)
def test_falcon_mamba_cached_slow_forward_and_backwards(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_falcon_mamba_cached_slow_forward_and_backwards(*config_and_inputs)
def test_falcon_mamba_lm_head_forward_and_backwards(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_falcon_mamba_lm_head_forward_and_backwards(*config_and_inputs)
def test_initialization(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
config.rescale_prenorm_residual = True
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if "dt_proj.bias" in name:
dt = torch.exp(
torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min))
+ math.log(config.time_step_min)
).clamp(min=config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
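# inv_dt is the inverse of softplus, i.e. softplus(inv_dt) == dt, so the dt_proj bias must fall inside
# [inv_dt[0], inv_dt[1]] after initialization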
if param.requires_grad:
self.assertTrue(param.data.max().item() <= inv_dt[1])
self.assertTrue(param.data.min().item() >= inv_dt[0])
elif "A_log" in name:
A = torch.arange(1, config.state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(config.intermediate_size, -1).contiguous()
torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
if param.requires_grad:
# check that it is initialized as all ones
torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
else:
if param.requires_grad:
if (
"mixer.conv1d.weight" in name
or "mixer.dt_proj.weight" in name
or "mixer.out_proj.weight" in name
):
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
@slow
# Ignore copy
def test_model_from_pretrained(self):
model = FalconMambaModel.from_pretrained("tiiuae/falcon-mamba-7b", dtype=torch.float16)
self.assertIsNotNone(model)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, FalconMambaCache): # MODIFIED PART START
recursive_check(tuple_object.conv_states, dict_object.conv_states)
recursive_check(tuple_object.ssm_states, dict_object.ssm_states)
elif isinstance(tuple_object, (list, tuple)): # MODIFIED PART END
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(tuple_object, dict_object, atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@unittest.skip("Mamba models do not support DDP.")
def test_multi_gpu_data_parallel_forward(self):
pass
@require_torch
@require_torch_accelerator
@slow
class FalconMambaIntegrationTests(unittest.TestCase):
def setUp(self):
self.model_id = "tiiuae/falcon-mamba-7b"
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.text = "Hello today"
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
# On T4, get `NotImplementedError: Cannot copy out of meta tensor; no data!`
@require_torch_large_accelerator
def test_generation_fp16(self):
model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16, device_map="auto")
inputs = self.tokenizer(self.text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
EXPECTED_OUTPUTS = Expectations(
{
("cuda", 7): "Hello today I am going to show you how to make a simple and easy to make paper plane.\nStep",
("cuda", 8): 'Hello today Iava,\n\nI am writing to you today to discuss the importance of maintaining a healthy lifestyle',
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
self.assertEqual(
self.tokenizer.batch_decode(out, skip_special_tokens=False)[0],
EXPECTED_OUTPUT,
)
@require_bitsandbytes
def test_generation_4bit(self):
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=quantization_config).to(
torch_device
)
inputs = self.tokenizer(self.text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
self.assertEqual(
self.tokenizer.batch_decode(out, skip_special_tokens=False)[0],
"Hello today Iava,\n\nI'm sorry to hear that you're having trouble with the ",
)
@pytest.mark.torch_compile_test
def test_generation_torch_compile(self):
model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16).to(torch_device)
model = torch.compile(model)
inputs = self.tokenizer(self.text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
print(self.tokenizer.batch_decode(out, skip_special_tokens=False)[0])
self.assertEqual(
self.tokenizer.batch_decode(out, skip_special_tokens=False)[0],
"Hello today Iava,\n\nI am writing to you today to discuss the importance of maintaining a healthy lifestyle",
)
def test_batched_generation(self):
model_id = "tiiuae/falcon-mamba-7b"
tok = AutoTokenizer.from_pretrained(model_id)
tok.pad_token_id = tok.eos_token_id
texts = ["Hello today", "Hello my name is Younes and today"]
EXPECTED_OUTPUTS = Expectations(
{
("cuda", 7): [
'Hello today I will be talking about the “Theory of Relativity” by Albert Einstein.\nThe',
'Hello my name is Younes and today I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
("cuda", 8): [
'Hello today I am going to talk about the “Theory of Relativity” by Albert Einstein.\n',
'Hello my name is Younes and today I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
inputs = tok(texts, return_tensors="pt", padding=True, return_token_type_ids=False).to(torch_device)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map=0, dtype=torch.float16)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
out = tok.batch_decode(out, skip_special_tokens=True)
self.assertListEqual(out, EXPECTED_OUTPUT)
# We test the same generations with inputs_embeds
with torch.no_grad():
inputs_embeds = model.get_input_embeddings()(inputs.pop("input_ids"))
inputs["inputs_embeds"] = inputs_embeds
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
out = tok.batch_decode(out, skip_special_tokens=True)
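# NOTE: when generating from inputs_embeds, the returned sequences contain only the newly generated tokens,
# which is why the expected strings below no longer start with the prompt text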
EXPECTED_OUTPUTS = Expectations(
{
("cuda", 7): [
' I will be talking about the “Theory of Relativity” by Albert Einstein.\nThe',
' I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
("cuda", 8): [
' I am going to talk about the “Theory of Relativity” by Albert Einstein.\n',
' I will be talking about the importance of the internet in our lives.\nThe internet is a global'
],
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
self.assertListEqual(out, EXPECTED_OUTPUT)
@require_torch_multi_accelerator
def test_training_kernel(self):
model_id = "tiiuae/falcon-mamba-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", dtype=torch.float16)
tokenizer.pad_token_id = tokenizer.eos_token_id
text = "Hello today"
inputs = tokenizer(text, return_tensors="pt").to(torch_device)
with torch.no_grad():
logits = torch.argmax(model(**inputs).logits, dim=-1)
out_no_training = tokenizer.batch_decode(logits)
model.train()
lm_logits = model(**inputs).logits
next_token = torch.argmax(lm_logits, dim=-1)
out_training = tokenizer.batch_decode(next_token)
# Just verify backward works
loss = (1 - lm_logits).mean()
loss.backward()
self.assertEqual(out_training, out_no_training)
| transformers/tests/models/falcon_mamba/test_modeling_falcon_mamba.py/0 | {
"file_path": "transformers/tests/models/falcon_mamba/test_modeling_falcon_mamba.py",
"repo_id": "transformers",
"token_count": 11408
} | 507 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Glm4 model."""
import unittest
import pytest
from transformers import AutoModelForCausalLM, AutoTokenizer, Glm4Config, is_torch_available
from transformers.testing_utils import (
Expectations,
cleanup,
require_flash_attn,
require_torch,
require_torch_large_accelerator,
require_torch_large_gpu,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Glm4ForCausalLM,
Glm4ForSequenceClassification,
Glm4ForTokenClassification,
Glm4Model,
)
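# Glm4 reuses the shared causal-LM test scaffolding: CausalLMModelTester builds a small config and dummy inputs
# from the class attributes declared below, and CausalLMModelTest supplies the common tests.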
class Glm4ModelTester(CausalLMModelTester):
if is_torch_available():
config_class = Glm4Config
base_model_class = Glm4Model
causal_lm_class = Glm4ForCausalLM
sequence_classification_class = Glm4ForSequenceClassification
token_classification_class = Glm4ForTokenClassification
@require_torch
class Glm4ModelTest(CausalLMModelTest, unittest.TestCase):
model_tester_class = Glm4ModelTester
all_model_classes = (
(Glm4Model, Glm4ForCausalLM, Glm4ForSequenceClassification, Glm4ForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": Glm4Model,
"text-classification": Glm4ForSequenceClassification,
"token-classification": Glm4ForTokenClassification,
"text-generation": Glm4ForCausalLM,
"zero-shot": Glm4ForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
@slow
@require_torch_large_accelerator
class Glm4IntegrationTest(unittest.TestCase):
input_text = ["Hello I am doing", "Hi today"]
model_id = "THUDM/GLM-4-9B-0414"
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model_9b_fp16(self):
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
("cuda", 7): [],
("cuda", 8): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_9b_bf16(self):
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common mistakes that people make when they are learning English.",
],
("cuda", 7): [],
("cuda", 8): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_9b_eager(self):
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and who",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
("cuda", 7): [],
("cuda", 8): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
dtype=torch.bfloat16,
attn_implementation="eager",
)
model.to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_9b_sdpa(self):
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common mistakes that people make when they are learning English.",
],
("cuda", 7): [],
("cuda", 8): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
dtype=torch.bfloat16,
attn_implementation="sdpa",
)
model.to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
@require_flash_attn
@require_torch_large_gpu
@pytest.mark.flash_attn_test
def test_model_9b_flash_attn(self):
EXPECTED_TEXTS = Expectations(
{
("cuda", 7): [],
("cuda", 8): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
"Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
)
model.to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
| transformers/tests/models/glm4/test_modeling_glm4.py/0 | {
"file_path": "transformers/tests/models/glm4/test_modeling_glm4.py",
"repo_id": "transformers",
"token_count": 4046
} | 508 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from typing import Optional
import pytest
from transformers import BertTokenizer, BertTokenizerFast, GroundingDinoProcessor
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_torch_available():
import torch
from transformers.models.grounding_dino.modeling_grounding_dino import GroundingDinoObjectDetectionOutput
if is_vision_available():
from transformers import GroundingDinoImageProcessor
@require_torch
@require_vision
class GroundingDinoProcessorTest(ProcessorTesterMixin, unittest.TestCase):
from_pretrained_id = "IDEA-Research/grounding-dino-base"
processor_class = GroundingDinoProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
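        # Write a tiny WordPiece vocab and an image-processor config to the temp dir so the
        # tokenizer, image processor and processor can all be re-loaded from disk in the tests below.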
vocab_tokens = ["[UNK]","[CLS]","[SEP]","[PAD]","[MASK]","want","##want","##ed","wa","un","runn","##ing",",","low","lowest"] # fmt: skip
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
image_processor_map = {
"do_resize": True,
"size": None,
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
"do_rescale": True,
"rescale_factor": 1 / 255,
"do_pad": True,
}
cls.image_processor_file = os.path.join(cls.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(cls.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(image_processor_map, fp)
image_processor = GroundingDinoImageProcessor()
tokenizer = BertTokenizer.from_pretrained(cls.from_pretrained_id)
processor = GroundingDinoProcessor(image_processor, tokenizer)
processor.save_pretrained(cls.tmpdirname)
cls.batch_size = 7
cls.num_queries = 5
cls.embed_dim = 5
cls.seq_length = 5
def prepare_text_inputs(self, batch_size: Optional[int] = None, modality: Optional[str] = None):
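        # With batch_size=None return a single flat list of labels; otherwise return one label list
        # per sample, mixing two different lengths so that padding code paths are exercised.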
labels = ["a cat", "remote control"]
labels_longer = ["a person", "a car", "a dog", "a cat"]
if batch_size is None:
return labels
if batch_size < 1:
raise ValueError("batch_size must be greater than 0")
if batch_size == 1:
return [labels]
return [labels, labels_longer] + [labels] * (batch_size - 2)
@classmethod
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.get_tokenizer with CLIP->Bert
def get_tokenizer(cls, **kwargs):
return BertTokenizer.from_pretrained(cls.tmpdirname, **kwargs)
@classmethod
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.get_rust_tokenizer with CLIP->Bert
def get_rust_tokenizer(cls, **kwargs):
return BertTokenizerFast.from_pretrained(cls.tmpdirname, **kwargs)
@classmethod
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.get_image_processor with CLIP->GroundingDino
def get_image_processor(cls, **kwargs):
return GroundingDinoImageProcessor.from_pretrained(cls.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def get_fake_grounding_dino_output(self):
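        # Deterministic fake detection output: the fixed seed keeps the score/box assertions
        # in test_post_process_grounded_object_detection reproducible.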
torch.manual_seed(42)
return GroundingDinoObjectDetectionOutput(
pred_boxes=torch.rand(self.batch_size, self.num_queries, 4),
logits=torch.rand(self.batch_size, self.num_queries, self.embed_dim),
input_ids=self.get_fake_grounding_dino_input_ids(),
)
def get_fake_grounding_dino_input_ids(self):
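        # Roughly "[CLS] a cat . [SEP]" under the standard BERT uncased vocab, repeated for every batch item.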
input_ids = torch.tensor([101, 1037, 4937, 1012, 102])
return torch.stack([input_ids] * self.batch_size, dim=0)
def test_post_process_grounded_object_detection(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GroundingDinoProcessor(tokenizer=tokenizer, image_processor=image_processor)
grounding_dino_output = self.get_fake_grounding_dino_output()
post_processed = processor.post_process_grounded_object_detection(grounding_dino_output)
self.assertEqual(len(post_processed), self.batch_size)
self.assertEqual(list(post_processed[0].keys()), ["scores", "boxes", "text_labels", "labels"])
self.assertEqual(post_processed[0]["boxes"].shape, (self.num_queries, 4))
self.assertEqual(post_processed[0]["scores"].shape, (self.num_queries,))
expected_scores = torch.tensor([0.7050, 0.7222, 0.7222, 0.6829, 0.7220])
torch.testing.assert_close(post_processed[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4)
expected_box_slice = torch.tensor([0.6908, 0.4354, 1.0737, 1.3947])
torch.testing.assert_close(post_processed[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4)
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.test_save_load_pretrained_default with CLIP->GroundingDino,GroundingDinoTokenizer->BertTokenizer
def test_save_load_pretrained_default(self):
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
with tempfile.TemporaryDirectory() as tmpdir:
processor_slow = GroundingDinoProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
processor_slow.save_pretrained(tmpdir)
processor_slow = GroundingDinoProcessor.from_pretrained(tmpdir, use_fast=False)
processor_fast = GroundingDinoProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
processor_fast.save_pretrained(tmpdir)
processor_fast = GroundingDinoProcessor.from_pretrained(tmpdir)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, GroundingDinoImageProcessor)
self.assertIsInstance(processor_fast.image_processor, GroundingDinoImageProcessor)
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.test_save_load_pretrained_additional_features with CLIP->GroundingDino,GroundingDinoTokenizer->BertTokenizer
def test_save_load_pretrained_additional_features(self):
with tempfile.TemporaryDirectory() as tmpdir:
processor = GroundingDinoProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
)
processor.save_pretrained(tmpdir)
tokenizer_add_kwargs = BertTokenizer.from_pretrained(tmpdir, bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = GroundingDinoImageProcessor.from_pretrained(
tmpdir, do_normalize=False, padding_value=1.0
)
processor = GroundingDinoProcessor.from_pretrained(
tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, GroundingDinoImageProcessor)
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.test_image_processor with CLIP->GroundingDino
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GroundingDinoProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_image_proc = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
for key in input_image_proc:
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.test_tokenizer with CLIP->GroundingDino
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GroundingDinoProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GroundingDinoProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values", "pixel_mask"]
)
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
# Copied from tests.models.clip.test_processing_clip.CLIPProcessorTest.test_tokenizer_decode with CLIP->GroundingDino
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GroundingDinoProcessor(tokenizer=tokenizer, image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_text_preprocessing_equivalence(self):
processor = GroundingDinoProcessor.from_pretrained(self.tmpdirname)
# check for single input
formatted_labels = "a cat. a remote control."
labels = ["a cat", "a remote control"]
inputs1 = processor(text=formatted_labels, return_tensors="pt")
inputs2 = processor(text=labels, return_tensors="pt")
self.assertTrue(
torch.allclose(inputs1["input_ids"], inputs2["input_ids"]),
f"Input ids are not equal for single input: {inputs1['input_ids']} != {inputs2['input_ids']}",
)
# check for batched input
formatted_labels = ["a cat. a remote control.", "a car. a person."]
labels = [["a cat", "a remote control"], ["a car", "a person"]]
inputs1 = processor(text=formatted_labels, return_tensors="pt", padding=True)
inputs2 = processor(text=labels, return_tensors="pt", padding=True)
self.assertTrue(
torch.allclose(inputs1["input_ids"], inputs2["input_ids"]),
f"Input ids are not equal for batched input: {inputs1['input_ids']} != {inputs2['input_ids']}",
)
| transformers/tests/models/grounding_dino/test_processing_grounding_dino.py/0 | {
"file_path": "transformers/tests/models/grounding_dino/test_processing_grounding_dino.py",
"repo_id": "transformers",
"token_count": 5074
} | 509 |
# Copyright (C) 2024 THL A29 Limited, a Tencent company and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch HunYuanMoEV1 model."""
import unittest
import pytest
from transformers import HunYuanMoEV1Config, is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
if is_torch_available():
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
HunYuanMoEV1ForCausalLM,
HunYuanMoEV1ForSequenceClassification,
HunYuanMoEV1Model,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class HunYuanMoEV1ModelTester(CausalLMModelTester):
config_class = HunYuanMoEV1Config
if is_torch_available():
base_model_class = HunYuanMoEV1Model
causal_lm_class = HunYuanMoEV1ForCausalLM
sequence_class = HunYuanMoEV1ForSequenceClassification
@require_torch
class HunYuanMoEV1ModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(
HunYuanMoEV1Model,
HunYuanMoEV1ForCausalLM,
HunYuanMoEV1ForSequenceClassification,
)
if is_torch_available()
else ()
)
test_headmasking = False
test_pruning = False
model_tester_class = HunYuanMoEV1ModelTester
pipeline_model_mapping = (
{
"feature-extraction": HunYuanMoEV1Model,
"text-generation": HunYuanMoEV1ForCausalLM,
"text-classification": HunYuanMoEV1ForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
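        # Skip every pipeline test for this model for now.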
return True
@unittest.skip("Hunyuan model Unsupported")
@pytest.mark.torch_compile_test
def test_generate_compilation_all_outputs(self):
pass
@unittest.skip("Hunyuan model Unsupported")
@pytest.mark.torch_compile_test
def test_generate_compile_model_forward(self):
pass
@unittest.skip("Hunyuan model Unsupported")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
@unittest.skip("Hunyuan model Unsupported")
def test_generate_with_static_cache(self):
pass
@require_torch
class HunYuanMoEV1IntegrationTest(unittest.TestCase):
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_model_generation(self):
        # we will complete this once the model files change over
EXPECTED_ANSWER = "\nOkay, I need to write a short summary about the benefits of regular exercise. Let me start by recalling what I know. First,"
prompt = "Write a short summary of the benefits of regular exercise"
tokenizer = AutoTokenizer.from_pretrained("tencent/Hunyuan-A13B-Instruct")
model = AutoModelForCausalLM.from_pretrained("tencent/Hunyuan-A13B-Instruct", device_map="auto")
messages = [
{"role": "user", "content": prompt},
]
tokenized_chat = tokenizer.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_tensors="pt",
)
generated_ids = model.generate(tokenized_chat.to(model.device), max_new_tokens=30, top_k=1)
text = tokenizer.decode(generated_ids[0])
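        # The chat template makes the model open with a <think> block; keep only the text after that tag.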
output = text.split("<think>")[1]
self.assertEqual(EXPECTED_ANSWER, output)
| transformers/tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py/0 | {
"file_path": "transformers/tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py",
"repo_id": "transformers",
"token_count": 1750
} | 510 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch LLaMA model."""
import unittest
import pytest
from packaging import version
from transformers import AutoTokenizer, StaticCache, is_torch_available
from transformers.generation.configuration_utils import GenerationConfig
from transformers.testing_utils import (
Expectations,
cleanup,
require_read_token,
require_torch,
require_torch_accelerator,
run_test_using_subprocess,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
LlamaConfig,
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
LlamaModel,
LlamaTokenizer,
)
from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding
class LlamaModelTester(CausalLMModelTester):
if is_torch_available():
config_class = LlamaConfig
base_model_class = LlamaModel
causal_lm_class = LlamaForCausalLM
sequence_class = LlamaForSequenceClassification
token_class = LlamaForTokenClassification
@require_torch
class LlamaModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(
LlamaModel,
LlamaForCausalLM,
LlamaForSequenceClassification,
LlamaForQuestionAnswering,
LlamaForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
"question-answering": LlamaForQuestionAnswering,
"token-classification": LlamaForTokenClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
fx_compatible = False # Broken by attention refactor cc @Cyrilvallez
model_tester_class = LlamaModelTester
rotary_embedding_layer = LlamaRotaryEmbedding # Enables RoPE tests if set
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = LlamaForCausalLM if is_torch_available() else None
@require_torch_accelerator
@require_read_token
class LlamaIntegrationTest(unittest.TestCase):
    def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
# TODO (joao): automatic compilation, i.e. compilation when `cache_implementation="static"` is used, leaves
# some memory allocated in the cache, which means some object is not being released properly. This causes some
        # suboptimal memory usage, e.g. after certain tests a 7B model in FP16 no longer fits in a 24GB GPU.
# Investigate the root cause.
cleanup(torch_device, gc_collect=True)
@slow
def test_llama_3_1_hard(self):
"""
        An integration test for Llama 3.1. It tests against a long output to ensure that the subtle numerical
        differences introduced by Llama 3.1's RoPE can be detected.
"""
expected_texts = Expectations(
{
("rocm", (9, 5)): 'Tell me about the french revolution. The french revolution was a period of radical social and political upheaval in France that lasted from 1789 until 1799. It was a time of great change and upheaval, marked by the overthrow of the monarchy, the rise of the middle class, and the eventual establishment of the First French Republic.\nThe revolution began in 1789 with the Estates-General, a representative assembly that had not met since 1614. The Third Estate, which represented the common people, demanded greater representation and eventually broke away to form the National Assembly. This marked the beginning of the end of the absolute monarchy and the rise of the middle class.\n',
("cuda", None): 'Tell me about the french revolution. The french revolution was a period of radical political and social upheaval in France that lasted from 1789 until 1799. It was a time of great change and upheaval, marked by the overthrow of the monarchy, the rise of the middle class, and the eventual establishment of the First French Republic.\nThe revolution began in 1789 with the Estates-General, a representative assembly that had not met since 1614. The Third Estate, which represented the common people, demanded greater representation and eventually broke away to form the National Assembly. The National Assembly adopted the Declaration of the Rights of Man and of the Citizen, which enshr',
}
) # fmt: skip
EXPECTED_TEXT = expected_texts.get_expectation()
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
model = LlamaForCausalLM.from_pretrained(
"meta-llama/Meta-Llama-3.1-8B-Instruct", device_map="auto", dtype=torch.bfloat16
)
input_text = ["Tell me about the french revolution."]
model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=128, do_sample=False)
generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
@slow
def test_model_7b_logits_bf16(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-hf", device_map="auto", dtype=torch.bfloat16, attn_implementation="eager"
)
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
# Expected mean on dim = -1
# fmt: off
expected_means = Expectations(
{
("xpu", 3): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]),
("cuda", 7): torch.tensor([[-6.5061, -4.1147, -4.9669, -3.2038, 0.8069, -2.9694, 1.2864, -3.3786]]),
("cuda", 8): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]),
("rocm", (9, 4)): torch.tensor([[-6.5094, -4.1329, -4.9754, -3.5042, 0.8082, -2.9443, 1.2830, -3.3539]]),
})
expected_mean = expected_means.get_expectation().to(torch_device)
actual_mean = out.logits.float().mean(-1)
self.assertTrue(
torch.allclose(
expected_mean,
actual_mean,
atol=1e-2,
rtol=1e-2
)
)
# slicing logits[0, 0, 0:15]
expected_slices = Expectations(
{
("xpu", 3): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]),
("cuda", 7): torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.4688, -7.4375, -7.6875, -6.9375, -6.0312, -7.0000, -1.8594, 1.8438, -8.5000]]),
("cuda", 8): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]),
("rocm", (9, 4)): torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9375, -6.0312, -7.0312, -1.8594, 1.8438, -8.5000]])
})
# fmt: on
expected_slice = expected_slices.get_expectation().to(torch_device)
actual_slice = out.logits[0, 0, :15].float()
self.assertTrue(torch.allclose(expected_slice, actual_slice, atol=1e-2, rtol=1e-2))
@slow
def test_model_7b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto", dtype=torch.float16)
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
# fmt: off
# Expected mean on dim = -1
expected_means = Expectations(
{
("xpu", 3): torch.tensor([[-6.6544, -4.1259, -4.9840, -3.2456, 0.8261, -3.0124, 1.2971, -3.3641]]),
("cuda", 7): torch.tensor([[-6.6420, -4.1227, -4.9809, -3.2041, 0.8261, -3.0052, 1.2957, -3.3648]]),
("cuda", 8): torch.tensor([[-6.6544, -4.1259, -4.9840, -3.2456, 0.8261, -3.0124, 1.2971, -3.3641]]),
})
expected_mean = expected_means.get_expectation()
self.assertTrue(
torch.allclose(
expected_mean.to(torch_device),
out.logits.float().mean(-1),
atol=1e-2,
rtol=1e-2
)
)
# slicing logits[0, 0, 0:15]
expected_slices = Expectations(
{
("xpu", 3): torch.tensor([-12.8281, -7.4609, -0.4668, -8.0703, -7.2539, -8.0078, -6.4961, -7.7734, -7.8516, -7.0352, -6.2188, -7.1367, -1.8564, 1.9922, -8.6328]),
("cuda", 7): torch.tensor([-12.8125, -7.3359, -0.4846, -8.0234, -7.2383, -7.9922, -6.4805, -7.7344, -7.8125, -7.0078, -6.1797, -7.1094, -1.8633, 1.9736, -8.6016]),
("cuda", 8): torch.tensor([-12.8281, -7.4609, -0.4668, -8.0703, -7.2539, -8.0078, -6.4961, -7.7734, -7.8516, -7.0352, -6.2188, -7.1367, -1.8564, 1.9922, -8.6328])
})
# fmt: on
expected_slice = expected_slices.get_expectation()
self.assertTrue(
torch.allclose(
expected_slice.to(torch_device),
out.logits[0, 0, :15].float(),
atol=1e-2,
rtol=1e-2,
)
)
# TODO: check why we have the following strange situation.
    # without running in a subprocess, this test causes subsequent tests to fail with `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0!`
@run_test_using_subprocess
@slow
def test_model_7b_dola_generation(self):
# ground truth text generated with dola_layers="low", repetition_penalty=1.2
EXPECTED_TEXT_COMPLETION = (
"Simply put, the theory of relativity states that 1) time and space are relative, and 2) the laws of "
"physics are the same for all observers in uniform motion relative to one another.\n\nThe theory of "
"relativity was developed by Albert Einstein in the early 20th century, and it revolutionized our "
"understanding of space and time."
)
prompt = "Simply put, the theory of relativity states that "
tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
model = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-chat-hf", device_map="sequential", dtype=torch.float16
)
model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# greedy generation outputs
generated_ids = model.generate(
**model_inputs, max_new_tokens=64, top_p=None, temperature=1, do_sample=False, dola_layers="low"
)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
@require_torch_accelerator
@pytest.mark.torch_compile_test
def test_compile_static_cache(self):
# `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
# work as intended. See https://github.com/pytorch/pytorch/issues/121943
if version.parse(torch.__version__) < version.parse("2.3.0"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
NUM_TOKENS_TO_GENERATE = 40
# Note on `EXPECTED_TEXT_COMPLETION`'s diff: the current value matches the original test if the original test
# was changed to have a cache of 53 tokens (as opposed to 4096), on Ampere GPUs.
EXPECTED_TEXT_COMPLETION = [
"Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial "
"reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe "
"theory of relativ",
"My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, "
"my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p",
]
prompts = [
"Simply put, the theory of relativity states that ",
"My favorite all time favorite condiment is ketchup.",
]
tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right")
model = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-hf", device_map=torch_device, dtype=torch.float16
)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
# Dynamic Cache
generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)
# Static Cache + compile (`generate()` internally compiles each decoding step when static cache is used)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
@slow
@pytest.mark.torch_export_test
def test_export_static_cache(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
)
llama_models = {
"meta-llama/Llama-3.2-1B": [
"Simply put, the theory of relativity states that 1) the speed of light is the same for all "
"observers, regardless of their location, and 2) the laws of physics are the same for all observers"
],
}
for llama_model_ckp, EXPECTED_TEXT_COMPLETION in llama_models.items():
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(llama_model_ckp, pad_token="</s>", padding_side="right")
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = LlamaForCausalLM.from_pretrained(
llama_model_ckp,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
"device": device,
},
),
)
prompts = ["Simply put, the theory of relativity states that "]
prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + export
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
@slow
@require_torch_accelerator
class Mask4DTestHard(unittest.TestCase):
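    # Builds a packed batch with a shared prefix and a custom 4D attention mask, then checks that the
    # last-token predictions match the equivalent regular (unpacked) batch, with and without StaticCache.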
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def setUp(self):
cleanup(torch_device, gc_collect=True)
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
self.model_dtype = torch.float32
self.tokenizer = LlamaTokenizer.from_pretrained(model_name)
self.model = LlamaForCausalLM.from_pretrained(model_name, dtype=self.model_dtype).to(torch_device)
def get_test_data(self):
template = "my favorite {}"
items = ("pet is a", "artist plays a", "name is L") # same number of tokens in each item
batch_separate = [template.format(x) for x in items] # 3 separate lines
batch_shared_prefix = template.format(" ".join(items)) # 1 line with options concatenated
input_ids = self.tokenizer(batch_separate, return_tensors="pt").input_ids.to(torch_device)
input_ids_shared_prefix = self.tokenizer(batch_shared_prefix, return_tensors="pt").input_ids.to(torch_device)
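        # Custom 4D mask over 12 positions: the first 3 (shared prefix) are visible to every row,
        # and each of the three items then attends causally only within its own 3-token block.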
mask_shared_prefix = torch.tensor(
[
[
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
]
]
],
device=torch_device,
)
position_ids = torch.arange(input_ids.shape[1]).tile(input_ids.shape[0], 1).to(torch_device)
        # building custom position ids based on the custom mask
position_ids_shared_prefix = (mask_shared_prefix.sum(dim=-1) - 1).reshape(1, -1)
# effectively: position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]]).to(device)
# inverting the mask
min_dtype = torch.finfo(self.model_dtype).min
mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=self.model_dtype) * min_dtype
return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix
def test_stacked_causal_mask(self):
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# single forward run with 4D custom mask
logits_shared_prefix = self.model.forward(
input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix
).logits
logits_shared_prefix_last = logits_shared_prefix[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], :
] # last three tokens
decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)]
self.assertEqual(decoded, decoded_shared_prefix)
def test_partial_stacked_causal_mask(self):
# Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# 2 forward runs with custom 4D masks
part_a = 3 # split point
input_1a = input_ids_shared_prefix[:, :part_a]
position_ids_1a = position_ids_shared_prefix[:, :part_a]
mask_1a = mask_shared_prefix[:, :, :part_a, :part_a]
outs_1a = self.model.forward(input_1a, attention_mask=mask_1a, position_ids=position_ids_1a)
past_key_values_a = outs_1a["past_key_values"]
# Case 1: we pass a 4D attention mask regarding the current sequence length (i.e. [..., seq_len, full_len])
input_1b = input_ids_shared_prefix[:, part_a:]
position_ids_1b = position_ids_shared_prefix[:, part_a:]
mask_1b = mask_shared_prefix[:, :, part_a:, :]
outs_1b = self.model.forward(
input_1b,
attention_mask=mask_1b,
position_ids=position_ids_1b,
past_key_values=past_key_values_a,
)
decoded_1b = [
self.tokenizer.decode(t)
for t in outs_1b.logits.argmax(-1)[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a
]
]
self.assertEqual(decoded, decoded_1b)
def test_stacked_causal_mask_static_cache(self):
"""same as above but with StaticCache"""
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# upgrade the model with StaticCache
max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1]
past_key_values = StaticCache(config=self.model.config, max_cache_len=max_cache_len)
padded_attention_mask = torch.nn.functional.pad(
input=mask_shared_prefix,
pad=(0, max_cache_len - mask_shared_prefix.shape[-1]),
mode="constant",
value=torch.finfo(self.model_dtype).min,
)
# single forward run with 4D custom mask
logits_shared_prefix = self.model.forward(
input_ids_shared_prefix,
attention_mask=padded_attention_mask,
position_ids=position_ids_shared_prefix,
cache_position=torch.arange(input_ids_shared_prefix.shape[-1], device=torch_device),
past_key_values=past_key_values,
).logits
logits_shared_prefix_last = logits_shared_prefix[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], :
] # last three tokens
decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)]
self.assertEqual(decoded, decoded_shared_prefix)
def test_partial_stacked_causal_mask_static_cache(self):
# Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks
# we pass a 4D attention mask shaped [..., seq_len, full_static_cache_len])
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# upgrade the model with StaticCache
max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1]
past_key_values = StaticCache(config=self.model.config, max_cache_len=max_cache_len)
# forward run for the first part of input
part_a = 3 # split point
input_1a = input_ids_shared_prefix[:, :part_a]
position_ids_1a = position_ids_shared_prefix[:, :part_a]
mask_1a = mask_shared_prefix[:, :, :part_a, :part_a]
padded_mask_1a = torch.nn.functional.pad(
input=mask_1a,
pad=(0, max_cache_len - mask_1a.shape[-1]),
mode="constant",
value=torch.finfo(self.model_dtype).min,
)
_ = self.model.forward(
input_1a,
attention_mask=padded_mask_1a,
position_ids=position_ids_1a,
cache_position=torch.arange(part_a, device=torch_device),
past_key_values=past_key_values,
)
# forward run for the second part of input
input_1b = input_ids_shared_prefix[:, part_a:]
position_ids_1b = position_ids_shared_prefix[:, part_a:]
mask_1b = mask_shared_prefix[:, :, part_a:, :]
padded_mask_1b = torch.nn.functional.pad(
input=mask_1b, pad=(0, max_cache_len - mask_1b.shape[-1]), mode="constant", value=0
)
outs_1b = self.model.forward(
input_1b,
attention_mask=padded_mask_1b,
position_ids=position_ids_1b,
cache_position=torch.arange(
part_a,
input_ids_shared_prefix.shape[-1],
device=torch_device,
),
past_key_values=past_key_values,
)
decoded_1b = [
self.tokenizer.decode(t)
for t in outs_1b.logits.argmax(-1)[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a
]
]
self.assertEqual(decoded, decoded_1b)
| transformers/tests/models/llama/test_modeling_llama.py/0 | {
"file_path": "transformers/tests/models/llama/test_modeling_llama.py",
"repo_id": "transformers",
"token_count": 12910
} | 511 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Llava-NeXT-Video model."""
import copy
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from parameterized import parameterized
from transformers import (
AutoProcessor,
LlavaNextVideoConfig,
LlavaNextVideoForConditionalGeneration,
LlavaNextVideoModel,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_bitsandbytes,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class LlavaNextVideoVisionText2TextModelTester:
def __init__(
self,
parent,
ignore_index=-100,
image_token_index=0,
video_token_index=1,
projector_hidden_act="gelu",
seq_length=7,
vision_feature_select_strategy="default",
vision_feature_layer=-1,
text_config={
"model_type": "llama",
"seq_length": 7,
"is_training": True,
"use_input_mask": True,
"use_token_type_ids": False,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 580,
"type_vocab_size": 16,
"type_sequence_label_size": 2,
"initializer_range": 0.02,
"num_labels": 3,
"num_choices": 4,
"pad_token_id": 3,
},
is_training=True,
vision_config={
"image_size": 8,
"patch_size": 4,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"projection_dim": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.text_config = text_config
self.vision_config = vision_config
self.pad_token_id = text_config["pad_token_id"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 3
self.num_channels = 3
self.image_size = 30
self.image_grid_pinpoints = [[16, 16]]
self.num_image_tokens = 24
self.num_video_tokens = 8
self.seq_length = seq_length + self.num_image_tokens + self.num_video_tokens
def get_config(self):
return LlavaNextVideoConfig(
text_config=self.text_config,
vision_config=self.vision_config,
ignore_index=self.ignore_index,
image_token_index=self.image_token_index,
video_token_index=self.video_token_index,
projector_hidden_act=self.projector_hidden_act,
vision_feature_select_strategy=self.vision_feature_select_strategy,
vision_feature_layer=self.vision_feature_layer,
image_grid_pinpoints=self.image_grid_pinpoints,
video_seq_length=self.num_video_tokens,
image_seq_length=self.num_image_tokens,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
5,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
pixel_values_videos = floats_tensor(
[
self.batch_size,
8,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values, pixel_values_videos
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_values_videos = self.prepare_config_and_inputs()
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
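        # Scrub any randomly drawn image/video token ids from the text, then place the expected runs of
        # image tokens followed by video tokens at the start of every sequence.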
input_ids[input_ids == config.image_token_index] = self.pad_token_id
input_ids[input_ids == config.video_token_index] = self.pad_token_id
input_ids[:, : self.num_image_tokens] = config.image_token_index
input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_index
inputs_dict = {
"pixel_values": pixel_values,
"pixel_values_videos": pixel_values_videos,
"image_sizes": torch.tensor(
[[self.vision_config["image_size"], self.vision_config["image_size"]]] * self.batch_size
),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class LlavaNextVideoForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `LlavaNextVideoForConditionalGeneration`.
"""
all_model_classes = (
(
LlavaNextVideoModel,
LlavaNextVideoForConditionalGeneration,
)
if is_torch_available()
else ()
)
test_pruning = False
test_head_masking = False
_is_composite = True
def setUp(self):
self.model_tester = LlavaNextVideoVisionText2TextModelTester(self)
common_properties = ["image_token_index", "video_token_index", "vision_feature_layer", "image_seq_length"]
self.config_tester = ConfigTester(
self, config_class=LlavaNextVideoConfig, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if "image_newline" in name:
continue
elif param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_mismatching_num_image_tokens(self):
"""
        Tests that VLMs throw an error with an explicit message saying what is wrong
        when the number of images doesn't match the number of image tokens in the text.
        Also covers multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
            curr_input_dict = copy.deepcopy(input_dict)  # modified in-place further below
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
image_sizes = curr_input_dict["image_sizes"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_sizes = torch.cat([image_sizes, image_sizes], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
def test_odd_sized_image(self):
# prepare model configuration
config = self.model_tester.get_config()
# prepare input
num_image_tokens = 24
pixel_values = floats_tensor([1, 5, 3, config.vision_config.image_size, config.vision_config.image_size])
input_ids = ids_tensor([1, 64], config.text_config.vocab_size - 2) + 2
input_ids[:, :num_image_tokens] = config.image_token_index
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"image_sizes": torch.tensor([[13, 16]]), # odd-sized image
"input_ids": input_ids,
"attention_mask": attention_mask,
}
# forward with odd-sized image input
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model(**inputs_dict)
@parameterized.expand(
[
(-1,),
([-1],),
([-1, -2],),
],
)
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip("FlashAttention only support fp16 and bf16 data type")
def test_flash_attn_2_fp32_ln(self):
pass
@unittest.skip(
"VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@require_torch
class LlavaNextVideoForConditionalGenerationIntegrationTest(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
image_file = hf_hub_download(
repo_id="raushan-testing-hf/images_test", filename="llava_v1_5_radar.jpg", repo_type="dataset"
)
video_file = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset"
)
self.image = Image.open(image_file)
self.video = np.load(video_file)
self.prompt_image = "USER: <image>\nWhat is shown in this image? ASSISTANT:"
self.prompt_video = "USER: <video>\nWhy is this video funny? ASSISTANT:"
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
@require_bitsandbytes
def test_small_model_integration_test(self):
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
"llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./"
)
inputs = self.processor(text=self.prompt_video, videos=self.video, return_tensors="pt")
# verify single forward pass
inputs = inputs.to(torch_device)
with torch.no_grad():
output = model(**inputs)
# verify generation
output = model.generate(**inputs, do_sample=False, max_new_tokens=40)
expected_decoded_text = Expectations(
{
("cuda", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a book while another child is attempting to read the same book. The child who is reading the book seems",
("xpu", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a book while wearing a pair of glasses that are too large for them. The glasses are",
("rocm", (9, 5)): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and adorable behavior of the young child. The child is seen reading a book, but instead of turning the pages like one would typically do, they",
}
).get_expectation() # fmt: off
decoded_text = self.processor.decode(output[0], skip_special_tokens=True)
self.assertEqual(decoded_text, expected_decoded_text)
@slow
@require_bitsandbytes
def test_small_model_integration_test_batch(self):
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
"llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./"
)
inputs = self.processor(
text=[self.prompt_video, self.prompt_video],
videos=[self.video, self.video],
return_tensors="pt",
padding=True,
).to(torch_device)
output = model.generate(**inputs, do_sample=False, max_new_tokens=20)
decoded_text = self.processor.batch_decode(output, skip_special_tokens=True)
expected_decoded_text = Expectations(
{
("cuda", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a",
("rocm", (9, 5)): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and adorable behavior of the young child. The",
}
).get_expectation() # fmt: off
EXPECTED_DECODED_TEXT = [expected_decoded_text, expected_decoded_text]
self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)
@slow
@require_bitsandbytes
def test_small_model_integration_test_batch_different_vision_types(self):
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
"llava-hf/LLaVA-NeXT-Video-7B-hf",
load_in_4bit=True,
cache_dir="./",
)
inputs = self.processor(
text=[self.prompt_image, self.prompt_video],
images=self.image,
videos=self.video,
return_tensors="pt",
padding=True,
).to(torch_device)
# check loss when labels are passed
inputs["labels"] = inputs["input_ids"].clone()
with torch.no_grad():
output = model(**inputs)
self.assertTrue(output.loss is not None)
# verify generation
output = model.generate(**inputs, do_sample=False, max_new_tokens=50)
EXPECTED_DECODED_TEXT = Expectations(
{
("rocm", (9, 5)): "USER: \nWhat is shown in this image? ASSISTANT: The image displays a chart that appears to be a comparison of different models or versions of a machine learning (ML) model, likely a neural network, based on their performance on a task or dataset. The chart is a scatter plot with axes labeled",
("cuda", None): 'USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a machine learning model\'s performance on a task, likely related to natural language processing or text understanding. It shows a scatter plot with two axes, one labeled "BLIP-2"',
}
).get_expectation() # fmt: off
decoded_text = self.processor.decode(output[0], skip_special_tokens=True)
self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)
@slow
@require_bitsandbytes
def test_small_model_integration_test_batch_matches_single(self):
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
"llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./"
)
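# With greedy decoding, the video prompt should produce the same text whether it is run alone or inside a mixed batch.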
inputs_batched = self.processor(
text=[self.prompt_video, self.prompt_image],
images=[self.image],
videos=[self.video],
return_tensors="pt",
padding=True,
).to(torch_device)
inputs_single = self.processor(text=self.prompt_video, videos=[self.video], return_tensors="pt").to(
torch_device
)
# verify generation
output_batched = model.generate(**inputs_batched, do_sample=False, max_new_tokens=50)
output_single = model.generate(**inputs_single, do_sample=False, max_new_tokens=50)
self.assertEqual(
self.processor.decode(output_batched[0], skip_special_tokens=True),
self.processor.decode(output_single[0], skip_special_tokens=True),
)
| transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py/0 | {
"file_path": "transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py",
"repo_id": "transformers",
"token_count": 8712
} | 512 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Moonshine model."""
import copy
import unittest
from transformers import MoonshineConfig, is_torch_available
from transformers.testing_utils import Expectations, cleanup, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoProcessor,
MoonshineForConditionalGeneration,
MoonshineModel,
)
from datasets import load_dataset
class MoonshineModelTester:
def __init__(
self,
parent,
batch_size=3, # need batch_size != num_hidden_layers
seq_length=1000,
is_training=False,
use_labels=False,
vocab_size=147,
hidden_size=8,
intermediate_size=32,
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
encoder_hidden_act="gelu",
decoder_hidden_act="silu",
decoder_start_token_id=85,
bos_token_id=98,
eos_token_id=98,
pad_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.use_labels = use_labels
self.vocab_size = vocab_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.encoder_hidden_act = encoder_hidden_act
self.decoder_hidden_act = decoder_hidden_act
self.decoder_start_token_id = decoder_start_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
def prepare_config_and_inputs(self):
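# Inputs are raw waveform values of shape (batch_size, seq_length); the decoder is primed with a single start token per sample.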
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device)
decoder_attention_mask = decoder_input_ids.ne(self.pad_token_id)
config = self.get_config()
return config, input_values, attention_mask, decoder_input_ids, decoder_attention_mask
def get_config(self):
return MoonshineConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
encoder_num_hidden_layers=self.num_hidden_layers,
decoder_num_hidden_layers=self.num_hidden_layers,
encoder_num_attention_heads=self.num_attention_heads,
decoder_num_attention_heads=self.num_attention_heads,
encoder_num_key_value_heads=self.num_key_value_heads,
decoder_num_key_value_heads=self.num_key_value_heads,
encoder_hidden_act=self.encoder_hidden_act,
decoder_hidden_act=self.decoder_hidden_act,
decoder_start_token_id=self.decoder_start_token_id,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
)
def check_output_attentions(self, config, input_values, attention_mask):
model = MoonshineModel(config=config)
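# Even with layerdrop set to 1.0 (layers skipped in training mode), attentions should still be returned.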
model.config.layerdrop = 1.0
model.to(torch_device)
model.train()
outputs = model(input_values, attention_mask=attention_mask, output_attentions=True)
self.parent.assertTrue(len(outputs.attentions) > 0)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask, decoder_input_ids, decoder_attention_mask = (
self.prepare_config_and_inputs()
)
inputs_dict = {
"input_values": input_values,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
return config, inputs_dict
@require_torch
class MoonshineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MoonshineModel, MoonshineForConditionalGeneration) if is_torch_available() else ()
# Doesn't run generation tests. TODO (eustache): remove this line and then make CI green
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"automatic-speech-recognition": MoonshineForConditionalGeneration,
"feature-extraction": MoonshineModel,
}
if is_torch_available()
else {}
)
test_pruning = False
test_headmasking = False
def setUp(self):
self.model_tester = MoonshineModelTester(self)
self.config_tester = ConfigTester(self, config_class=MoonshineConfig)
@unittest.skip("failing. Will fix only when the community opens an issue for it.")
def test_torchscript_output_hidden_state(self):
pass
@unittest.skip("failing. Will fix only when the community opens an issue for it.")
def test_torchscript_simple(self):
pass
def test_config(self):
self.config_tester.run_common_tests()
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
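# The audio encoder downsamples the input, so attention shapes are compared against the subsampled sequence length.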
subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length)
subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also works when set via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
out_len = len(outputs)
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
subsampled_encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
# Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_hidden_states_output
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[subsampled_seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
decoder_input_ids = inputs.pop("decoder_input_ids", None)
inputs.pop("decoder_attention_mask", None)
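# Exercise the decoder_inputs_embeds path by embedding the decoder ids manually instead of passing ids.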
wte = model.get_input_embeddings()
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
# Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_resize_tokens_embeddings
def test_resize_tokens_embeddings(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
self.skipTest(reason="test_resize_embeddings is False")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
# Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# make sure that decoder_input_ids are resized
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
# Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_resize_embeddings_untied
def test_resize_embeddings_untied(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
self.skipTest(reason="test_resize_embeddings is False")
original_config.tie_word_embeddings = False
# if the model cannot untie its embeddings -> skip the test
if original_config.tie_word_embeddings:
self.skipTest(reason="Model cannot untie embeddings")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if the model has no output embeddings -> skip this model class
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Input ids should be clamped to the maximum size of the vocabulary
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class MoonshineModelIntegrationTests(unittest.TestCase):
def setUp(self):
self.processor_tiny = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
self.processor_base = AutoProcessor.from_pretrained("UsefulSensors/moonshine-base")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@slow
def test_tiny_logits_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
inputs = self.processor_tiny(self._load_datasamples(1), return_tensors="pt")
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
expectations = Expectations(
{
(None, None): [-9.1106, 4.5542, 6.3892, -6.8139, -7.2456, -7.9074, -7.2839, -7.6043, -8.0384, -7.8351, -7.3867, -7.2450, -7.7420, -7.3912, -7.3866, -7.6979, -7.6420, -7.0504, -7.3979, -7.2483, -8.0796, -7.3300, -7.3672, -6.8765, -7.6876, -7.2682, -6.9866, -6.7457, -7.6855, -7.3050],
("cuda", 8): [-9.1107, 4.5538, 6.3902, -6.8141, -7.2459, -7.9076, -7.2842, -7.6045, -8.0387, -7.8354, -7.3869, -7.2453, -7.7423, -7.3914, -7.3869, -7.6982, -7.6422, -7.0507, -7.3982, -7.2486, -8.0798, -7.3302, -7.3675, -6.8769, -7.6878, -7.2684, -6.9868, -6.7459, -7.6858, -7.3052],
}
)
EXPECTED_LOGITS = torch.tensor(expectations.get_expectation()).to(torch_device)
# fmt: on
torch.testing.assert_close(outputs.logits[0][0, :30], EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
@slow
def test_base_logits_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
inputs = self.processor_base(self._load_datasamples(1), return_tensors="pt")
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
EXPECTED_LOGITS = torch.tensor([
-6.7336, 1.9482, 5.2448, -8.0277, -7.9167, -7.8956, -7.9649, -7.9348, -8.1312, -8.0616,
-8.1070, -7.7696, -7.8809, -7.9450, -8.1013, -7.8177, -7.8598, -7.8257, -7.8729, -7.9657,
-7.9310, -8.1024, -7.8699, -7.8231, -8.0752, -7.9764, -7.8127, -8.0536, -7.9492, -7.9290,
])
# fmt: on
torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_tiny_logits_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
inputs = self.processor_tiny(self._load_datasamples(4), return_tensors="pt", padding=True)
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
EXPECTED_LOGITS = torch.tensor(
[
[-8.5973, 4.8608, 5.8845, -6.6182, -7.0376, -7.7120, -7.0638, -7.3837, -7.8328, -7.6114],
[-4.3157, -2.4944, 8.4917, -6.4806, -7.0952, -6.7500, -6.1084, -6.6484, -6.9868, -6.5919],
[-10.0086, 3.2859, 0.7345, -6.5557, -6.8514, -6.5308, -6.4172, -6.9484, -6.6214, -6.6229],
[-11.1003, 3.9395, 0.6672, -5.0150, -5.3939, -5.4103, -5.2240, -5.4407, -5.2204, -5.2706],
],
)
# fmt: on
torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
@slow
def test_base_logits_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
inputs = self.processor_base(self._load_datasamples(4), return_tensors="pt", padding=True)
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
EXPECTED_LOGITS = torch.tensor(
[
[-6.3602, 1.8383, 5.2615, -7.9576, -7.8442, -7.8238, -7.9014, -7.8645, -8.0550, -7.9963],
[-6.1725, -0.6274, 8.1798, -6.8570, -6.8078, -6.7915, -6.9099, -6.8980, -6.9760, -6.8264],
[-7.3186, 3.1192, 3.8938, -5.7208, -5.8429, -5.7610, -5.9997, -5.8213, -5.8616, -5.8720],
[-7.3432, 1.0402, 3.9912, -5.4177, -5.4890, -5.4573, -5.6516, -5.4776, -5.5079, -5.5391],
]
)
# fmt: on
torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
@slow
def test_tiny_generation_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
audio_array = self._load_datasamples(1)
inputs = self.processor_tiny(audio_array, return_tensors="pt")
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)[0]
EXPECTED_TRANSCRIPT = "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome"
self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
@slow
def test_base_generation_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
audio_array = self._load_datasamples(1)
inputs = self.processor_base(audio_array, return_tensors="pt")
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_base.batch_decode(generated_ids, skip_special_tokens=True)[0]
EXPECTED_TRANSCRIPT = "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome"
self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
@slow
def test_tiny_generation_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
audio_array = self._load_datasamples(4)
inputs = self.processor_tiny(audio_array, return_tensors="pt", padding=True)
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)
# fmt: off
EXPECTED_TRANSCRIPT = [
"Mr. Quilter is the apostle of the middle classes, and we are glad to welcome",
"Nor is Mr. Quilter's manner less interesting than his matter.",
"He tells us that at this festive season of the year, with Christmas and Rose beef lo",
"He has grave doubts whether Sir Frederick Layton's work is really Greek after all,",
]
# fmt: on
self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
@slow
def test_base_generation_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
audio_array = self._load_datasamples(4)
inputs = self.processor_base(audio_array, return_tensors="pt", padding=True)
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_base.batch_decode(generated_ids, skip_special_tokens=True)
# fmt: off
EXPECTED_TRANSCRIPT = [
"Mr. Quilter is the apostle of the middle classes, and we are glad to welcome",
"Nor is Mr. Quilter's manner less interesting than his matter.",
"He tells us that at this festive season of the year, with Christmas and rose beef lo",
"He has grave doubts whether Sir Frederick Layton's work is really Greek after all,",
]
# fmt: on
self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
| transformers/tests/models/moonshine/test_modeling_moonshine.py/0 | {
"file_path": "transformers/tests/models/moonshine/test_modeling_moonshine.py",
"repo_id": "transformers",
"token_count": 12518
} | 513 |
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import requests
from huggingface_hub import hf_hub_download
from transformers.image_utils import SizeDict
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import cached_property, is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import NougatImageProcessor
if is_torchvision_available():
from transformers import NougatImageProcessorFast
class NougatImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_crop_margin=True,
do_resize=True,
size=None,
do_thumbnail=True,
do_align_long_axis: bool = False,
do_pad=True,
do_normalize: bool = True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_crop_margin = do_crop_margin
self.do_resize = do_resize
self.size = size
self.do_thumbnail = do_thumbnail
self.do_align_long_axis = do_align_long_axis
self.do_pad = do_pad
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.data_format = "channels_first"
self.input_data_format = "channels_first"
def prepare_image_processor_dict(self):
return {
"do_crop_margin": self.do_crop_margin,
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_long_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_dummy_image(self):
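# Pin the dataset revision so the fixture image cannot change under the test.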
revision = "ec57bf8c8b1653a209c13f6e9ee66b12df0fc2db"
filepath = hf_hub_download(
repo_id="hf-internal-testing/fixtures_docvqa",
filename="nougat_pdf.png",
repo_type="dataset",
revision=revision,
)
image = Image.open(filepath).convert("RGB")
return image
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class NougatImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = NougatImageProcessor if is_vision_available() else None
fast_image_processing_class = NougatImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = NougatImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
@cached_property
def image_processor(self):
return self.image_processing_class(**self.image_processor_dict)
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
kwargs = dict(self.image_processor_dict)
kwargs.pop("size", None)
image_processor = self.image_processing_class(**kwargs, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_expected_output(self):
dummy_image = self.image_processor_tester.prepare_dummy_image()
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
inputs = image_processor(dummy_image, return_tensors="pt")
torch.testing.assert_close(inputs["pixel_values"].mean(), torch.tensor(0.4906), rtol=1e-3, atol=1e-3)
def test_crop_margin_all_white(self):
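# An all-white image contains no content to crop, so crop_margin should return it unchanged.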
image = np.uint8(np.ones((3, 100, 100)) * 255)
for image_processing_class in self.image_processor_list:
if image_processing_class == NougatImageProcessorFast:
image = torch.from_numpy(image)
image_processor = image_processing_class(**self.image_processor_dict)
cropped_image = image_processor.crop_margin(image)
self.assertTrue(torch.equal(image, cropped_image))
else:
image_processor = image_processing_class(**self.image_processor_dict)
cropped_image = image_processor.crop_margin(image)
self.assertTrue(np.array_equal(image, cropped_image))
def test_crop_margin_centered_black_square(self):
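# crop_margin should crop tightly around the black square centered in a white image.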
image = np.ones((3, 100, 100), dtype=np.uint8) * 255
image[:, 45:55, 45:55] = 0
expected_cropped = image[:, 45:55, 45:55]
for image_processing_class in self.image_processor_list:
if image_processing_class == NougatImageProcessorFast:
image = torch.from_numpy(image)
expected_cropped = torch.from_numpy(expected_cropped)
image_processor = image_processing_class(**self.image_processor_dict)
cropped_image = image_processor.crop_margin(image)
self.assertTrue(torch.equal(expected_cropped, cropped_image))
else:
image_processor = image_processing_class(**self.image_processor_dict)
cropped_image = image_processor.crop_margin(image)
self.assertTrue(np.array_equal(expected_cropped, cropped_image))
def test_align_long_axis_no_rotation(self):
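# The image and the target size share the same orientation, so align_long_axis should leave the shape untouched.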
image = np.uint8(np.ones((3, 100, 200)) * 255)
for image_processing_class in self.image_processor_list:
if image_processing_class == NougatImageProcessorFast:
image = torch.from_numpy(image)
size = SizeDict(height=200, width=300)
image_processor = image_processing_class(**self.image_processor_dict)
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual(image.shape, aligned_image.shape)
else:
size = {"height": 200, "width": 300}
image_processor = image_processing_class(**self.image_processor_dict)
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual(image.shape, aligned_image.shape)
def test_align_long_axis_with_rotation(self):
image = np.uint8(np.ones((3, 200, 100)) * 255)
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
if image_processing_class == NougatImageProcessorFast:
image = torch.from_numpy(image)
size = SizeDict(height=300, width=200)
image_processor = image_processing_class(**self.image_processor_dict)
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual(torch.Size([3, 200, 100]), aligned_image.shape)
else:
size = {"height": 300, "width": 200}
image_processor = image_processing_class(**self.image_processor_dict)
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual((3, 200, 100), aligned_image.shape)
def test_align_long_axis_data_format(self):
image = np.uint8(np.ones((3, 100, 200)) * 255)
for image_processing_class in self.image_processor_list:
if image_processing_class == NougatImageProcessorFast:
image = torch.from_numpy(image)
image_processor = image_processing_class(**self.image_processor_dict)
size = SizeDict(height=200, width=300)
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual(torch.Size([3, 100, 200]), aligned_image.shape)
else:
size = {"height": 200, "width": 300}
data_format = "channels_first"
image_processor = image_processing_class(**self.image_processor_dict)
aligned_image = image_processor.align_long_axis(image, size, data_format)
self.assertEqual((3, 100, 200), aligned_image.shape)
def prepare_dummy_np_image(self):
revision = "ec57bf8c8b1653a209c13f6e9ee66b12df0fc2db"
filepath = hf_hub_download(
repo_id="hf-internal-testing/fixtures_docvqa",
filename="nougat_pdf.png",
repo_type="dataset",
revision=revision,
)
image = Image.open(filepath).convert("RGB")
return np.array(image).transpose(2, 0, 1)
def test_crop_margin_equality_cv2_python(self):
image = self.prepare_dummy_np_image()
for image_processing_class in self.image_processor_list:
if image_processing_class == NougatImageProcessorFast:
image = torch.from_numpy(image)
image_processor = image_processing_class(**self.image_processor_dict)
image_cropped_python = image_processor.crop_margin(image)
self.assertEqual(image_cropped_python.shape, torch.Size([3, 850, 685]))
self.assertAlmostEqual(image_cropped_python.float().mean().item(), 237.43881150708458, delta=0.001)
else:
image_processor = image_processing_class(**self.image_processor_dict)
image_cropped_python = image_processor.crop_margin(image)
self.assertEqual(image_cropped_python.shape, (3, 850, 685))
self.assertAlmostEqual(image_cropped_python.mean(), 237.43881150708458, delta=0.001)
def test_call_numpy_4_channels(self):
for image_processing_class in self.image_processor_list:
if image_processing_class == NougatImageProcessor:
# Test that images with an arbitrary number of channels can be processed
# Initialize image_processing
image_processor = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processor_tester.num_channels = 4
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
# Test not batched input
encoded_images = image_processor(
image_inputs[0],
return_tensors="pt",
input_data_format="channels_last",
image_mean=0,
image_std=1,
).pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processor(
image_inputs,
return_tensors="pt",
input_data_format="channels_last",
image_mean=0,
image_std=1,
).pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = Image.open(
requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
# Add a larger than usual tolerance because the slow processor uses reducing_gap=2.0 during resizing.
torch.testing.assert_close(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=2e-1, rtol=0)
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 2e-2
)
| transformers/tests/models/nougat/test_image_processing_nougat.py/0 | {
"file_path": "transformers/tests/models/nougat/test_image_processing_nougat.py",
"repo_id": "transformers",
"token_count": 6624
} | 514 |
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import CLIPTokenizer, OneFormerImageProcessor, OneFormerProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
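# Build the metadata mapping expected by OneFormerImageProcessor: id -> class name, plus the list of class names and the ids of "thing" classes.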
with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset")) as f:
class_info = json.load(f)
metadata = {}
class_names = []
thing_ids = []
for key, info in class_info.items():
metadata[key] = info["name"]
class_names.append(info["name"])
if info["isthing"]:
thing_ids.append(int(key))
metadata["thing_ids"] = thing_ids
metadata["class_names"] = class_names
return metadata
class OneFormerProcessorTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
size=None,
do_resize=True,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
num_labels=10,
do_reduce_labels=False,
ignore_index=255,
max_seq_length=77,
task_seq_length=77,
model_repo="shi-labs/oneformer_ade20k_swin_tiny",
class_info_file="ade20k_panoptic.json",
num_text=10,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.max_seq_length = max_seq_length
self.task_seq_length = task_seq_length
self.class_info_file = class_info_file
self.metadata = prepare_metadata(class_info_file)
self.num_text = num_text
self.model_repo = model_repo
# for the post_process_functions
self.batch_size = 2
self.num_queries = 10
self.num_classes = 10
self.height = 3
self.width = 4
self.num_labels = num_labels
self.do_reduce_labels = do_reduce_labels
self.ignore_index = ignore_index
def prepare_processor_dict(self):
image_processor_dict = {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
image_processor = OneFormerImageProcessor(**image_processor_dict)
tokenizer = CLIPTokenizer.from_pretrained(self.model_repo)
return {
"image_processor": image_processor,
"tokenizer": tokenizer,
"max_seq_length": self.max_seq_length,
"task_seq_length": self.task_seq_length,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to OneFormerProcessor,
assuming do_resize is set to True and size defines a shortest edge. It also provides the expected sequence length
for the task_inputs and text_list_input.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width, expected_sequence_length = self.get_expected_values([image])
expected_values.append((expected_height, expected_width, expected_sequence_length))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
expected_sequence_length = self.max_seq_length
return expected_height, expected_width, expected_sequence_length
def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
)
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class OneFormerProcessingTest(unittest.TestCase):
processing_class = OneFormerProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_feature_extraction_common.test_feat_extract_to_json_string
feature_extraction_class = processing_class
def setUp(self):
self.processing_tester = OneFormerProcessorTester(self)
@property
def processor_dict(self):
return self.processing_tester.prepare_processor_dict()
def test_feat_extract_properties(self):
processor = self.processing_class(**self.processor_dict)
self.assertTrue(hasattr(processor, "image_processor"))
self.assertTrue(hasattr(processor, "tokenizer"))
self.assertTrue(hasattr(processor, "max_seq_length"))
self.assertTrue(hasattr(processor, "task_seq_length"))
@unittest.skip
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize processor
processor = self.processing_class(**self.processor_dict)
# create random PIL images
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs
)
self.assertEqual(
encoded_images.shape,
(1, self.processing_tester.num_channels, expected_height, expected_width),
)
tokenized_task_inputs = processor(image_inputs[0], ["semantic"], return_tensors="pt").task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(1, expected_sequence_length),
)
# Test batched
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs, batched=True
)
encoded_images = processor(image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.processing_tester.batch_size,
self.processing_tester.num_channels,
expected_height,
expected_width,
),
)
tokenized_task_inputs = processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
).task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(self.processing_tester.batch_size, expected_sequence_length),
)
def test_call_numpy(self):
# Initialize processor
processor = self.processing_class(**self.processor_dict)
# create random numpy tensors
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs
)
self.assertEqual(
encoded_images.shape,
(1, self.processing_tester.num_channels, expected_height, expected_width),
)
tokenized_task_inputs = processor(image_inputs[0], ["semantic"], return_tensors="pt").task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(1, expected_sequence_length),
)
# Test batched
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs, batched=True
)
encoded_images = processor(image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.processing_tester.batch_size,
self.processing_tester.num_channels,
expected_height,
expected_width,
),
)
tokenized_task_inputs = processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
).task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(self.processing_tester.batch_size, expected_sequence_length),
)
def test_call_pytorch(self):
# Initialize processor
processor = self.processing_class(**self.processor_dict)
# create random PyTorch tensors
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs
)
self.assertEqual(
encoded_images.shape,
(1, self.processing_tester.num_channels, expected_height, expected_width),
)
tokenized_task_inputs = processor(image_inputs[0], ["semantic"], return_tensors="pt").task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(1, expected_sequence_length),
)
# Test batched
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs, batched=True
)
encoded_images = processor(image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.processing_tester.batch_size,
self.processing_tester.num_channels,
expected_height,
expected_width,
),
)
tokenized_task_inputs = processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
).task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(self.processing_tester.batch_size, expected_sequence_length),
)
def comm_get_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
processor = self.processing_class(**self.processor_dict)
# prepare image and target
num_labels = self.processing_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
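# Map 2 * num_labels instance ids back onto the num_labels semantic classes.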
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
annotations = [
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
inputs = processor(
image_inputs,
["semantic"] * len(image_inputs),
annotations,
return_tensors="pt",
instance_id_to_semantic_id=instance_id_to_semantic_id,
pad_and_return_pixel_mask=True,
)
return inputs
@unittest.skip
def test_init_without_params(self):
pass
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
feat_extract_first.save_pretrained(tmpdirname)
check_json_file_has_correct_format(os.path.join(tmpdirname, "preprocessor_config.json"))
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
self.assertEqual(feat_extract_second.image_processor.to_dict(), feat_extract_first.image_processor.to_dict())
self.assertIsInstance(feat_extract_first.image_processor, OneFormerImageProcessor)
self.assertIsInstance(feat_extract_first.tokenizer, CLIPTokenizer)
def test_call_with_segmentation_maps(self):
def common(is_instance_map=False, segmentation_type=None):
inputs = self.comm_get_processor_inputs(
with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
)
mask_labels = inputs["mask_labels"]
class_labels = inputs["class_labels"]
pixel_values = inputs["pixel_values"]
text_inputs = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
self.assertEqual(text_input.shape[0], self.processing_tester.num_text)
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
def test_integration_semantic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
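# Panoptic annotations encode segment ids as 24-bit RGB values: id = R + 256 * G + 256**2 * B.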
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = processor.encode_inputs(
pixel_values_list,
["semantic", "semantic"],
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values, task inputs, text inputs and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
self.assertEqual(inputs["task_inputs"].shape, (2, 77))
self.assertEqual(inputs["text_inputs"].shape, (2, self.processing_tester.num_text, 77))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 12, 3, 5, 0, 43, 96, 104, 31, 125, 138, 87, 149]) # noqa: E231 # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 67, 82, 17, 12, 42, 3, 14, 5, 0, 115, 43, 8, 138, 125, 143]) # noqa: E231 # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
self.assertEqual(inputs["task_inputs"][0].sum().item(), 141082)
self.assertEqual(inputs["task_inputs"][0].sum().item(), inputs["task_inputs"][1].sum().item())
# verify the text inputs
self.assertEqual(len(inputs["text_inputs"]), 2)
self.assertEqual(inputs["text_inputs"][0].sum().item(), 1095752)
self.assertEqual(inputs["text_inputs"][1].sum().item(), 1062468)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (16, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (16, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_integration_instance_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = processor.encode_inputs(
pixel_values_list,
["instance", "instance"],
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values, task inputs, text inputs and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
self.assertEqual(inputs["task_inputs"].shape, (2, 77))
self.assertEqual(inputs["text_inputs"].shape, (2, self.processing_tester.num_text, 77))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 43, 43, 43, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 12, 12, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
self.assertEqual(inputs["task_inputs"][0].sum().item(), 144985)
self.assertEqual(inputs["task_inputs"][0].sum().item(), inputs["task_inputs"][1].sum().item())
# verify the text inputs
self.assertEqual(len(inputs["text_inputs"]), 2)
self.assertEqual(inputs["text_inputs"][0].sum().item(), 1037040)
self.assertEqual(inputs["text_inputs"][1].sum().item(), 1044078)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (73, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (57, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 35040.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 98228.0)
def test_integration_panoptic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = processor.encode_inputs(
pixel_values_list,
["panoptic", "panoptic"],
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values, task inputs, text inputs and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
self.assertEqual(inputs["task_inputs"].shape, (2, 77))
self.assertEqual(inputs["text_inputs"].shape, (2, self.processing_tester.num_text, 77))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
self.assertEqual(inputs["task_inputs"][0].sum().item(), 136240)
self.assertEqual(inputs["task_inputs"][0].sum().item(), inputs["task_inputs"][1].sum().item())
# verify the text inputs
self.assertEqual(len(inputs["text_inputs"]), 2)
self.assertEqual(inputs["text_inputs"][0].sum().item(), 1048653)
self.assertEqual(inputs["text_inputs"][1].sum().item(), 1067160)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
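# The run-length encoding is a flat list of (1-indexed start, run length) pairs over the
# row-major flattened mask: the 20 leading zeros put the first run of ones at position 21,
# and the 30 ones ending row 0 merge with the 15 ones opening row 1 into a single run of 45.
# The remaining pair (not asserted here) should be (251, 10) for the ones in row 5.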
def test_post_process_semantic_segmentation(self):
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
outputs = self.processing_tester.get_fake_oneformer_outputs()
segmentation = processor.post_process_semantic_segmentation(outputs)
self.assertEqual(len(segmentation), self.processing_tester.batch_size)
self.assertEqual(
segmentation[0].shape,
(
self.processing_tester.height,
self.processing_tester.width,
),
)
target_sizes = [(1, 4) for _ in range(self.processing_tester.batch_size)]
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
outputs = self.processing_tester.get_fake_oneformer_outputs()
segmentation = processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (self.processing_tester.height, self.processing_tester.width))
def test_post_process_panoptic_segmentation(self):
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
outputs = self.processing_tester.get_fake_oneformer_outputs()
segmentation = processor.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (self.processing_tester.height, self.processing_tester.width))
| transformers/tests/models/oneformer/test_processing_oneformer.py/0 | {
"file_path": "transformers/tests/models/oneformer/test_processing_oneformer.py",
"repo_id": "transformers",
"token_count": 15465
} | 515 |
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import Phi4MultimodalFeatureExtractor
from transformers.testing_utils import check_json_file_has_correct_format, require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class Phi4MultimodalFeatureExtractionTester:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=80,
hop_length=160,
win_length=400,
padding_value=0.0,
sampling_rate=16_000,
return_attention_mask=False,
do_normalize=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
self.feature_size = feature_size
self.win_length = win_length
self.hop_length = hop_length
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"win_length": self.win_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
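# e.g. with the tester defaults (min 400, max 2000, batch size 7) seq_length_diff is 266,
# giving sequence lengths 400, 666, 932, 1198, 1464, 1730 and 1996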
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
class Phi4MultimodalFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = Phi4MultimodalFeatureExtractor
def setUp(self):
self.feat_extract_tester = Phi4MultimodalFeatureExtractionTester(self)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_from_pretrained_kwargs(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(
tmpdirname, feature_size=2 * self.feat_extract_dict["feature_size"]
)
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(2 * mel_1.shape[1] == mel_2.shape[1])
def test_call(self):
# Tests that all calls wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
pt_speech_inputs = [torch.tensor(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
max_audio_len = (1200 - feature_extractor.win_length) // feature_extractor.hop_length + 1
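# with the tester defaults (win_length=400, hop_length=160) this is (1200 - 400) // 160 + 1 = 6 frames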
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
self.assertTrue(input_features.shape[-2] == max_audio_len)
# Test not batched input
encoded_sequences_1 = feature_extractor(pt_speech_inputs[0], return_tensors="np").audio_input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").audio_input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(pt_speech_inputs, return_tensors="np").audio_input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
pt_speech_inputs = torch.tensor(speech_inputs)
encoded_sequences_1 = feature_extractor(pt_speech_inputs, return_tensors="np").audio_input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
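# whether given Python lists or float64 numpy arrays, pad() is expected to downcast the features to float32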
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"audio_input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.audio_input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"audio_input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.audio_input_features.dtype == torch.float32)
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def test_torch_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
6.5243, 7.2267, 8.0917, 8.0041, 6.8247, 6.3216, 5.9599, 5.6770,
5.7441, 5.6138, 6.6793, 6.8597, 5.5375, 6.5330, 5.4880, 7.3280,
9.0736, 9.7665, 9.8773, 10.0828, 10.0518, 10.1736, 10.0145, 9.2545,
11.0495, 11.6518, 10.8654, 10.2293, 9.1045, 9.4819,
]
)
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = Phi4MultimodalFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").audio_input_features
self.assertEqual(input_features.shape, (1, 584, 80))
torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
@unittest.mock.patch(
"transformers.models.phi4_multimodal.feature_extraction_phi4_multimodal.is_torch_available", lambda: False
)
def test_numpy_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = np.array(
[
6.5242944, 7.226712, 8.091721, 8.004097, 6.824679, 6.3216243,
5.959894, 5.676975, 5.744051, 5.61384, 6.6793485, 6.8597484,
5.5374746, 6.532976, 5.4879804, 7.3279905, 9.073576, 9.766463,
9.877262, 10.082759, 10.051792, 10.173581, 10.0144825, 9.254548,
11.049487, 11.651841, 10.865354, 10.229329, 9.104464, 9.481946,
]
)
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = Phi4MultimodalFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="np").audio_input_features
self.assertEqual(input_features.shape, (1, 584, 80))
self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
@require_torch
def test_torch_integration_batch(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
6.5243, 7.2267, 8.0917, 8.0041, 6.8247, 6.3216, 5.9599, 5.6770,
5.7441, 5.6138, 6.6793, 6.8597, 5.5375, 6.5330, 5.4880, 7.3280,
9.0736, 9.7665, 9.8773, 10.0828, 10.0518, 10.1736, 10.0145, 9.2545,
11.0495, 11.6518, 10.8654, 10.2293, 9.1045, 9.4819
],
[
7.5105, 7.9453, 8.6161, 7.7666, 7.2572, 6.8823, 6.3242, 6.1899,
6.9706, 8.0810, 7.3227, 5.8580, 5.4990, 7.7373, 8.5447, 7.7203,
6.3230, 7.1995, 7.1463, 7.3153, 7.4054, 7.2855, 6.9396, 7.0255,
7.3285, 7.2748, 8.0742, 7.3998, 6.4813, 6.7509
],
[
7.7932, 8.1604, 8.7653, 8.2080, 7.2630, 6.4537, 4.8394, 6.3153,
8.0207, 8.3379, 6.0896, 5.7369, 5.8601, 4.7598, 4.8850, 6.2529,
3.9354, 6.1577, 7.9921, 9.6577, 10.1449, 9.1414, 9.3361, 9.0022,
9.2533, 10.0548, 10.4372, 8.8550, 9.1266, 9.9013
]
]
)
# fmt: on
input_speech = self._load_datasamples(3)
feature_extractor = Phi4MultimodalFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").audio_input_features
self.assertEqual(input_features.shape, (3, 1247, 80))
torch.testing.assert_close(input_features[:, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
| transformers/tests/models/phi4_multimodal/test_feature_extraction_phi4_multimodal.py/0 | {
"file_path": "transformers/tests/models/phi4_multimodal/test_feature_extraction_phi4_multimodal.py",
"repo_id": "transformers",
"token_count": 6115
} | 516 |
# Copyright 2022, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch PLBART model."""
import copy
import tempfile
import unittest
from transformers import PLBartConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_fp16,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
)
from transformers.models.plbart.modeling_plbart import PLBartDecoder, PLBartEncoder
def prepare_plbart_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
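# all-ones head masks mean no attention heads are masked out in either the encoder or the decoder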
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class PLBartModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids = input_ids.clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_plbart_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return PLBartConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = PLBartModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend next_input_ids accordingly
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append the new tokens to input_ids and the new mask to attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_with_past_key_values = model(
next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
)
output_from_past = output_with_past_key_values["last_hidden_state"]
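# the cached run only processes the 3 new tokens, so its outputs must match the last 3
# positions of the full-sequence forward pass (checked on a random feature slice below)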
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = PLBartModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = PLBartEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = PLBartDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class PLBartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(PLBartModel, PLBartForConditionalGeneration, PLBartForSequenceClassification) if is_torch_available() else ()
)
pipeline_model_mapping = (
{
"feature-extraction": PLBartModel,
"summarization": PLBartForConditionalGeneration,
"text-classification": PLBartForSequenceClassification,
"text-generation": PLBartForCausalLM,
"text2text-generation": PLBartForConditionalGeneration,
"translation": PLBartForConditionalGeneration,
"zero-shot": PLBartForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
fx_compatible = False # Fix me Michael
test_pruning = False
test_missing_keys = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `PLBartConfig` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def setUp(self):
self.model_tester = PLBartModelTester(self)
self.config_tester = ConfigTester(self, config_class=PLBartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
# PLBartForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (PLBartModel, PLBartForConditionalGeneration):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = PLBartForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@unittest.skip(reason="Failing since #26752")
def test_sample_generate(self):
pass
@unittest.skip(
reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
)
def test_load_save_without_tied_weights(self):
pass
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
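# values differ: the bare `raise` forces control into the except branch below,
# which computes the mismatch percentage and builds a readable error message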
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@require_sentencepiece
@require_tokenizers
class AbstractSeq2SeqIntegrationTest(unittest.TestCase):
maxDiff = 1000 # longer string compare tracebacks
checkpoint_name = None
@classmethod
def setUpClass(cls):
cls.tokenizer = AutoTokenizer.from_pretrained(cls.checkpoint_name, use_fast=False)
return cls
@cached_property
def model(self):
"""Only load the model if needed."""
model = PLBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartJavaCsIntegrationTest(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "uclanlp/plbart-java-cs"
src_text = [
"public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}",
"public int product(int a, int b, int c){return a*b*c;}",
]
tgt_text = [
"public int maximum(int a, int b, int c){return Math.Max(",
"public int Product(int a, int b, int c){return a * b *",
]
@slow
def test_java_cs_generate_one(self):
batch = self.tokenizer(
["public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}"], return_tensors="pt"
)
batch = batch.to(torch_device)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
# self.assertEqual(self.tgt_text[1], decoded[1])
@slow
def test_java_cs_generate_batch(self):
batch = self.tokenizer(self.src_text, return_tensors="pt", padding=True, truncation=True)
batch = batch.to(torch_device)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert self.tgt_text == decoded
def test_plbart_java_cs_config(self):
plbart_models = ["uclanlp/plbart-java-cs"]
expected = {"scale_embedding": True}
for name in plbart_models:
config = PLBartConfig.from_pretrained(name)
for k, v in expected.items():
try:
self.assertEqual(v, getattr(config, k))
except AssertionError as e:
e.args += (name, k)
raise
def test_plbart_fast_forward(self):
config = PLBartConfig(
vocab_size=99,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
add_final_layer_norm=True,
)
lm_model = PLBartForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(result.logits.shape, expected_shape)
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartBaseIntegrationTest(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "uclanlp/plbart-base"
src_text = ["Is 0 the first Fibonacci number ?", "Find the sum of all prime numbers ."]
tgt_text = ["0 the first Fibonacci number?", "the sum of all prime numbers.......... the the"]
def test_base_generate(self):
inputs = self.tokenizer([self.src_text[0]], return_tensors="pt").to(torch_device)
src_lan = self.tokenizer._convert_lang_code_special_format("en_XX")
translated_tokens = self.model.generate(
input_ids=inputs["input_ids"].to(torch_device),
decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan],
)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
@slow
def test_fill_mask(self):
inputs = self.tokenizer(["Is 0 the <mask> Fibonacci <mask> ?"], return_tensors="pt").to(torch_device)
src_lan = self.tokenizer._convert_lang_code_special_format("en_XX")
outputs = self.model.generate(
inputs["input_ids"], decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan], num_beams=1
)
prediction: str = self.tokenizer.batch_decode(
outputs, clean_up_tokenization_spaces=True, skip_special_tokens=True
)[0]
self.assertEqual(prediction, "0 0 the 0 the 0 the 0 the 0 the 0 the 0 the 0 the")
class PLBartStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=50,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = PLBartConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
num_hidden_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (config, input_ids, attention_mask, lm_labels)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = PLBartDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extend next_input_ids accordingly
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append the new token to input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = PLBartDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extend next_input_ids accordingly
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
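# the modified position lies in the masked-out second half, so it must not affect either
# forward pass; past and no-past outputs should still agree if the mask is honored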
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(
next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, attention_mask, lm_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class PLBartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (PLBartDecoder, PLBartForCausalLM) if is_torch_available() else ()
test_pruning = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = PLBartStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=PLBartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="Decoder cannot keep gradients")
def test_flex_attention_with_grads(self):
return
| transformers/tests/models/plbart/test_modeling_plbart.py/0 | {
"file_path": "transformers/tests/models/plbart/test_modeling_plbart.py",
"repo_id": "transformers",
"token_count": 12192
} | 517 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, AutoTokenizer, Qwen2AudioProcessor, WhisperFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_processing_common import ProcessorTesterMixin
@require_torch
@require_torchaudio
class Qwen2AudioProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Qwen2AudioProcessor
@classmethod
def setUpClass(cls):
cls.checkpoint = "Qwen/Qwen2-Audio-7B-Instruct"
cls.tmpdirname = tempfile.mkdtemp()
processor = Qwen2AudioProcessor.from_pretrained(cls.checkpoint)
processor.save_pretrained(cls.tmpdirname)
cls.audio_token = processor.audio_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_audio_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).audio_processor
def get_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def test_can_load_various_tokenizers(self):
processor = Qwen2AudioProcessor.from_pretrained(self.checkpoint)
tokenizer = AutoTokenizer.from_pretrained(self.checkpoint)
self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
def test_save_load_pretrained_default(self):
tokenizer = AutoTokenizer.from_pretrained(self.checkpoint)
processor = Qwen2AudioProcessor.from_pretrained(self.checkpoint)
feature_extractor = processor.feature_extractor
processor = Qwen2AudioProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
with tempfile.TemporaryDirectory() as tmpdir:
processor.save_pretrained(tmpdir)
processor = Qwen2AudioProcessor.from_pretrained(tmpdir)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor)
def test_tokenizer_integration(self):
slow_tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, use_fast=False)
fast_tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, from_slow=True, legacy=False)
prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<|audio_bos|><|AUDIO|><|audio_eos|>\nWhat is it in this audio?<|im_end|><|im_start|>assistant\n"
EXPECTED_OUTPUT = [
"<|im_start|>",
"system",
"Ċ",
"Answer",
"Ġthe",
"Ġquestions",
".",
"<|im_end|>",
"<|im_start|>",
"user",
"Ċ",
"<|audio_bos|>",
"<|AUDIO|>",
"<|audio_eos|>",
"Ċ",
"What",
"Ġis",
"Ġit",
"Ġin",
"Ġthis",
"Ġaudio",
"?",
"<|im_end|>",
"<|im_start|>",
"assistant",
"Ċ",
]
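# Note: "Ġ" and "Ċ" are byte-level BPE markers for a leading space and a newline respectively,
# so the token sequence above round-trips the prompt exactly.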
self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
def test_chat_template(self):
processor = AutoProcessor.from_pretrained(self.checkpoint)
expected_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>\nWhat's that sound?<|im_end|>\n<|im_start|>assistant\nIt is the sound of glass shattering.<|im_end|>\n<|im_start|>user\nAudio 2: <|audio_bos|><|AUDIO|><|audio_eos|>\nHow about this one?<|im_end|>\n<|im_start|>assistant\n"
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3",
},
{"type": "text", "text": "What's that sound?"},
],
},
{"role": "assistant", "content": "It is the sound of glass shattering."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav",
},
{"type": "text", "text": "How about this one?"},
],
},
]
formatted_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
self.assertEqual(expected_prompt, formatted_prompt)
| transformers/tests/models/qwen2_audio/test_processing_qwen2_audio.py/0 | {
"file_path": "transformers/tests/models/qwen2_audio/test_processing_qwen2_audio.py",
"repo_id": "transformers",
"token_count": 2588
} | 518 |
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datasets import load_dataset
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import SamImageProcessor
if is_torchvision_available():
from transformers import SamImageProcessorFast
class SamImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_pad=True,
pad_size=None,
mask_size=None,
mask_pad_size=None,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"longest_edge": 20}
pad_size = pad_size if pad_size is not None else {"height": 20, "width": 20}
mask_size = mask_size if mask_size is not None else {"longest_edge": 12}
mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 12, "width": 12}
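# assumed behaviour under test: images are resized so the longest edge matches `size` (20 here,
# aspect ratio kept), then padded to the fixed 20 x 20 `pad_size`; segmentation maps follow the
# analogous `mask_size` / `mask_pad_size` path (12 x 12)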
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_pad = do_pad
self.pad_size = pad_size
self.mask_size = mask_size
self.mask_pad_size = mask_pad_size
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"do_pad": self.do_pad,
"pad_size": self.pad_size,
"mask_size": self.mask_size,
"mask_pad_size": self.mask_pad_size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.pad_size["height"], self.pad_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
class SamImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = SamImageProcessor if is_vision_available() else None
fast_image_processing_class = SamImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = SamImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "pad_size"))
self.assertTrue(hasattr(image_processing, "mask_size"))
self.assertTrue(hasattr(image_processing, "mask_pad_size"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processing_class = image_processing_class(**self.image_processor_dict)
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"longest_edge": 20})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 42})
self.assertEqual(image_processor.size, {"longest_edge": 42})
def test_call_segmentation_maps(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processor
image_processor = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
maps = []
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
encoding = image_processor(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
image, segmentation_map = prepare_semantic_single_inputs()
encoding = image_processor(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
images, segmentation_maps = prepare_semantic_batch_inputs()
encoding = image_processor(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
2,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3
)
self.assertTrue(torch.allclose(image_encoding_slow.labels, image_encoding_fast.labels, atol=1e-1))
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
)
| transformers/tests/models/sam/test_image_processing_sam.py/0 | {
"file_path": "transformers/tests/models/sam/test_image_processing_sam.py",
"repo_id": "transformers",
"token_count": 5907
} | 519 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import SeamlessM4TFeatureExtractor, SeamlessM4TProcessor
from transformers.models.seamless_m4t import (
SeamlessM4TTokenizer,
SeamlessM4TTokenizerFast,
)
from transformers.testing_utils import require_torch
from .test_feature_extraction_seamless_m4t import floats_list
@require_torch
class SeamlessM4TProcessorTest(unittest.TestCase):
def setUp(self):
self.checkpoint = "facebook/hf-seamless-m4t-medium"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return SeamlessM4TTokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return SeamlessM4TFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = SeamlessM4TProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
tokenizer_instance = isinstance(processor.tokenizer, (SeamlessM4TTokenizerFast, SeamlessM4TTokenizer))
self.assertTrue(tokenizer_instance)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = SeamlessM4TProcessor(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = SeamlessM4TProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
tokenizer_instance = isinstance(processor.tokenizer, (SeamlessM4TTokenizerFast, SeamlessM4TTokenizer))
self.assertTrue(tokenizer_instance)
# Copied from test.models.whisper.test_processing_whisper.WhisperProcessorTest.test_feature_extractor with Whisper->SeamlessM4T
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(audios=raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
# Copied from test.models.whisper.test_processing_whisper.WhisperProcessorTest.test_tokenizer with Whisper->SeamlessM4T
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
# Copied from test.models.whisper.test_processing_whisper.WhisperProcessorTest.test_tokenizer_decode with Whisper->SeamlessM4T
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
| transformers/tests/models/seamless_m4t/test_processing_seamless_m4t.py/0 | {
"file_path": "transformers/tests/models/seamless_m4t/test_processing_seamless_m4t.py",
"repo_id": "transformers",
"token_count": 2018
} | 520 |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import SmolVLMVideoProcessor
class SmolVLMVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_normalize=True,
image_mean=IMAGENET_STANDARD_MEAN,
image_std=IMAGENET_STANDARD_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"longest_edge": 20}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.max_image_size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"max_image_size": self.max_image_size,
}
def expected_output_video_shape(self, videos):
return [
self.num_frames,
self.num_channels,
self.max_image_size["longest_edge"],
self.max_image_size["longest_edge"],
]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class SmolVLMVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = SmolVLMVideoProcessor if is_torchvision_available() else None
input_name = "pixel_values"
def setUp(self):
super().setUp()
self.video_processor_tester = SmolVLMVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"longest_edge": 20})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42)
self.assertEqual(video_processor.size, {"height": 42, "width": 42})
    # Overwritten because SmolVLM requires metadata no matter how we sample
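    # An illustrative metadata entry (the same structure is built further down in this test) is
    # {"duration": 2.0, "total_num_frames": 8, "fps": 4}, i.e. enough information to turn a
    # requested `fps` into a concrete number of frames to sample.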
def test_call_sample_frames(self):
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
prev_num_frames = self.video_processor_tester.num_frames
self.video_processor_tester.num_frames = 8
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False,
return_tensors="torch",
)
            # Force sampling off: no sampling is expected even when `num_frames` is passed
video_processing.do_sample_frames = False
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 8)
self.assertEqual(encoded_videos_batched.shape[1], 8)
# Set sampling to True. Video frames should be sampled with `num_frames` in the output
video_processing.do_sample_frames = True
metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]]
batched_metadata = metadata * len(video_inputs)
            # Sampling with `fps` requires metadata to infer the number of frames from the total duration
with self.assertRaises(ValueError):
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=6, fps=3)[
self.input_name
]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=6, fps=3)[
self.input_name
]
encoded_videos = video_processing(
video_inputs[0], return_tensors="pt", num_frames=6, fps=3, video_metadata=metadata
)[self.input_name]
encoded_videos_batched = video_processing(
video_inputs, return_tensors="pt", num_frames=6, fps=3, video_metadata=batched_metadata
)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 6)
self.assertEqual(encoded_videos_batched.shape[1], 6)
            # An error should be raised when asked to sample more frames than the input video contains
with self.assertRaises(ValueError):
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=10, num_frames=20)[
self.input_name
]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", fps=10, num_frames=20)[
self.input_name
]
            # Restore the original num_frames in the tester
self.video_processor_tester.num_frames = prev_num_frames
| transformers/tests/models/smolvlm/test_video_processing_smolvlm.py/0 | {
"file_path": "transformers/tests/models/smolvlm/test_video_processing_smolvlm.py",
"repo_id": "transformers",
"token_count": 2982
} | 521 |
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
import pandas as pd
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TapasConfig,
is_torch_available,
)
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
)
from transformers.models.tapas.modeling_tapas import (
IndexMap,
ProductIndexMap,
flatten,
gather,
range_index_map,
reduce_max,
reduce_mean,
reduce_sum,
)
class TapasModelTester:
"""You can also import this e.g from .test_modeling_tapas import TapasModelTester"""
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
max_position_embeddings=512,
type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
type_sequence_label_size=2,
positive_weight=10.0,
num_aggregation_labels=4,
num_labels=2,
aggregation_loss_importance=0.8,
use_answer_as_supervision=True,
answer_loss_importance=0.001,
use_normalized_answer_loss=False,
huber_loss_delta=25.0,
temperature=1.0,
agg_temperature=1.0,
use_gumbel_for_cells=False,
use_gumbel_for_agg=False,
average_approximation_function="ratio",
cell_selection_preference=0.5,
answer_loss_cutoff=100,
max_num_rows=64,
max_num_columns=32,
average_logits_per_cell=True,
select_one_column=True,
allow_empty_column_selection=False,
init_cell_selection_weights_to_zero=True,
reset_position_index_per_cell=True,
disable_per_token_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.max_position_embeddings = max_position_embeddings
self.type_vocab_sizes = type_vocab_sizes
self.type_sequence_label_size = type_sequence_label_size
self.positive_weight = positive_weight
self.num_aggregation_labels = num_aggregation_labels
self.num_labels = num_labels
self.aggregation_loss_importance = aggregation_loss_importance
self.use_answer_as_supervision = use_answer_as_supervision
self.answer_loss_importance = answer_loss_importance
self.use_normalized_answer_loss = use_normalized_answer_loss
self.huber_loss_delta = huber_loss_delta
self.temperature = temperature
self.agg_temperature = agg_temperature
self.use_gumbel_for_cells = use_gumbel_for_cells
self.use_gumbel_for_agg = use_gumbel_for_agg
self.average_approximation_function = average_approximation_function
self.cell_selection_preference = cell_selection_preference
self.answer_loss_cutoff = answer_loss_cutoff
self.max_num_rows = max_num_rows
self.max_num_columns = max_num_columns
self.average_logits_per_cell = average_logits_per_cell
self.select_one_column = select_one_column
self.allow_empty_column_selection = allow_empty_column_selection
self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
self.reset_position_index_per_cell = reset_position_index_per_cell
self.disable_per_token_loss = disable_per_token_loss
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).to(torch_device)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length]).to(torch_device)
token_type_ids = []
for type_vocab_size in self.type_vocab_sizes:
token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size))
token_type_ids = torch.stack(token_type_ids, dim=2).to(torch_device)
sequence_labels = None
token_labels = None
labels = None
numeric_values = None
numeric_values_scale = None
float_answer = None
aggregation_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size).to(torch_device)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels).to(torch_device)
labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2).to(torch_device)
numeric_values = floats_tensor([self.batch_size, self.seq_length]).to(torch_device)
numeric_values_scale = floats_tensor([self.batch_size, self.seq_length]).to(torch_device)
float_answer = floats_tensor([self.batch_size]).to(torch_device)
aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels).to(torch_device)
config = self.get_config()
return (
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
)
def get_config(self):
return TapasConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_sizes=self.type_vocab_sizes,
initializer_range=self.initializer_range,
positive_weight=self.positive_weight,
num_aggregation_labels=self.num_aggregation_labels,
num_labels=self.num_labels,
aggregation_loss_importance=self.aggregation_loss_importance,
use_answer_as_supervision=self.use_answer_as_supervision,
answer_loss_importance=self.answer_loss_importance,
use_normalized_answer_loss=self.use_normalized_answer_loss,
huber_loss_delta=self.huber_loss_delta,
temperature=self.temperature,
agg_temperature=self.agg_temperature,
use_gumbel_for_cells=self.use_gumbel_for_cells,
use_gumbel_for_agg=self.use_gumbel_for_agg,
average_approximation_function=self.average_approximation_function,
cell_selection_preference=self.cell_selection_preference,
answer_loss_cutoff=self.answer_loss_cutoff,
max_num_rows=self.max_num_rows,
max_num_columns=self.max_num_columns,
average_logits_per_cell=self.average_logits_per_cell,
select_one_column=self.select_one_column,
allow_empty_column_selection=self.allow_empty_column_selection,
init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero,
reset_position_index_per_cell=self.reset_position_index_per_cell,
disable_per_token_loss=self.disable_per_token_loss,
)
def create_and_check_model(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
model = TapasModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
model = TapasForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
# inference: without aggregation head (SQA). Model only returns logits
sqa_config = copy.copy(config)
sqa_config.num_aggregation_labels = 0
sqa_config.use_answer_as_supervision = False
model = TapasForQuestionAnswering(config=sqa_config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
# inference: with aggregation head (WTQ, WikiSQL-supervised). Model returns logits and aggregation logits
model = TapasForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
# training: can happen in 3 main ways
# case 1: conversational (SQA)
model = TapasForQuestionAnswering(config=sqa_config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=labels,
)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
# case 2: weak supervision for aggregation (WTQ)
model = TapasForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=labels,
numeric_values=numeric_values,
numeric_values_scale=numeric_values_scale,
float_answer=float_answer,
)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
# case 3: strong supervision for aggregation (WikiSQL-supervised)
wikisql_config = copy.copy(config)
wikisql_config.use_answer_as_supervision = False
model = TapasForQuestionAnswering(config=wikisql_config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=labels,
aggregation_labels=aggregation_labels,
)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
def create_and_check_for_sequence_classification(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
config.num_labels = self.num_labels
model = TapasForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TapasModel,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
)
if is_torch_available()
else None
)
pipeline_model_mapping = (
{
"feature-extraction": TapasModel,
"fill-mask": TapasForMaskedLM,
"table-question-answering": TapasForQuestionAnswering,
"text-classification": TapasForSequenceClassification,
"zero-shot": TapasForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = True
test_head_masking = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict = {
k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
if isinstance(v, torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
elif model_class in get_values(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["aggregation_labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["numeric_values"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length),
dtype=torch.float,
device=torch_device,
)
inputs_dict["numeric_values_scale"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length),
dtype=torch.float,
device=torch_device,
)
inputs_dict["float_answer"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.float, device=torch_device
)
elif model_class in [
*get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
*get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING),
]:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in [
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
*get_values(MODEL_FOR_CAUSAL_LM_MAPPING),
*get_values(MODEL_FOR_MASKED_LM_MAPPING),
*get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
]:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
return True
def setUp(self):
self.model_tester = TapasModelTester(self)
self.config_tester = ConfigTester(self, config_class=TapasConfig, dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
@unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
def test_tf_from_pt_safetensors(self):
pass
def prepare_tapas_single_inputs_for_inference():
# Here we prepare a single table-question pair to test TAPAS inference on:
data = {
"Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
"Age": ["33", "35"],
}
queries = "Which footballer is 33 years old?"
table = pd.DataFrame.from_dict(data)
return table, queries
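# The table/queries prepared above are consumed by TapasTokenizer, roughly as
# tokenizer(table=table, queries=queries, return_tensors="pt"), which flattens the question and
# table into input_ids plus the seven TAPAS-specific token type id channels (see the tests below).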
def prepare_tapas_batch_inputs_for_inference():
# Here we prepare a batch of 2 table-question pairs to test TAPAS inference on:
data = {
"Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
"Age": ["33", "35"],
"Number of goals": ["712", "750"],
}
queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"]
table = pd.DataFrame.from_dict(data)
return table, queries
def prepare_tapas_batch_inputs_for_training():
# Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on:
data = {
"Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
"Age": ["33", "35"],
"Number of goals": ["712", "750"],
}
queries = ["Which footballer is 33 years old?", "What's the total number of goals?"]
table = pd.DataFrame.from_dict(data)
answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]]
answer_text = [["Lionel Messi"], ["1462"]]
float_answer = [float("NaN"), float("1462")]
return table, queries, answer_coordinates, answer_text, float_answer
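# In the training batch above, the first question is answered by selecting a single cell
# (hence float_answer is NaN), while the second needs an aggregation over the goals column:
# 712 + 750 = 1462, which is why answer_text is "1462" and float_answer is 1462.0.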
@require_torch
class TapasModelIntegrationTest(unittest.TestCase):
@cached_property
def default_tokenizer(self):
return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
@slow
def test_inference_no_head(self):
# ideally we want to test this with the weights of tapas_inter_masklm_base_reset,
# but since it's not straightforward to do this with the TF 1 implementation, we test it with
# the weights of the WTQ base model (i.e. tapas_wtq_wikisql_sqa_inter_masklm_base_reset)
model = TapasModel.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device)
tokenizer = self.default_tokenizer
table, queries = prepare_tapas_single_inputs_for_inference()
inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
# test the sequence output
expected_slice = torch.tensor(
[
[
[-0.141581565, -0.599805772, 0.747186482],
[-0.143664181, -0.602008104, 0.749218345],
[-0.15169853, -0.603363097, 0.741370678],
]
],
device=torch_device,
)
torch.testing.assert_close(outputs.last_hidden_state[:, :3, :3], expected_slice, rtol=0.0005, atol=0.0005)
# test the pooled output
expected_slice = torch.tensor([[0.987518311, -0.970520139, -0.994303405]], device=torch_device)
torch.testing.assert_close(outputs.pooler_output[:, :3], expected_slice, rtol=0.0005, atol=0.0005)
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
# TapasForQuestionAnswering has 3 possible ways of being fine-tuned:
# - conversational set-up (SQA)
# - weak supervision for aggregation (WTQ, WikiSQL)
# - strong supervision for aggregation (WikiSQL-supervised)
# We test all of them:
@slow
def test_inference_question_answering_head_conversational(self):
# note that google/tapas-base-finetuned-sqa should correspond to tapas_sqa_inter_masklm_base_reset
model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa").to(torch_device)
tokenizer = self.default_tokenizer
table, queries = prepare_tapas_single_inputs_for_inference()
inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
# test the logits
logits = outputs.logits
expected_shape = torch.Size((1, 21))
self.assertEqual(logits.shape, expected_shape)
expected_tensor = torch.tensor(
[
[
-9997.22461,
-9997.22461,
-9997.22461,
-9997.22461,
-9997.22461,
-9997.22461,
-9997.22461,
-9997.22461,
-9997.22461,
-16.2628059,
-10004.082,
15.4330549,
15.4330549,
15.4330549,
-9990.42,
-16.3270779,
-16.3270779,
-16.3270779,
-16.3270779,
-16.3270779,
-10004.8506,
]
],
device=torch_device,
)
torch.testing.assert_close(logits, expected_tensor, rtol=0.015, atol=0.015)
@slow
def test_inference_question_answering_head_conversational_absolute_embeddings(self):
# note that google/tapas-small-finetuned-sqa should correspond to tapas_sqa_inter_masklm_small_reset
# however here we test the version with absolute position embeddings
model = TapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa", revision="no_reset").to(
torch_device
)
tokenizer = self.default_tokenizer
table, queries = prepare_tapas_single_inputs_for_inference()
inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
# test the logits
logits = outputs.logits
expected_shape = torch.Size((1, 21))
self.assertEqual(logits.shape, expected_shape)
expected_tensor = torch.tensor(
[
[
-10014.7793,
-10014.7793,
-10014.7793,
-10014.7793,
-10014.7793,
-10014.7793,
-10014.7793,
-10014.7793,
-10014.7793,
-18.8419304,
-10018.0391,
17.7848816,
17.7848816,
17.7848816,
-9981.02832,
-16.4005489,
-16.4005489,
-16.4005489,
-16.4005489,
-16.4005489,
-10013.4736,
]
],
device=torch_device,
)
torch.testing.assert_close(logits, expected_tensor, rtol=0.01, atol=0.01)
@slow
def test_inference_question_answering_head_weak_supervision(self):
# note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset
model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device)
tokenizer = self.default_tokenizer
# let's test on a batch
table, queries = prepare_tapas_batch_inputs_for_inference()
inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs_on_device)
# test the logits
logits = outputs.logits
expected_shape = torch.Size((2, 28))
self.assertEqual(logits.shape, expected_shape)
expected_slice = torch.tensor(
[
[-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736],
[-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677],
],
device=torch_device,
)
torch.testing.assert_close(logits[:, -6:], expected_slice, rtol=0.4, atol=0.4)
# test the aggregation logits
logits_aggregation = outputs.logits_aggregation
expected_shape = torch.Size((2, 4))
self.assertEqual(logits_aggregation.shape, expected_shape)
expected_tensor = torch.tensor(
[[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]],
device=torch_device,
)
torch.testing.assert_close(logits_aggregation, expected_tensor, rtol=0.001, atol=0.001)
# test the predicted answer coordinates and aggregation indices
EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]]
EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1]
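        # How to read these expectations (assuming the usual WTQ aggregation ordering, where
        # index 0 is NONE and index 1 is SUM): the first question is answered by plain cell
        # selection at (0, 0), the second by aggregating the single cell at (1, 2).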
predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions(
inputs, outputs.logits.detach().cpu(), outputs.logits_aggregation.detach().cpu()
)
self.assertEqual(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates)
self.assertEqual(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices)
@slow
def test_training_question_answering_head_weak_supervision(self):
# note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset
model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device)
model.to(torch_device)
# normally we should put the model in training mode but it's a pain to do this with the TF 1 implementation
tokenizer = self.default_tokenizer
# let's test on a batch
table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training()
inputs = tokenizer(
table=table,
queries=queries,
answer_coordinates=answer_coordinates,
answer_text=answer_text,
padding="longest",
return_tensors="pt",
)
# prepare data (created by the tokenizer) and move to torch_device
input_ids = inputs["input_ids"].to(torch_device)
attention_mask = inputs["attention_mask"].to(torch_device)
token_type_ids = inputs["token_type_ids"].to(torch_device)
labels = inputs["labels"].to(torch_device)
numeric_values = inputs["numeric_values"].to(torch_device)
numeric_values_scale = inputs["numeric_values_scale"].to(torch_device)
# the answer should be prepared by the user
float_answer = torch.FloatTensor(float_answer).to(torch_device)
# forward pass to get loss + logits:
with torch.no_grad():
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=labels,
numeric_values=numeric_values,
numeric_values_scale=numeric_values_scale,
float_answer=float_answer,
)
# test the loss
loss = outputs.loss
expected_loss = torch.tensor(3.3527612686157227e-08, device=torch_device)
torch.testing.assert_close(loss, expected_loss, rtol=1e-6, atol=1e-6)
# test the logits on the first example
logits = outputs.logits
expected_shape = torch.Size((2, 29))
self.assertEqual(logits.shape, expected_shape)
expected_slice = torch.tensor(
[
-160.0156,
-160.0156,
-160.0156,
-160.0156,
-160.0156,
-10072.2266,
-10070.8896,
-10092.6006,
-10092.6006,
],
device=torch_device,
)
torch.testing.assert_close(logits[0, -9:], expected_slice, rtol=1e-6, atol=1e-6)
# test the aggregation logits on the second example
logits_aggregation = outputs.logits_aggregation
expected_shape = torch.Size((2, 4))
self.assertEqual(logits_aggregation.shape, expected_shape)
expected_slice = torch.tensor([-4.0538, 40.0304, -5.3554, 23.3965], device=torch_device)
torch.testing.assert_close(logits_aggregation[1, -4:], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_question_answering_head_strong_supervision(self):
# note that google/tapas-base-finetuned-wikisql-supervised should correspond to tapas_wikisql_sqa_inter_masklm_base_reset
model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised").to(
torch_device
)
tokenizer = self.default_tokenizer
table, queries = prepare_tapas_single_inputs_for_inference()
inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
# test the logits
logits = outputs.logits
expected_shape = torch.Size((1, 21))
self.assertEqual(logits.shape, expected_shape)
expected_tensor = torch.tensor(
[
[
-10011.1084,
-10011.1084,
-10011.1084,
-10011.1084,
-10011.1084,
-10011.1084,
-10011.1084,
-10011.1084,
-10011.1084,
-18.6185989,
-10008.7969,
17.6355762,
17.6355762,
17.6355762,
-10002.4404,
-18.7111301,
-18.7111301,
-18.7111301,
-18.7111301,
-18.7111301,
-10007.0977,
]
],
device=torch_device,
)
torch.testing.assert_close(logits, expected_tensor, rtol=0.02, atol=0.02)
# test the aggregation logits
logits_aggregation = outputs.logits_aggregation
expected_shape = torch.Size((1, 4))
self.assertEqual(logits_aggregation.shape, expected_shape)
expected_tensor = torch.tensor(
[[16.5659733, -3.06624889, -2.34152961, -0.970244825]], device=torch_device
) # PyTorch model outputs [[16.5679, -3.0668, -2.3442, -0.9674]]
torch.testing.assert_close(logits_aggregation, expected_tensor, rtol=0.003, atol=0.003)
@slow
def test_inference_classification_head(self):
# note that google/tapas-base-finetuned-tabfact should correspond to tapas_tabfact_inter_masklm_base_reset
model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact").to(torch_device)
tokenizer = self.default_tokenizer
table, queries = prepare_tapas_single_inputs_for_inference()
inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
# test the classification logits
logits = outputs.logits
expected_shape = torch.Size((1, 2))
self.assertEqual(logits.shape, expected_shape)
expected_tensor = torch.tensor(
[[0.795137286, 9.5572]], device=torch_device
) # Note that the PyTorch model outputs [[0.8057, 9.5281]]
torch.testing.assert_close(outputs.logits, expected_tensor, rtol=0.05, atol=0.05)
@require_torch
class TapasUtilitiesTest(unittest.TestCase):
def _prepare_tables(self):
"""Prepares two tables, both with three distinct rows.
The first table has two columns:
1.0, 2.0 | 3.0
2.0, 0.0 | 1.0
1.0, 3.0 | 4.0
The second table has three columns:
1.0 | 2.0 | 3.0
2.0 | 0.0 | 1.0
1.0 | 3.0 | 4.0
Returns:
SegmentedTensors with the tables.
"""
values = torch.tensor(
[
[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
]
)
row_index = IndexMap(
indices=torch.tensor(
[
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
]
),
num_segments=3,
batch_dims=1,
)
col_index = IndexMap(
indices=torch.tensor(
[
[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
[[0, 1, 2], [0, 1, 2], [0, 1, 2]],
]
),
num_segments=3,
batch_dims=1,
)
return values, row_index, col_index
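    # ProductIndexMap (exercised below) combines the row and column IndexMaps into one per-cell
    # index with num_segments = 3 * 3 = 9; project_outer / project_inner recover the original
    # row and column components from it.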
def test_product_index(self):
_, row_index, col_index = self._prepare_tables()
cell_index = ProductIndexMap(row_index, col_index)
row_index_proj = cell_index.project_outer(cell_index)
col_index_proj = cell_index.project_inner(cell_index)
ind = cell_index.indices
self.assertEqual(cell_index.num_segments, 9)
# Projections should give back the original indices.
# we use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy())
self.assertEqual(row_index.num_segments, row_index_proj.num_segments)
self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims)
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy())
self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims)
# The first and second "column" are identified in the first table.
for i in range(3):
self.assertEqual(ind[0, i, 0], ind[0, i, 1])
self.assertNotEqual(ind[0, i, 0], ind[0, i, 2])
# All rows are distinct in the first table.
for i, i_2 in zip(range(3), range(3)):
for j, j_2 in zip(range(3), range(3)):
if i != i_2 and j != j_2:
self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2])
# All cells are distinct in the second table.
for i, i_2 in zip(range(3), range(3)):
for j, j_2 in zip(range(3), range(3)):
if i != i_2 or j != j_2:
self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2])
def test_flatten(self):
_, row_index, col_index = self._prepare_tables()
row_index_flat = flatten(row_index)
col_index_flat = flatten(col_index)
shape = [3, 4, 5]
batched_index = IndexMap(indices=torch.zeros(shape).type(torch.LongTensor), num_segments=1, batch_dims=3)
batched_index_flat = flatten(batched_index)
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(
row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]
)
np.testing.assert_array_equal(
col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5]
)
self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape))
np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape)))
def test_range_index_map(self):
batch_shape = [3, 4]
num_segments = 5
index = range_index_map(batch_shape, num_segments)
self.assertEqual(num_segments, index.num_segments)
self.assertEqual(2, index.batch_dims)
indices = index.indices
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(list(indices.size()), [3, 4, 5])
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments))
def test_reduce_sum(self):
values, row_index, col_index = self._prepare_tables()
cell_index = ProductIndexMap(row_index, col_index)
row_sum, _ = reduce_sum(values, row_index)
col_sum, _ = reduce_sum(values, col_index)
cell_sum, _ = reduce_sum(values, cell_index)
# We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]])
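        # Worked example for the line above: the rows of the first table sum to
        # [1+2+3, 2+0+1, 1+3+4] = [6, 3, 8]; both tables hold the same values, so both rows of
        # the expectation are identical.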
np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]])
np.testing.assert_allclose(
cell_sum.numpy(),
[[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]],
)
def test_reduce_mean(self):
values, row_index, col_index = self._prepare_tables()
cell_index = ProductIndexMap(row_index, col_index)
row_mean, _ = reduce_mean(values, row_index)
col_mean, _ = reduce_mean(values, col_index)
cell_mean, _ = reduce_mean(values, cell_index)
# We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
np.testing.assert_allclose(
row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]]
)
np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]])
np.testing.assert_allclose(
cell_mean.numpy(),
[
[3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0],
[1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0],
],
)
def test_reduce_max(self):
values = torch.as_tensor([2.0, 1.0, 0.0, 3.0])
index = IndexMap(indices=torch.as_tensor([0, 1, 0, 1]), num_segments=2)
maximum, _ = reduce_max(values, index)
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(maximum.numpy(), [2, 3])
def test_reduce_sum_vectorized(self):
values = torch.as_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]])
index = IndexMap(indices=torch.as_tensor([[0, 0, 1]]), num_segments=2, batch_dims=0)
sums, new_index = reduce_sum(values, index)
# We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
np.testing.assert_allclose(sums.numpy(), [3.0, 3.0])
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1])
np.testing.assert_array_equal(new_index.num_segments.numpy(), 2)
np.testing.assert_array_equal(new_index.batch_dims, 0)
def test_gather(self):
values, row_index, col_index = self._prepare_tables()
cell_index = ProductIndexMap(row_index, col_index)
# Compute sums and then gather. The result should have the same shape as
        # the original table and each element should contain the sum of the values in
# its cell.
sums, _ = reduce_sum(values, cell_index)
cell_sum = gather(sums, cell_index)
assert cell_sum.size() == values.size()
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_allclose(
cell_sum.numpy(),
[[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]],
)
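        # Worked example: in the first table, columns 0 and 1 form one cell group, so the cell in
        # row 0 sums to 1.0 + 2.0 = 3.0 and gather() broadcasts that value back to both positions;
        # the remaining single-column cell keeps its 3.0, giving the leading [3.0, 3.0, 3.0] row.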
def test_gather_vectorized(self):
values = torch.as_tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
index = IndexMap(indices=torch.as_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1)
result = gather(values, index)
# We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
| transformers/tests/models/tapas/test_modeling_tapas.py/0 | {
"file_path": "transformers/tests/models/tapas/test_modeling_tapas.py",
"repo_id": "transformers",
"token_count": 22114
} | 522 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "FacebookAI/xlm-mlm-en-2048"
tokenizer_class = XLMTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
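        # Each merge rule lists a symbol pair (in subword-nmt style, followed by a count);
        # applying l+o -> lo, lo+w -> low and e+r</w> -> er</w> is what lets test_full_tokenizer
        # below split "lower" into ["low", "er</w>"].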
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(cls.vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(cls.merges_file, "w") as fp:
fp.write("\n".join(merges))
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_2 + [1]
| transformers/tests/models/xlm/test_tokenization_xlm.py/0 | {
"file_path": "transformers/tests/models/xlm/test_tokenization_xlm.py",
"repo_id": "transformers",
"token_count": 1545
} | 523 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datasets
from huggingface_hub import DepthEstimationOutput
from huggingface_hub.utils import insecure_hashlib
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
compare_pipeline_output_to_hub_spec,
is_pipeline_test,
nested_simplify,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
def hashimage(image: Image) -> str:
m = insecure_hashlib.md5(image.tobytes())
return m.hexdigest()
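# hashimage above gives a cheap, deterministic fingerprint of a PIL image (md5 of its raw bytes),
# so a test can compare a generated depth map against a stored hash; see test_large_model_pt,
# where the exact-hash assertion is kept but commented out as flaky.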
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_dataset = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls._dataset is None:
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
cls._dataset = datasets.load_dataset(
"hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1"
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
depth_estimator = DepthEstimationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def run_pipeline_test(self, depth_estimator, examples):
self._load_dataset()
outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
self._dataset[0]["image"],
# LA
self._dataset[1]["image"],
# L
self._dataset[2]["image"],
]
)
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
],
outputs,
)
for single_output in outputs:
compare_pipeline_output_to_hub_spec(single_output, DepthEstimationOutput)
@slow
@require_torch
def test_large_model_pt(self):
model_id = "Intel/dpt-large"
depth_estimator = pipeline("depth-estimation", model=model_id)
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.306)
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
def test_small_model_pt(self):
        # It is highly irregular to have no small tests.
        self.skipTest(reason="There is no hf-internal-testing tiny model for either GLPN or DPT")
@require_torch
def test_multiprocess(self):
depth_estimator = pipeline(
model="hf-internal-testing/tiny-random-DepthAnythingForDepthEstimation",
num_workers=2,
)
outputs = depth_estimator(
[
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
)
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
],
outputs,
)
| transformers/tests/pipelines/test_pipelines_depth_estimation.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_depth_estimation.py",
"repo_id": "transformers",
"token_count": 2529
} | 524 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_torch,
require_torch_bf16,
require_torch_fp16,
slow,
torch_device,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if not hasattr(tf_model_mapping, "is_dummy"):
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def test_small_model_pt(self):
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
outputs = text_classifier("This is great !", top_k=2)
self.assertEqual(
nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
)
outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
self.assertEqual(
nested_simplify(outputs),
[
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
],
)
outputs = text_classifier("This is great !", top_k=1)
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
# Legacy behavior
outputs = text_classifier("This is great !", return_all_scores=False)
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
outputs = text_classifier("This is great !", return_all_scores=True)
self.assertEqual(
nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
)
outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
self.assertEqual(
nested_simplify(outputs),
[
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
],
)
outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
self.assertEqual(
nested_simplify(outputs),
[
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
],
)
# Do not apply any function to output for regression tasks
# hack: changing problem_type artificially (so keep this test last)
text_classifier.model.config.problem_type = "regression"
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.01}])
@require_torch
def test_accepts_torch_device(self):
text_classifier = pipeline(
task="text-classification",
model="hf-internal-testing/tiny-random-distilbert",
framework="pt",
device=torch_device,
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_torch_fp16
def test_accepts_torch_fp16(self):
text_classifier = pipeline(
task="text-classification",
model="hf-internal-testing/tiny-random-distilbert",
framework="pt",
device=torch_device,
dtype=torch.float16,
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_torch_bf16
def test_accepts_torch_bf16(self):
text_classifier = pipeline(
task="text-classification",
model="hf-internal-testing/tiny-random-distilbert",
framework="pt",
device=torch_device,
dtype=torch.bfloat16,
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
def test_pt_bert(self):
text_classifier = pipeline("text-classification")
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
outputs = text_classifier("This is bad !")
self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
outputs = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
text_classifier = TextClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
return text_classifier, ["HuggingFace is in", "This is another test"]
def run_pipeline_test(self, text_classifier, _):
model = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
valid_inputs = "HuggingFace is in"
outputs = text_classifier(valid_inputs)
self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
valid_inputs = ["HuggingFace is in ", "Paris is in France"]
outputs = text_classifier(valid_inputs)
self.assertEqual(
nested_simplify(outputs),
[{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
)
self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
self.assertTrue(outputs[1]["label"] in model.config.id2label.values())
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
outputs = text_classifier(valid_inputs, top_k=None)
N = len(model.config.id2label.values())
self.assertEqual(
nested_simplify(outputs),
[[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
)
valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
outputs = text_classifier(valid_inputs)
self.assertEqual(
nested_simplify(outputs),
{"label": ANY(str), "score": ANY(float)},
)
self.assertTrue(outputs["label"] in model.config.id2label.values())
# This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead, as it was producing wrong outputs.
invalid_input = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(ValueError):
text_classifier(invalid_input)
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
self.assertEqual(
nested_simplify(outputs),
[{"label": ANY(str), "score": ANY(float)}],
)
self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
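# --- Illustrative sketch (added for clarity; not part of the original test) ---
# As exercised above, classifying a text pair requires the dict form of the input,
# e.g. (assuming the tiny random DistilBERT checkpoint used elsewhere in this file):
#
#     classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     classifier({"text": "HuggingFace is in", "text_pair": "Paris is in France"})
#     # -> a single {"label": ..., "score": ...} dict, not a list
#
# while the nested-list form [["text", "pair"]] deliberately raises a ValueError.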
| transformers/tests/pipelines/test_pipelines_text_classification.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_text_classification.py",
"repo_id": "transformers",
"token_count": 4030
} | 525 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from transformers import AutoModelForCausalLM, AutoRoundConfig, AutoTokenizer
from transformers.testing_utils import (
backend_empty_cache,
backend_synchronize,
require_accelerate,
require_auto_round,
require_intel_extension_for_pytorch,
require_torch_accelerator,
require_torch_gpu,
require_torch_multi_accelerator,
slow,
torch_device,
)
from transformers.utils import is_torch_available
if is_torch_available():
import torch
@slow
@require_torch_accelerator
@require_auto_round
@require_accelerate
class AutoRoundTest(unittest.TestCase):
model_name = "OPEA/Qwen2.5-1.5B-Instruct-int4-sym-inc"
input_text = "There is a girl who likes adventure,"
EXPECTED_OUTPUTS = set()
# Different backends may produce slight variations in output
EXPECTED_OUTPUTS.add(
"There is a girl who likes adventure, and she has been exploring the world "
"for many years. She travels to different countries and cultures, trying new "
"things every day. One of her favorite places to visit is a small village in "
"the mountains where"
)
EXPECTED_OUTPUTS.add(
"There is a girl who likes adventure, and she has been exploring the world for many years. She has visited every country in Europe and has even traveled to some of the most remote parts of Africa. She enjoys hiking through the mountains and discovering"
)
EXPECTED_OUTPUTS.add(
"There is a girl who likes adventure, and she has been exploring the world for many years. She has visited every country in Europe and has even traveled to some of the most remote parts of Africa. She has also climbed mountains and explored caves"
)
device_map = torch_device
# called only once for all tests in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
backend_synchronize(torch_device)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name, device_map=cls.device_map, dtype=torch.float16
)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False)
self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_raise_if_non_quantized(self):
model_id = "facebook/opt-125m"
quantization_config = AutoRoundConfig(bits=4)
with self.assertRaises(ValueError):
_ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
def test_quantized_model_bf16(self):
"""
Simple test that checks if the quantized model is working properly with bf16
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = AutoRoundConfig(backend="triton")
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
dtype=torch.bfloat16,
device_map=self.device_map,
quantization_config=quantization_config,
)
output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False)
self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
@require_intel_extension_for_pytorch
def test_quantized_model_on_cpu(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt")
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, dtype="auto")
output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False)
self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
# some backends like marlin/ipex repack the weights, which changes the weight shape
with tempfile.TemporaryDirectory() as tmpdirname:
quantization_config = AutoRoundConfig(backend="triton")
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=self.device_map,
dtype=torch.float16,
quantization_config=quantization_config,
)
quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=40, do_sample=False)
output_tokens = self.tokenizer.decode(output[0], skip_special_tokens=True)
self.assertIn(output_tokens, self.EXPECTED_OUTPUTS)
@require_torch_multi_accelerator
def test_quantized_model_multi_accelerator(self):
"""
Simple test that checks if the quantized model is working properly with multiple accelerators
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = AutoRoundConfig(backend="triton")
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config, dtype="auto"
)
output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False)
self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_convert_from_gptq(self):
"""
Simple test that checks if auto-round works properly with the GPTQ format
"""
model_name = "ybelkada/opt-125m-gptq-4bit"
quantization_config = AutoRoundConfig()
model = AutoModelForCausalLM.from_pretrained(
model_name, device_map=torch_device, quantization_config=quantization_config, dtype="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
text = "There is a girl who likes adventure,"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
@require_intel_extension_for_pytorch
def test_convert_from_awq_cpu(self):
"""
Simple test that checks if auto-round works properly with the AWQ format
"""
model_name = "casperhansen/opt-125m-awq"
quantization_config = AutoRoundConfig()
model = AutoModelForCausalLM.from_pretrained(
model_name, device_map="cpu", quantization_config=quantization_config, dtype="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
text = "There is a girl who likes adventure,"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
@require_torch_gpu
def test_mixed_bits(self):
"""
Simple test that checks if auto-round works properly with mixed bits
"""
model_name = "facebook/opt-125m"
model = AutoModelForCausalLM.from_pretrained(model_name, dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
layer_config = {
"model.decoder.layers.0.self_attn.k_proj": {"bits": 8},
"model.decoder.layers.6.self_attn.out_proj": {"bits": 2, "group_size": 32},
}
bits, group_size, sym = 4, 128, True
from auto_round import AutoRound
autoround = AutoRound(model, tokenizer, bits=bits, group_size=group_size, sym=sym, layer_config=layer_config)
with tempfile.TemporaryDirectory() as tmpdirname:
autoround.quantize_and_save(output_dir=tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, dtype=torch.float16, device_map=torch_device)
text = "There is a girl who likes adventure,"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
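# --- Illustrative note (added for clarity; not part of the original test) ---
# `layer_config` overrides the global bits/group_size per layer: above, one projection is
# kept at 8 bits and another is pushed down to 2 bits with a smaller group size, while the
# rest of the model uses the global 4-bit / group_size-128 setting. A hypothetical minimal
# override for a single extra layer could look like:
#
#     layer_config = {"model.decoder.layers.0.fc1": {"bits": 8}}
#     AutoRound(model, tokenizer, bits=4, group_size=128, sym=True, layer_config=layer_config)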
| transformers/tests/quantization/autoround/test_auto_round.py/0 | {
"file_path": "transformers/tests/quantization/autoround/test_auto_round.py",
"repo_id": "transformers",
"token_count": 3710
} | 526 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
import pytest
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, SpQRConfig, StaticCache
from transformers.testing_utils import (
backend_empty_cache,
require_accelerate,
require_spqr,
require_torch_gpu,
require_torch_multi_gpu,
slow,
torch_device,
)
from transformers.utils import is_accelerate_available, is_torch_available
if is_torch_available():
import torch
if is_accelerate_available():
from accelerate import init_empty_weights
@require_torch_gpu
class SpQRConfigTest(unittest.TestCase):
def test_to_dict(self):
"""
Simple test that checks that, if one converts a config to a dict, the dict is the same as the config object
"""
quantization_config = SpQRConfig()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
def test_from_dict(self):
"""
Simple test that checks that, if one converts a dict to a config object, the config object is the same as the dict
"""
config_dict = {
"beta1": 16,
"beta2": 16,
"bits": 3,
"modules_to_not_convert": ["lm_head.weight"],
"shapes": {"model.layers.0.self_attn.q_proj.dense_weights.shape": 16},
}
quantization_config = SpQRConfig.from_dict(config_dict)
self.assertEqual(config_dict["beta1"], quantization_config.beta1)
self.assertEqual(config_dict["beta2"], quantization_config.beta2)
self.assertEqual(config_dict["bits"], quantization_config.bits)
self.assertEqual(config_dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(config_dict["shapes"], quantization_config.shapes)
@slow
@require_torch_gpu
@require_spqr
@require_accelerate
class SpQRTest(unittest.TestCase):
model_name = "elvircrn/Llama-2-7b-SPQR-3Bit-16x16-red_pajama-hf"
input_text = "Hello my name is"
max_new_tokens = 32
EXPECTED_OUTPUT = (
"Hello my name is Jesse. (I'm also known as Jesse) I'm a 25 year old male from United States. I'm looking for"
)
EXPECTED_OUTPUT_COMPILE = "Hello my name is Jake and I am a 20 year old student at the University of North Texas. (Go Mean Green!) I am a huge fan of the Dallas"
# called only once for all tests in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
device_map=torch_device,
)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from spqr_quant import QuantizedLinear
from transformers.integrations import replace_with_spqr_linear
model_id = "meta-llama/Llama-2-7b-hf"
config = AutoConfig.from_pretrained(model_id)
quantization_config = AutoConfig.from_pretrained(self.model_name, return_dict=False).quantization_config
quantization_config = SpQRConfig.from_dict(quantization_config)
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id, config=config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_spqr_linear(
model,
quantization_config=quantization_config,
modules_to_not_convert=quantization_config.modules_to_not_convert,
)
nb_spqr_linear = 0
for module in model.modules():
if isinstance(module, QuantizedLinear):
nb_spqr_linear += 1
self.assertEqual(nb_linears - 1, nb_spqr_linear)
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_raise_if_non_quantized(self):
model_id = "meta-llama/Llama-2-7b-hf"
quantization_config = SpQRConfig()
with self.assertRaises(ValueError):
_ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
@unittest.skip
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_gpu
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@pytest.mark.torch_compile_test
def test_quantized_model_compile(self):
"""
Simple test that checks if the quantized model is working properly
"""
# Sample tokens greedily
def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values):
logits = model(
cur_token,
position_ids=input_pos,
cache_position=cache_position,
past_key_values=past_key_values,
return_dict=False,
use_cache=True,
)[0]
new_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int)
return new_token
# Tokenize the test input
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)["input_ids"]
seq_length = input_ids.shape[1]
# Setup static KV cache for generation
past_key_values = StaticCache(
config=self.quantized_model.config, max_cache_len=seq_length + self.max_new_tokens + 1
)
# Allocate token ids to be generated and copy prefix ids
cache_position = torch.arange(seq_length, device=torch_device)
generated_ids = torch.zeros(1, seq_length + self.max_new_tokens, dtype=torch.int, device=torch_device)
generated_ids[:, cache_position] = input_ids.to(torch_device).to(torch.int)
# Do a forward pass to fill the prefix cache and compile the kernels if necessary
logits = self.quantized_model(
input_ids,
cache_position=cache_position,
past_key_values=past_key_values,
return_dict=False,
use_cache=True,
)[0]
next_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int)
generated_ids[:, [seq_length]] = next_token
with torch.no_grad():
# Compile the CUDA graph
decode_one_tokens = torch.compile(decode_one_tokens, mode="default", backend="inductor", fullgraph=True)
# Generate tokens one by one
cache_position = torch.tensor([seq_length + 1], device=torch_device)
for _ in range(1, self.max_new_tokens):
with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
next_token = decode_one_tokens(
self.quantized_model, next_token.clone(), None, cache_position, past_key_values
)
generated_ids.index_copy_(1, cache_position, next_token)
cache_position += 1
# Check generated text
self.assertEqual(
self.tokenizer.decode(generated_ids[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_COMPILE
)
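# --- Illustrative note (added for clarity; not part of the original test) ---
# The compile test above follows the usual static-cache decoding recipe: pre-allocate a
# StaticCache large enough for prompt + new tokens, run one eager forward pass over the
# prompt to fill the cache, then torch.compile a single-token decode step and advance
# `cache_position` by one per generated token. Only the per-token step is compiled, so the
# prefill keeps dynamic shapes while decoding runs with fixed shapes.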
| transformers/tests/quantization/spqr_integration/test_spqr.py/0 | {
"file_path": "transformers/tests/quantization/spqr_integration/test_spqr.py",
"repo_id": "transformers",
"token_count": 4089
} | 527 |
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import itertools
import json
import os
import pickle
import re
import shutil
import tempfile
import traceback
import unittest
from collections import OrderedDict
from functools import lru_cache
from itertools import takewhile
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, Union
from parameterized import parameterized
from transformers import (
AlbertTokenizer,
AlbertTokenizerFast,
BertTokenizer,
BertTokenizerFast,
PreTrainedTokenizer,
PreTrainedTokenizerBase,
PreTrainedTokenizerFast,
SpecialTokensMixin,
Trainer,
TrainingArguments,
is_torch_available,
logging,
)
from transformers.testing_utils import (
check_json_file_has_correct_format,
get_tests_dir,
require_jinja,
require_read_token,
require_tokenizers,
require_torch,
run_test_in_subprocess,
slow,
)
from transformers.tokenization_utils import AddedToken
if is_torch_available():
import torch
import torch.nn as nn
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel, TFPreTrainedModel
def use_cache_if_possible(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
use_cache = kwargs.pop("use_cache", True)
underline_func = func
if "functools" in str(func):
underline_func = func.__wrapped__
if not use_cache:
return underline_func(*args, **kwargs)
if any(not arg.__hash__ for arg in args):
return underline_func(*args, **kwargs)
elif any(not kwarg.__hash__ for kwarg in kwargs.values()):
return underline_func(*args, **kwargs)
cached = func(*args, **kwargs)
copied = copy.deepcopy(cached)
if hasattr(copied, "_tokenizer") and "tests.models.clip.test_tokenization_clip.CLIPTokenizationTest" in str(
args[0]
):
copied._tokenizer = cached._tokenizer
if hasattr(copied, "sp_model"):
copied.sp_model = cached.sp_model
return copied
return wrapper
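# --- Illustrative sketch (added for clarity; not part of the original test utilities) ---
# `use_cache_if_possible` is meant to sit on top of an `lru_cache`-wrapped factory, the way
# `get_tokenizer`/`get_rust_tokenizer` use it below: `use_cache=False` bypasses the cache,
# unhashable arguments fall through to the undecorated function, and cache hits are
# deep-copied before being returned. The helper below is a hypothetical example of such a
# factory and is not used by any test.
@use_cache_if_possible
@lru_cache(maxsize=8)
def _example_cached_factory(value):
    # Return a fresh list so the deep-copy behaviour of the decorator is observable:
    # two calls with the same argument give equal but distinct list objects.
    return [value]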
logger = logging.get_logger(__name__)
NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"]
SMALL_TRAINING_CORPUS = [
["This is the first sentence.", "This is the second one."],
["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."],
]
def filter_non_english(_, pretrained_name: str):
"""Filter all the model for non-english language"""
return not any(lang in pretrained_name for lang in NON_ENGLISH_TAGS)
def filter_roberta_detectors(_, pretrained_name: str):
return "detector" not in pretrained_name
def merge_model_tokenizer_mappings(
model_mapping: dict["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]],
tokenizer_mapping: dict["PretrainedConfig", tuple["PreTrainedTokenizer", "PreTrainedTokenizerFast"]],
) -> dict[
Union["PreTrainedTokenizer", "PreTrainedTokenizerFast"],
tuple["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]],
]:
configurations = list(model_mapping.keys())
model_tokenizer_mapping = OrderedDict([])
for configuration in configurations:
if configuration in model_mapping and configuration in tokenizer_mapping:
model = model_mapping[configuration]
tokenizer = tokenizer_mapping[configuration][0]
tokenizer_fast = tokenizer_mapping[configuration][1]
if tokenizer is not None:
if configuration.__name__.startswith(tokenizer.__name__.replace("Tokenizer", "")):
model_tokenizer_mapping.update({tokenizer: (configuration, model)})
if tokenizer_fast is not None:
if configuration.__name__.startswith(tokenizer_fast.__name__.replace("TokenizerFast", "")):
model_tokenizer_mapping.update({tokenizer_fast: (configuration, model)})
return model_tokenizer_mapping
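# --- Illustrative note (added for clarity; not part of the original test utilities) ---
# The returned OrderedDict is keyed by tokenizer class and maps back to the matching
# (config, model) pair, e.g. a hypothetical entry would look like:
#
#     {BertTokenizer: (BertConfig, BertForSequenceClassification),
#      BertTokenizerFast: (BertConfig, BertForSequenceClassification)}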
def _test_subword_regularization_tokenizer(in_queue, out_queue, timeout):
error = None
try:
inputs = in_queue.get(timeout=timeout)
tokenizer = inputs["tokenizer"]
sp_model_kwargs = inputs["sp_model_kwargs"]
test_sentencepiece_ignore_case = inputs["test_sentencepiece_ignore_case"]
unittest.TestCase().assertTrue(hasattr(tokenizer, "sp_model_kwargs"))
unittest.TestCase().assertIsNotNone(tokenizer.sp_model_kwargs)
unittest.TestCase().assertTrue(isinstance(tokenizer.sp_model_kwargs, dict))
unittest.TestCase().assertDictEqual(tokenizer.sp_model_kwargs, sp_model_kwargs)
check_subword_sampling(tokenizer, test_sentencepiece_ignore_case=test_sentencepiece_ignore_case)
except Exception:
error = f"{traceback.format_exc()}"
results = {"error": error}
out_queue.put(results, timeout=timeout)
out_queue.join()
def check_subword_sampling(
tokenizer: PreTrainedTokenizer,
text: Optional[str] = None,
test_sentencepiece_ignore_case: bool = True,
) -> None:
"""
Check if the tokenizer generates different results when subword regularization is enabled.
Subword regularization augments training data with subword sampling.
This has a random component.
Args:
tokenizer: The tokenizer to check.
text: The text to use for the checks.
test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`.
"""
text = "This is a test for subword regularization." if text is None else text
if test_sentencepiece_ignore_case:
text = text.lower()
tokens_list = []
for _ in range(5):
tokens_list.append(tokenizer.tokenize(text))
# the list of different pairs of tokens_list
combinations = itertools.combinations(tokens_list, 2)
# check if sampling was done
subword_sampling_found = False
for combination in combinations:
if combination[0] != combination[1]:
subword_sampling_found = True
unittest.TestCase().assertTrue(subword_sampling_found)
# check if converting back to original text works
for tokens in tokens_list:
if test_sentencepiece_ignore_case:
unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower())
else:
unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens))
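# --- Illustrative sketch (added for clarity; not part of the original test utilities) ---
# `check_subword_sampling` is normally run against a slow sentencepiece tokenizer that was
# loaded with sampling enabled, as the subword-regularization tests below do. The helper
# sketches that call; it assumes the "albert/albert-base-v2" checkpoint is reachable and is
# not invoked by any test in this module.
def _example_subword_sampling_check():
    sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1}
    tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2", sp_model_kwargs=sp_model_kwargs)
    check_subword_sampling(tokenizer, text="this text should tokenize differently across samples.")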
class TokenizerTesterMixin:
tokenizer_class = None
rust_tokenizer_class = None
test_slow_tokenizer = True
test_rust_tokenizer = True
space_between_special_tokens = False
from_pretrained_kwargs = None
from_pretrained_filter = None
from_pretrained_id = None
from_pretrained_vocab_key = "vocab_file"
test_seq2seq = True
# set to True to test a sentencepiece tokenizer
test_sentencepiece = False
# set to True to ignore casing when testing a sentencepiece tokenizer
# test_sentencepiece must also be set to True
test_sentencepiece_ignore_case = False
@classmethod
def setUpClass(cls) -> None:
# Tokenizer.filter makes it possible to filter which Tokenizer to test based on all the
# information available in Tokenizer (name, rust class, python class, vocab key name)
cls.from_pretrained_id = (
[cls.from_pretrained_id] if isinstance(cls.from_pretrained_id, str) else cls.from_pretrained_id
)
cls.tokenizers_list = []
if cls.test_rust_tokenizer:
cls.tokenizers_list = [
(
cls.rust_tokenizer_class,
pretrained_id,
cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {},
)
for pretrained_id in cls.from_pretrained_id
]
else:
cls.tokenizers_list = []
with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data:
cls._data = f_data.read().replace("\n\n", "\n").strip()
cls.tmpdirname = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def get_input_output_texts(self, tokenizer):
input_txt = self.get_clean_sequence(tokenizer)[0]
return input_txt, input_txt
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> tuple[str, list]:
# the length of the tokenizer does not always represent the tokens that it can encode: what if there are holes?
toks = [
(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in set(tokenizer.get_vocab().values())
]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
def get_tokenizers(self, fast=True, **kwargs) -> list[PreTrainedTokenizerBase]:
if fast and self.test_rust_tokenizer and self.test_slow_tokenizer:
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
elif fast and self.test_rust_tokenizer:
return [self.get_rust_tokenizer(**kwargs)]
elif self.test_slow_tokenizer:
return [self.get_tokenizer(**kwargs)]
else:
raise ValueError("This tokenizer class has no tokenizer to be tested.")
@classmethod
@use_cache_if_possible
@lru_cache(maxsize=64)
def get_tokenizer(cls, pretrained_name=None, **kwargs) -> PreTrainedTokenizer:
pretrained_name = pretrained_name or cls.tmpdirname
return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
@classmethod
@use_cache_if_possible
@lru_cache(maxsize=64)
def get_rust_tokenizer(cls, pretrained_name=None, **kwargs) -> PreTrainedTokenizerFast:
pretrained_name = pretrained_name or cls.tmpdirname
return cls.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
def tokenizer_integration_test_util(
self,
expected_encoding: dict,
model_name: str,
revision: Optional[str] = None,
sequences: Optional[list[str]] = None,
decode_kwargs: Optional[dict[str, Any]] = None,
padding: bool = True,
):
"""
Util for integration test.
Text is tokenized and then reverted back to text. Both results are then checked.
Args:
expected_encoding:
The expected result of the tokenizer output.
model_name:
The model name of the tokenizer to load and use.
revision:
The full git revision number of the model. This is to pin the
tokenizer config and to avoid tests starting to fail if the
config gets changed upstream.
sequences:
Can overwrite the texts that are used to check the tokenizer.
This is useful if the tokenizer supports non-English languages
like French.
decode_kwargs:
Additional args for the ``decode`` function which reverts the
tokenized text back to a string.
padding:
Activates and controls padding of the tokenizer.
"""
decode_kwargs = {} if decode_kwargs is None else decode_kwargs
if sequences is None:
sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained "
"models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
if self.test_sentencepiece_ignore_case:
sequences = [sequence.lower() for sequence in sequences]
tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
tokenizer = tokenizer_class.from_pretrained(
model_name,
revision=revision, # to pin the tokenizer version
)
encoding = tokenizer(sequences, padding=padding)
decoded_sequences = [
tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding["input_ids"]
]
encoding_data = encoding.data
self.assertDictEqual(encoding_data, expected_encoding)
for expected, decoded in zip(sequences, decoded_sequences):
if self.test_sentencepiece_ignore_case:
expected = expected.lower()
self.assertEqual(expected, decoded)
def assert_padded_input_match(self, input_r: list, input_p: list, max_length: int, pad_token_id: int):
# Ensure we match max_length
self.assertEqual(len(input_r), max_length)
self.assertEqual(len(input_p), max_length)
# Ensure the number of padded tokens is the same
padded_tokens_r = list(takewhile(lambda i: i == pad_token_id, reversed(input_r)))
padded_tokens_p = list(takewhile(lambda i: i == pad_token_id, reversed(input_p)))
self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)
def assert_batch_padded_input_match(
self,
input_r: dict,
input_p: dict,
max_length: int,
pad_token_id: int,
model_main_input_name: str = "input_ids",
):
for i_r in input_r.values():
(
self.assertEqual(len(i_r), 2),
self.assertEqual(len(i_r[0]), max_length),
self.assertEqual(len(i_r[1]), max_length),
)
(
self.assertEqual(len(i_r), 2),
self.assertEqual(len(i_r[0]), max_length),
self.assertEqual(len(i_r[1]), max_length),
)
for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]):
self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id)
for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]):
self.assertSequenceEqual(i_r, i_p)
@staticmethod
def convert_batch_encode_plus_format_to_encode_plus(batch_encode_plus_sequences):
# Switch from batch_encode_plus format: {'input_ids': [[...], [...]], ...}
# to the list of examples/ encode_plus format: [{'input_ids': [...], ...}, {'input_ids': [...], ...}]
return [
{value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences}
for i in range(len(batch_encode_plus_sequences["input_ids"]))
]
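# --- Illustrative note (added for clarity; not part of the original test utilities) ---
# A minimal example of the conversion performed above:
#
#     batch = {"input_ids": [[1, 2], [3, 4]], "attention_mask": [[1, 1], [1, 0]]}
#     self.convert_batch_encode_plus_format_to_encode_plus(batch)
#     # -> [{"input_ids": [1, 2], "attention_mask": [1, 1]},
#     #     {"input_ids": [3, 4], "attention_mask": [1, 0]}]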
# TODO: this test can be combined with `test_sentencepiece_tokenize_and_convert_tokens_to_string` after the latter is extended to all tokenizers.
def test_tokenize_special_tokens(self):
"""Test `tokenize` with special tokens."""
tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
SPECIAL_TOKEN_1 = "[SPECIAL_TOKEN_1]"
SPECIAL_TOKEN_2 = "[SPECIAL_TOKEN_2]"
# Both methods should add the token to `_additional_special_tokens` and `added_tokens_decoder`
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
tokenizer.add_special_tokens(
{"additional_special_tokens": [SPECIAL_TOKEN_2]}, replace_additional_special_tokens=False
)
token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
self.assertEqual(len(token_1), 1)
self.assertEqual(len(token_2), 1)
self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
# next is failing for almost all the Fast tokenizers now.
# self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
# TODO: this test could be extended to all tokenizers - not just the sentencepiece
def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
"""Test ``_tokenize`` and ``convert_tokens_to_string``."""
if not self.test_sentencepiece:
self.skipTest(reason="test_sentencepiece is set to False")
tokenizer = self.get_tokenizer()
text = "This is text to test the tokenizer."
if self.test_sentencepiece_ignore_case:
text = text.lower()
tokens = tokenizer.tokenize(text)
self.assertTrue(len(tokens) > 0)
# check if converting back to original text works
reverse_text = tokenizer.convert_tokens_to_string(tokens)
if self.test_sentencepiece_ignore_case:
reverse_text = reverse_text.lower()
self.assertEqual(reverse_text, text)
special_tokens = tokenizer.all_special_tokens
special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens)
for special_token in special_tokens:
self.assertIn(special_token, special_tokens_string)
if self.test_rust_tokenizer:
rust_tokenizer = self.get_rust_tokenizer()
special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens)
self.assertEqual(special_tokens_string, special_tokens_string_rust)
def test_sentencepiece_tokenize_and_decode(self):
if not self.test_sentencepiece:
self.skipTest(reason="test_sentencepiece is set to False")
text = "This is text to test the tokenizer."
if self.test_rust_tokenizer:
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
slow_ids = tokenizer(text).input_ids
fast_ids = rust_tokenizer(text).input_ids
self.assertEqual(slow_ids, fast_ids)
slow_decoded = tokenizer.decode(slow_ids)
fast_decoded = rust_tokenizer.decode(slow_ids)
self.assertEqual(slow_decoded, fast_decoded)
def test_subword_regularization_tokenizer(self) -> None:
if not self.test_sentencepiece:
self.skipTest(reason="test_sentencepiece is set to False")
# Subword regularization is only available for the slow tokenizer.
sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1}
tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs)
run_test_in_subprocess(
test_case=self,
target_func=_test_subword_regularization_tokenizer,
inputs={
"tokenizer": tokenizer,
"sp_model_kwargs": sp_model_kwargs,
"test_sentencepiece_ignore_case": self.test_sentencepiece_ignore_case,
},
)
def test_pickle_subword_regularization_tokenizer(self) -> None:
if not self.test_sentencepiece:
self.skipTest(reason="test_sentencepiece is set to False")
"""Google pickle __getstate__ __setstate__ if you are struggling with this."""
# Subword regularization is only available for the slow tokenizer.
sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1}
tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs)
tokenizer_bin = pickle.dumps(tokenizer)
del tokenizer
tokenizer_new = pickle.loads(tokenizer_bin)
run_test_in_subprocess(
test_case=self,
target_func=_test_subword_regularization_tokenizer,
inputs={
"tokenizer": tokenizer_new,
"sp_model_kwargs": sp_model_kwargs,
"test_sentencepiece_ignore_case": self.test_sentencepiece_ignore_case,
},
)
def test_save_sentencepiece_tokenizer(self) -> None:
if not self.test_sentencepiece or not self.test_slow_tokenizer:
self.skipTest(reason="test_sentencepiece or test_slow_tokenizer is set to False")
# We want to verify that we will be able to save the tokenizer even if the original files that were used to
# build the tokenizer have been deleted in the meantime.
text = "This is text to test the tokenizer."
tokenizer_slow_1 = self.get_tokenizer()
encoding_tokenizer_slow_1 = tokenizer_slow_1(text)
tmpdirname_1 = tempfile.mkdtemp()
tmpdirname_2 = tempfile.mkdtemp()
tokenizer_slow_1.save_pretrained(tmpdirname_1)
tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1)
encoding_tokenizer_slow_2 = tokenizer_slow_2(text)
shutil.rmtree(tmpdirname_1)
tokenizer_slow_2.save_pretrained(tmpdirname_2)
tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2)
encoding_tokenizer_slow_3 = tokenizer_slow_3(text)
shutil.rmtree(tmpdirname_2)
self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2)
self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3)
def test_model_input_names_signature(self):
accepted_model_main_input_names = [
"input_ids", # nlp models
"input_values", # speech models
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
# first name of model_input_names has to correspond to main model input name
# to make sure `tokenizer.pad(...)` works correctly
self.assertTrue(tokenizer.model_input_names[0] in accepted_model_main_input_names)
def test_rust_tokenizer_signature(self):
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
signature = inspect.signature(self.rust_tokenizer_class.__init__)
self.assertIn("tokenizer_file", signature.parameters)
self.assertIsNone(signature.parameters["tokenizer_file"].default)
def test_tokenizer_slow_store_full_signature(self):
if not self.test_slow_tokenizer:
self.skipTest(reason="test_slow_tokenizer is set to False")
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_tokenizer_fast_store_full_signature(self):
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
signature = inspect.signature(self.rust_tokenizer_class.__init__)
tokenizer = self.get_rust_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty and parameter_name not in [
"vocab_file",
"merges_file",
"tokenizer_file",
]:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence, _ = self.get_input_output_texts(tokenizer)
# We don't have an exact equivalence on `tokenize()` between Rust and Slow
# Slow tokenizers only split tokens, Rust tokenizers will replace unknown pieces with <unk>
# tokens = tokenizer.tokenize(sequence)
# rust_tokens = rust_tokenizer.tokenize(sequence)
# self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(sequence, add_special_tokens=True)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=True)
self.assertListEqual(ids, rust_ids)
def test_tokenizers_common_properties(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
attributes_list = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
for attr in attributes_list:
self.assertTrue(hasattr(tokenizer, attr))
self.assertTrue(hasattr(tokenizer, attr + "_id"))
self.assertTrue(hasattr(tokenizer, "additional_special_tokens"))
self.assertTrue(hasattr(tokenizer, "additional_special_tokens_ids"))
attributes_list = [
"model_max_length",
"init_inputs",
"init_kwargs",
]
if not isinstance(tokenizer, PreTrainedTokenizerFast):
attributes_list += [
"added_tokens_encoder",
"added_tokens_decoder",
]
for attr in attributes_list:
self.assertTrue(hasattr(tokenizer, attr))
def test_tokenizers_common_ids_setters(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
attributes_list = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
vocab = tokenizer.get_vocab()
token_id_to_test_setters = next(iter(vocab.values()))
token_to_test_setters = tokenizer.convert_ids_to_tokens(
token_id_to_test_setters, skip_special_tokens=False
)
for attr in attributes_list:
setattr(tokenizer, attr + "_id", None)
self.assertEqual(getattr(tokenizer, attr), None)
self.assertEqual(getattr(tokenizer, attr + "_id"), None)
setattr(tokenizer, attr + "_id", token_id_to_test_setters)
self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
setattr(tokenizer, "additional_special_tokens_ids", [])
self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
@parameterized.expand([(True,), (False,)])
def test_tokenizers_special_tokens_properties_unset(self, verbose):
tokenizers = self.get_tokenizers(verbose=verbose)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
attributes_list = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
for attr in attributes_list:
setattr(tokenizer, attr, None)
self.assertIsNone(getattr(tokenizer, attr))
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00e9d,running"
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00e9d,running"
tokenizer.add_tokens(["bim", "bambam"])
additional_special_tokens = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens(
{"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False
)
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
self.assertIn("bim", after_vocab)
self.assertIn("bambam", after_vocab)
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(tmpdirname)
# Test that we can also use the non-legacy saving format for fast tokenizers
tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00e9d,running"
tokenizer.add_tokens(["bim", "bambam"])
additional_special_tokens = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens(
{"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False
)
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
self.assertIn("bim", after_vocab)
self.assertIn("bambam", after_vocab)
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(tmpdirname)
def test_pickle_tokenizer(self):
"""Google pickle __getstate__ __setstate__ if you are struggling with this."""
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertIsNotNone(tokenizer)
text = "Munich and Berlin are nice cities"
subwords = tokenizer.tokenize(text)
filename = os.path.join(self.tmpdirname, "tokenizer.bin")
with open(filename, "wb") as handle:
pickle.dump(tokenizer, handle)
with open(filename, "rb") as handle:
tokenizer_new = pickle.load(handle)
subwords_loaded = tokenizer_new.tokenize(text)
self.assertListEqual(subwords, subwords_loaded)
@require_tokenizers
def test_pickle_added_tokens(self):
tok1 = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True)
tok2 = pickle.loads(pickle.dumps(tok1))
self.assertEqual(tok1.__getstate__(), tok2.__getstate__())
def test_added_tokens_do_lower_case(self):
tokenizers = self.get_tokenizers(do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if not hasattr(tokenizer, "do_lower_case") or not tokenizer.do_lower_case:
continue
special_token = tokenizer.all_special_tokens[0]
text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token
toks_before_adding = tokenizer.tokenize(text) # toks before adding new_toks
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"]
added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks])
toks_after_adding = tokenizer.tokenize(text)
toks_after_adding2 = tokenizer.tokenize(text2)
# Rust tokenizers don't lowercase added tokens at the time of calling `tokenizer.add_tokens`,
# while Python tokenizers do, so new_toks 0 and 2 would be treated as the same, as would new_toks 1 and 3.
self.assertIn(added, [2, 4])
self.assertListEqual(toks_after_adding, toks_after_adding2)
self.assertTrue(
len(toks_before_adding) > len(toks_after_adding), # toks_before_adding should be longer
)
# Check that none of the special tokens are lowercased
sequence_with_special_tokens = "A " + " yEs ".join(tokenizer.all_special_tokens) + " B"
# Convert the tokenized list to str as some special tokens are tokenized like normal tokens
# which have a prefix space, e.g. the mask token of Albert, and cannot match the original
# special tokens exactly.
tokenized_sequence = "".join(tokenizer.tokenize(sequence_with_special_tokens))
for special_token in tokenizer.all_special_tokens:
self.assertTrue(special_token in tokenized_sequence or special_token.lower() in tokenized_sequence)
tokenizers = self.get_tokenizers(do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if hasattr(tokenizer, "do_lower_case") and tokenizer.do_lower_case:
continue
special_token = tokenizer.all_special_tokens[0]
text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token
toks_before_adding = tokenizer.tokenize(text) # toks before adding new_toks
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"]
added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks])
self.assertIn(added, [2, 4])
toks_after_adding = tokenizer.tokenize(text)
toks_after_adding2 = tokenizer.tokenize(text2)
self.assertEqual(len(toks_after_adding), len(toks_after_adding2)) # Length should still be the same
self.assertNotEqual(
toks_after_adding[1], toks_after_adding2[1]
) # But at least the first non-special tokens should differ
self.assertTrue(
len(toks_before_adding) > len(toks_after_adding), # toks_before_adding should be longer
)
# TODO @ArthurZ Nuke this
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests (but also otherwise) because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = [
AddedToken("aaaaa bbbbbb", rstrip=True, lstrip=True),
AddedToken("cccccccccdddddddd", rstrip=True, lstrip=True),
]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {
"eos_token": AddedToken(">>>>|||<||<<|<<", rstrip=True, lstrip=True),
"pad_token": AddedToken("<<<<<|||>|>>>>|>", rstrip=True, lstrip=True),
}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
">>>>|||<||<<|<< aaaaa bbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
def test_add_special_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_text, ids = self.get_clean_sequence(tokenizer)
special_token = AddedToken("[SPECIAL_TOKEN]", lstrip=True, rstrip=True)
tokenizer.add_special_tokens({"cls_token": special_token})
special_token = str(special_token)
encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
encoded = tokenizer.encode(text, add_special_tokens=False)
input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
self.assertEqual(encoded, input_encoded + special_token_id)
decoded = tokenizer.decode(encoded, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_text, output_text = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2, output_text)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False, fast=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
new_toks = [
# These are added tokens, they will be normalized....
AddedToken("[ABC]", normalized=True, lstrip=True, rstrip=True),
AddedToken("[DEF]", normalized=True, lstrip=True, rstrip=True),
AddedToken("GHI IHG", normalized=True, lstrip=True, rstrip=True),
]
tokenizer.add_tokens(new_toks)
tokenizer.add_tokens([AddedToken("[SAMPLE]", normalized=True)], special_tokens=True)
input = "[ABC][DEF][ABC]GHI IHG[DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] GHI IHG [DEF]"
else:
output = input
encoded = tokenizer.encode(input, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
return
# TODO @ArthurZ Refactor testing as now the do_normalize works for special and non special
encoded = tokenizer.encode("[ABC] [DEF][SAMPLE]", add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=True, skip_special_tokens=False)
self.assertIn(decoded, ["[ABC] [DEF] [SAMPLE]", "[ABC] [DEF] [SAMPLE]".lower()])
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=True, skip_special_tokens=True)
self.assertIn(decoded, ["[ABC] [DEF]", "[ABC] [DEF]".lower()])
encoded = tokenizer.encode("[ABC][SAMPLE][DEF]", add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=True)
self.assertIn(decoded, ["[ABC] [SAMPLE] [DEF]", "[ABC][SAMPLE][DEF]".lower()])
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=False)
self.assertIn(decoded, ["[ABC][SAMPLE][DEF]", "[ABC][SAMPLE][DEF]".lower()])
def test_mask_output(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
seq_0 = "Test this method."
seq_1 = "With these inputs."
information = tokenizer.encode_plus(seq_0, seq_1, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
# We want sequence 0 and sequence 1 to be tagged with token_type_ids 0 and 1 respectively
# (regardless of whether the model uses token type ids).
# We rely on this assumption in the QA pipeline among other places.
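# For example, a BERT-like encoding of a single sequence typically yields token_type_ids of all
# zeros, e.g. [0, 0, 0, 0] (illustrative values only); here we only check that 0 is present.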
output = tokenizer(seq_0, return_token_type_ids=True)
self.assertIn(0, output["token_type_ids"])
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = "With these inputs."
# We want sequence 0 and sequence 1 to be tagged with token_type_ids 0 and 1 respectively
# (regardless of whether the model uses token type ids).
# We rely on this assumption in the QA pipeline among other places.
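# For example, a BERT-like fast tokenizer encoding a pair typically yields sequence_ids such as
# [None, 0, ..., 0, None, 1, ..., 1, None] (illustrative), with None marking special tokens.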
output = tokenizer(seq_0)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
@require_jinja
def test_chat_template(self):
dummy_template = "{% for message in messages %}{{message['role'] + message['content']}}{% endfor %}"
dummy_conversation = [
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "assistant message"},
]
expected_output = "systemsystem messageuseruser messageassistantassistant message"
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, return_dict=False
)
self.assertEqual(output, expected_output) # Test we can pass chat_template arg
# Check that no error raised when tokenize=True
output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=True, return_dict=False
)
dict_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=True, return_dict=True
)
self.assertEqual(dict_output["input_ids"], output) # Test return_dict behaviour matches
tokenizer.chat_template = dummy_template
self.assertEqual(tokenizer.chat_template, dummy_template) # Test property setter
output = tokenizer.apply_chat_template(dummy_conversation, tokenize=False, return_dict=False)
self.assertEqual(output, expected_output) # Test chat_template attribute is used if no arg is passed
# Check that no error raised
tokenizer.apply_chat_template(dummy_conversation, tokenize=True, return_dict=False)
with tempfile.TemporaryDirectory() as tmp_dir_name:
save_files = tokenizer.save_pretrained(tmp_dir_name, save_jinja_files=False)
# Check we aren't saving a chat_template.jinja file
self.assertFalse(any(file.endswith("chat_template.jinja") for file in save_files))
new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)
self.assertEqual(new_tokenizer.chat_template, dummy_template) # Test template has persisted
output = new_tokenizer.apply_chat_template(dummy_conversation, tokenize=False, return_dict=False)
self.assertEqual(output, expected_output) # Test output is the same after reloading
# Check that no error raised
new_tokenizer.apply_chat_template(dummy_conversation, tokenize=True, return_dict=False)
with tempfile.TemporaryDirectory() as tmp_dir_name:
save_files = tokenizer.save_pretrained(tmp_dir_name)
# Check we are saving a chat_template.jinja file
self.assertTrue(any(file.endswith("chat_template.jinja") for file in save_files))
chat_template_file = Path(tmp_dir_name) / "chat_template.jinja"
self.assertTrue(chat_template_file.is_file())
self.assertEqual(chat_template_file.read_text(), dummy_template)
config_dict = json.loads((Path(tmp_dir_name) / "tokenizer_config.json").read_text())
# Assert the chat template is not in the config when it's saved as a separate file
self.assertNotIn("chat_template", config_dict)
new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)
self.assertEqual(new_tokenizer.chat_template, dummy_template) # Test template has persisted
output = new_tokenizer.apply_chat_template(dummy_conversation, tokenize=False, return_dict=False)
self.assertEqual(output, expected_output) # Test output is the same after reloading
# Check that no error raised
new_tokenizer.apply_chat_template(dummy_conversation, tokenize=True, return_dict=False)
@require_jinja
def test_chat_template_save_loading(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
signature = inspect.signature(tokenizer.__init__)
if "chat_template" not in {*signature.parameters.keys()}:
self.skipTest("tokenizer doesn't accept chat templates at input")
tokenizer.chat_template = "test template"
with tempfile.TemporaryDirectory() as tmpdirname:
tokenizer.save_pretrained(tmpdirname)
self.assertTrue(Path(tmpdirname, "chat_template.jinja").is_file())
self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
self.assertFalse(Path(tmpdirname, "additional_chat_templates").is_dir())
reloaded_tokenizer = self.tokenizer_class.from_pretrained(tmpdirname)
self.assertEqual(tokenizer.chat_template, reloaded_tokenizer.chat_template)
# When we save as single files, the chat template is shared, so both the reloaded tokenizer
# and its underlying tokenizer should get the chat template as well
self.assertEqual(reloaded_tokenizer.chat_template, reloaded_tokenizer.tokenizer.chat_template)
with tempfile.TemporaryDirectory() as tmpdirname:
tokenizer.chat_template = {"default": "a", "secondary": "b"}
tokenizer.save_pretrained(tmpdirname)
self.assertTrue(Path(tmpdirname, "chat_template.jinja").is_file())
self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
self.assertTrue(Path(tmpdirname, "additional_chat_templates").is_dir())
reloaded_tokenizer = self.tokenizer_class.from_pretrained(tmpdirname)
self.assertEqual(tokenizer.chat_template, reloaded_tokenizer.chat_template)
# When we save as single files, the chat template is shared, so both the reloaded tokenizer
# and its underlying tokenizer should get the chat template as well
self.assertEqual(reloaded_tokenizer.chat_template, reloaded_tokenizer.tokenizer.chat_template)
with tempfile.TemporaryDirectory() as tmpdirname:
tokenizer.chat_template = {"default": "a", "secondary": "b"}
tokenizer.save_pretrained(tmpdirname, save_jinja_files=False)
self.assertFalse(Path(tmpdirname, "chat_template.jinja").is_file())
self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
self.assertFalse(Path(tmpdirname, "additional_chat_templates").is_dir())
reloaded_tokenizer = self.tokenizer_class.from_pretrained(tmpdirname)
self.assertEqual(tokenizer.chat_template, reloaded_tokenizer.chat_template)
# When we save as single files, the chat template is shared, so both the reloaded tokenizer
# and its underlying tokenizer should get the chat template as well
self.assertEqual(reloaded_tokenizer.chat_template, reloaded_tokenizer.tokenizer.chat_template)
@require_jinja
def test_chat_template_batched(self):
dummy_template = "{% for message in messages %}{{message['role'] + message['content']}}{% endfor %}"
dummy_conversations = [
[
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "assistant message"},
],
[
{"role": "system", "content": "system message 2"},
{"role": "user", "content": "user message 2"},
{"role": "assistant", "content": "assistant message 2"},
],
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
output = tokenizer.apply_chat_template(
dummy_conversations, chat_template=dummy_template, tokenize=False
)
self.assertEqual(
output,
[
"systemsystem messageuseruser messageassistantassistant message",
"systemsystem message 2useruser message 2assistantassistant message 2",
],
)
one_element_output = tokenizer.apply_chat_template(
dummy_conversations[:1], chat_template=dummy_template, tokenize=False
)
self.assertEqual(
one_element_output, ["systemsystem messageuseruser messageassistantassistant message"]
) # Assert that list structure is retained even with one element
tokenizer.apply_chat_template(
dummy_conversations, chat_template=dummy_template, tokenize=True
) # Check that no error raised
@require_jinja
def test_jinja_loopcontrols(self):
break_template = """
{%- for message in messages %}
{{- message.role + " " + message.content }}
{%- if loop.first %}
{%- break %}
{%- endif %}
{%- endfor %}""".strip()
dummy_conversation = [
{"role": "system", "content": "1"},
{"role": "user", "content": "2"},
{"role": "assistant", "content": "3"},
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
break_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=break_template, tokenize=False
)
self.assertEqual(break_output, "system 1") # Loop should break after first iter
@require_jinja
def test_jinja_strftime(self):
strftime_template = """{{- strftime_now("%Y-%m-%d") }}""".strip()
dummy_conversation = [
{"role": "system", "content": "1"},
{"role": "user", "content": "2"},
{"role": "assistant", "content": "3"},
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
strftime_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=strftime_template, tokenize=False
)
# Assert that we get a date formatted as expected
self.assertEqual(len(strftime_output), 10)
self.assertEqual(len(strftime_output.split("-")), 3)
@require_torch
@require_jinja
def test_chat_template_return_assistant_tokens_mask(self):
dummy_template = (
"{% for message in messages %}"
"{% if (message['role'] != 'assistant') %}"
"{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
"{% elif (message['role'] == 'assistant')%}"
"{{'<|im_start|>' + message['role'] + '\n'}}"
"{% generation %}"
"{{message['content'] + '<|im_end|>'}}"
"{% endgeneration %}"
"{{'\n'}}"
"{% endif %}"
"{% endfor %}"
)
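# The `{% generation %}` / `{% endgeneration %}` markers delimit the assistant-generated spans in the
# template; `return_assistant_tokens_mask=True` uses them to build the `assistant_masks` output.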
conversations = [
[
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "start turn 1 assistant message. end turn 1"},
{"role": "user", "content": "user message 2"},
{"role": "assistant", "content": "start turn 2 assistant message. end turn 2"},
],
[
{"role": "system", "content": "system message 3"},
{"role": "user", "content": "user message 3"},
{"role": "assistant", "content": "start turn 3 assistant message. end turn 3"},
{"role": "user", "content": "user message 4"},
{"role": "assistant", "content": "start turn 4 assistant message. end turn 4"},
],
]
# These are the prefix and suffix strings of all the assistant messages. Used to find the assistant substring
# in the entire chat string, and then find the corresponding tokens in the tokenized output.
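# `BatchEncoding.char_to_token(batch_index, char_index)` maps a character position in the rendered chat
# string to the index of the token covering it (fast tokenizers only); this is how the assistant token
# spans are located below.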
assistant_prefix_suffix = [
[("start turn 1", "end turn 1<|im_end|>"), ("start turn 2", "end turn 2<|im_end|>")],
[("start turn 3", "end turn 3<|im_end|>"), ("start turn 4", "end turn 4<|im_end|>")],
]
for tokenizer, pretrained_name, _ in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
if not self.test_rust_tokenizer:
self.skipTest(reason="No fast tokenizer defined")
tokenizer_r = self.get_rust_tokenizer(pretrained_name)
self._check_no_pad_token_padding(tokenizer_r, conversations)
tokenizer_r.padding_side = "right"
# check batched
output = tokenizer_r.apply_chat_template(
conversations,
chat_template=dummy_template,
tokenize=True,
return_assistant_tokens_mask=True,
return_dict=True,
)
output_pt = tokenizer_r.apply_chat_template(
conversations,
chat_template=dummy_template,
tokenize=True,
padding=True,
return_assistant_tokens_mask=True,
return_dict=True,
return_tensors="pt",
)
self.assertEqual(type(output_pt["assistant_masks"]), torch.Tensor)
self.assertEqual(output_pt["assistant_masks"].shape, output_pt["input_ids"].shape)
for i, conv in enumerate(conversations):
chat_string = tokenizer_r.apply_chat_template(conv, tokenize=False, chat_template=dummy_template)
assistant_start = output.char_to_token(i, chat_string.index(assistant_prefix_suffix[i][0][0]))
assistant_end = output.char_to_token(
i,
chat_string.index(assistant_prefix_suffix[i][0][1])
+ len(assistant_prefix_suffix[i][0][1])
- 1,
)
assistant_start2 = output.char_to_token(i, chat_string.index(assistant_prefix_suffix[i][1][0]))
assistant_end2 = output.char_to_token(
i,
chat_string.index(assistant_prefix_suffix[i][1][1])
+ len(assistant_prefix_suffix[i][1][1])
- 1,
)
# assert 1 in first assistant message
self.assertEqual(
output["assistant_masks"][i][assistant_start : assistant_end + 1],
[1] * (assistant_end - assistant_start + 1),
)
self.assertTrue(
(output_pt["assistant_masks"][i, assistant_start : assistant_end + 1] == 1).all(),
)
# assert 1 in second assistant message
self.assertEqual(
output["assistant_masks"][i][assistant_start2 : assistant_end2 + 1],
[1] * (assistant_end2 - assistant_start2 + 1),
)
self.assertTrue(
(output_pt["assistant_masks"][i, assistant_start2 : assistant_end2 + 1] == 1).all(),
)
# assert 0 in user/system indices
self.assertEqual(output["assistant_masks"][i][:assistant_start], [0] * assistant_start)
self.assertTrue((output_pt["assistant_masks"][i, :assistant_start] == 0).all())
self.assertEqual(
output["assistant_masks"][i][assistant_end + 1 : assistant_start2],
[0] * (assistant_start2 - assistant_end - 1),
)
self.assertTrue(
(output_pt["assistant_masks"][i, assistant_end + 1 : assistant_start2] == 0).all(),
)
# check not batched
output = tokenizer_r.apply_chat_template(
conversations[0],
chat_template=dummy_template,
tokenize=True,
return_assistant_tokens_mask=True,
return_dict=True,
)
output_pt = tokenizer_r.apply_chat_template(
conversations[0],
chat_template=dummy_template,
tokenize=True,
return_assistant_tokens_mask=True,
return_dict=True,
return_tensors="pt",
)
self.assertEqual(type(output_pt["assistant_masks"]), torch.Tensor)
self.assertEqual(output_pt["assistant_masks"].shape, output_pt["input_ids"].shape)
chat_string = tokenizer_r.apply_chat_template(
conversations[0], tokenize=False, chat_template=dummy_template
)
assistant_start = output.char_to_token(0, chat_string.index(assistant_prefix_suffix[0][0][0]))
assistant_end = output.char_to_token(
0, chat_string.index(assistant_prefix_suffix[0][0][1]) + len(assistant_prefix_suffix[0][0][1]) - 1
)
assistant_start2 = output.char_to_token(0, chat_string.index(assistant_prefix_suffix[0][1][0]))
assistant_end2 = output.char_to_token(
0, chat_string.index(assistant_prefix_suffix[0][1][1]) + len(assistant_prefix_suffix[0][1][1]) - 1
)
# assert 1 in assistant indices
self.assertEqual(
output["assistant_masks"][assistant_start : assistant_end + 1],
[1] * (assistant_end - assistant_start + 1),
)
self.assertTrue(
(output_pt["assistant_masks"][assistant_start : assistant_end + 1] == 1).all(),
)
self.assertEqual(
output["assistant_masks"][assistant_start2 : assistant_end2 + 1],
[1] * (assistant_end2 - assistant_start2 + 1),
)
self.assertTrue(
(output_pt["assistant_masks"][assistant_start2 : assistant_end2 + 1] == 1).all(),
)
# assert 0 in user/system indices
self.assertEqual(output["assistant_masks"][:assistant_start], [0] * assistant_start)
self.assertTrue((output_pt["assistant_masks"][0, :assistant_start] == 0).all())
self.assertEqual(
output["assistant_masks"][assistant_end + 1 : assistant_start2],
[0] * (assistant_start2 - assistant_end - 1),
)
self.assertTrue(
(output_pt["assistant_masks"][0, assistant_end + 1 : assistant_start2] == 0).all(),
)
@require_jinja
def test_chat_template_return_assistant_tokens_mask_truncated(self):
dummy_template = (
"{% for message in messages %}"
"{% if (message['role'] != 'assistant') %}"
"{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
"{% elif (message['role'] == 'assistant')%}"
"{{'<|im_start|>' + message['role'] + '\n'}}"
"{% generation %}"
"{{message['content'] + '<|im_end|>'}}"
"{% endgeneration %}"
"{{'\n'}}"
"{% endif %}"
"{% endfor %}"
)
conversations = [
[
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{
"role": "assistant",
"content": (
"start turn assistant. long string to be truncated, long string to be truncated, "
"long string to be truncated, long string to be truncated, long string to be truncated"
),
},
{"role": "user", "content": "another user message"},
],
[
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{
"role": "assistant",
"content": (
"start turn assistant. long string to be truncated, long string to be truncated, "
"long string to be truncated, long string to be truncated, long string to be truncated"
),
},
{"role": "user", "content": "another user message"},
],
]
for tokenizer, pretrained_name, _ in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
if not self.test_rust_tokenizer:
self.skipTest(reason="No fast tokenizer defined")
tokenizer_r = self.get_rust_tokenizer(pretrained_name)
# Find where to truncate, as the number of tokens differs between tokenizers and we want the
# truncation to happen in the middle of the assistant content.
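# For a single (un-batched) encoding, `char_to_token(char_index)` takes no batch index and returns the
# token index at that character position, which is then reused as `max_length` for truncation below.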
full_encoding = tokenizer_r.apply_chat_template(
conversations[0],
chat_template=dummy_template,
tokenize=True,
return_dict=True,
)
chat_string = tokenizer_r.apply_chat_template(
conversations[0], tokenize=False, chat_template=dummy_template
)
truncation_position = full_encoding.char_to_token(chat_string.index(", long string to be truncated,"))
# check batched
output = tokenizer_r.apply_chat_template(
conversations,
chat_template=dummy_template,
tokenize=True,
return_assistant_tokens_mask=True,
max_length=truncation_position,
truncation=True,
return_dict=True,
)
for i, conv in enumerate(conversations):
chat_string = tokenizer_r.apply_chat_template(conv, tokenize=False, chat_template=dummy_template)
assistant_start = output.char_to_token(i, chat_string.index("start turn assistant"))
# assert 1 from assistant_start to the end because the rest is truncated.
self.assertEqual(
output["assistant_masks"][i][assistant_start:],
[1] * (len(output["assistant_masks"][i]) - assistant_start),
)
# check not batched
output = tokenizer_r.apply_chat_template(
conversations[0],
chat_template=dummy_template,
tokenize=True,
return_assistant_tokens_mask=True,
return_dict=True,
max_length=truncation_position,
truncation=True,
)
chat_string = tokenizer_r.apply_chat_template(
conversations[0], tokenize=False, chat_template=dummy_template
)
assistant_start = output.char_to_token(0, chat_string.index("start turn assistant"))
# assert 1 from assistant_start to the end because the rest is truncated.
self.assertEqual(
output["assistant_masks"][assistant_start:],
[1] * (len(output["assistant_masks"]) - assistant_start),
)
@require_jinja
def test_continue_final_message(self):
dummy_template = """
{%- for message in messages %}
{{- "<|im_start|>" + message['role'] + "\n" + message['content'] + "<|im_end|>" + "\n"}}
{%- endfor %}"""
dummy_conversation = [
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "assistant message"},
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=False
)
self.assertEqual(
output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message<|im_end|>\n",
)
prefill_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
)
# Assert that the final message is unterminated
self.assertEqual(
prefill_output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message",
)
@require_jinja
def test_continue_final_message_with_trim(self):
"""Regression test for chat templates with trimming: https://github.com/huggingface/transformers/pull/34214"""
dummy_template = """
{%- for message in messages %}
{{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>" + "\n"}}
{%- endfor %}"""
dummy_conversation = [
{"role": "system", "content": "system message"},
{"role": "user", "content": "user message"},
{"role": "assistant", "content": "assistant message "}, # Note the trailing whitespace
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=False
)
self.assertEqual(
output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message<|im_end|>\n",
)
prefill_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
)
# Assert that the final message is unterminated
self.assertEqual(
prefill_output,
"<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message",
)
@require_jinja
def test_continue_final_message_with_decoy_earlier_message(self):
"""Regression test for chat templates where an earlier message has similar content to the final message
https://github.com/huggingface/transformers/issues/35433"""
dummy_template = """
{%- for message in messages %}
{{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>" + "\n"}}
{%- endfor %}"""
dummy_conversation = [
{"role": "user", "content": "hi 0"},
{"role": "assistant", "content": "bye: 0"},
{"role": "user", "content": "hi 1"},
{"role": "assistant", "content": "bye: "},
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
prefill_output = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
)
# Assert that the final message is unterminated
self.assertEqual(
prefill_output,
"<|im_start|>user\nhi 0<|im_end|>\n<|im_start|>assistant\nbye: 0<|im_end|>\n<|im_start|>user\nhi 1<|im_end|>\n<|im_start|>assistant\nbye:",
)
@require_jinja
def test_chat_template_dict(self):
dummy_template_1 = "{{'a'}}"
dummy_template_2 = "{{'b'}}"
dummy_conversation = [
{"role": "user", "content": "user message"},
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.chat_template = {"template1": dummy_template_1, "template2": dummy_template_2}
output1 = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template_1, tokenize=False
)
output1_via_dict = tokenizer.apply_chat_template(
dummy_conversation, chat_template="template1", tokenize=False
)
self.assertEqual(output1, output1_via_dict)
output2 = tokenizer.apply_chat_template(
dummy_conversation, chat_template=dummy_template_2, tokenize=False
)
output2_via_dict = tokenizer.apply_chat_template(
dummy_conversation, chat_template="template2", tokenize=False
)
self.assertEqual(output2, output2_via_dict)
@require_jinja
def test_chat_template_dict_saving(self):
dummy_template_1 = "{{'a'}}"
dummy_template_2 = "{{'b'}}"
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
for save_jinja_files in (True, False):
tokenizer.chat_template = {"default": dummy_template_1, "template2": dummy_template_2}
with tempfile.TemporaryDirectory() as tmp_dir_name:
# Test saving a dict of multiple templates, both with and without save_jinja_files
tokenizer.save_pretrained(tmp_dir_name, save_jinja_files=save_jinja_files)
if save_jinja_files:
config_dict = json.load(open(os.path.join(tmp_dir_name, "tokenizer_config.json")))
self.assertNotIn("chat_template", config_dict)
self.assertTrue(os.path.exists(os.path.join(tmp_dir_name, "chat_template.jinja")))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir_name, "additional_chat_templates/template2.jinja"))
)
else:
config_dict = json.load(open(os.path.join(tmp_dir_name, "tokenizer_config.json")))
# Assert that chat templates are correctly serialized as lists of dictionaries
self.assertEqual(
config_dict["chat_template"],
[
{"name": "default", "template": "{{'a'}}"},
{"name": "template2", "template": "{{'b'}}"},
],
)
self.assertFalse(os.path.exists(os.path.join(tmp_dir_name, "chat_template.jinja")))
new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)
# Assert that the serialized list is correctly reconstructed as a single dict
self.assertEqual(new_tokenizer.chat_template, tokenizer.chat_template)
@require_jinja
def test_chat_template_file_priority(self):
dummy_template1 = "a"
dummy_template2 = "b"
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.chat_template = dummy_template1
tokenizer.save_pretrained(tmp_dir_name, save_jinja_files=False)
with Path(tmp_dir_name, "chat_template.jinja").open("w") as f:
f.write(dummy_template2)
new_tokenizer = tokenizer.from_pretrained(tmp_dir_name)
# Assert the file template clobbers any template in the config
self.assertEqual(new_tokenizer.chat_template, dummy_template2)
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = "With these inputs."
sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
attached_sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_maximum_encoding_length_single_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
sequence = tokenizer.encode(seq_0, add_special_tokens=False)
total_length = len(sequence)
self.assertGreater(
total_length, 4, "Issue with the testing sequence, please update it, it's too short"
)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_1 = seq_0 * model_max_length
sequence1 = tokenizer(seq_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
self.assertGreater(
total_length1,
model_max_length,
"Issue with the testing sequence, please update it, it's too short",
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"Truncation: {truncation_state}"):
output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state)
self.assertEqual(len(output["input_ids"]), model_max_length)
output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
# Overflowing tokens
stride = 2
information = tokenizer(
seq_0,
max_length=total_length - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
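# Fast tokenizers return each overflowing window as an additional row of `input_ids`, while slow
# (Python) tokenizers return a separate `overflowing_tokens` entry, as exercised in both branches below.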
if isinstance(tokenizer, PreTrainedTokenizerFast):
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence[:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :])
else:
truncated_sequence = information["input_ids"]
overflowing_tokens = information["overflowing_tokens"]
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence[:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :])
def test_maximum_encoding_length_pair_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Build a sequence from our model's vocabulary
stride = 2
seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
if len(ids) <= 2 + stride:
seq_0 = (seq_0 + " ") * (2 + stride)
ids = None
seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False)
self.assertGreater(len(seq0_tokens), 2 + stride)
seq_1 = "This is another sentence to be encoded."
seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
if abs(len(seq0_tokens) - len(seq1_tokens)) <= 2:
seq1_tokens = seq1_tokens + seq1_tokens
seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False)
seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
self.assertGreater(len(seq1_tokens), 2 + stride)
smallest = seq1_tokens if len(seq0_tokens) > len(seq1_tokens) else seq0_tokens
# We are not using the special tokens - a bit too hard to test all the tokenizers with this
# TODO try this again later
sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) # , add_prefix_space=False)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_2 = seq_0 * model_max_length
self.assertGreater(len(seq_2), model_max_length)
sequence1 = tokenizer(seq_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False)
total_length2 = len(sequence2["input_ids"])
self.assertLess(
total_length1, model_max_length - 10, "Issue with the testing sequence, please update it."
)
self.assertGreater(
total_length2, model_max_length, "Issue with the testing sequence, please update it."
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state)
self.assertEqual(len(output["input_ids"]), model_max_length)
output = tokenizer(
[seq_2], [seq_1], padding=padding_state, truncation=truncation_state
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
# Simple
output = tokenizer(seq_1, seq_2, padding=padding_state, truncation="only_second")
self.assertEqual(len(output["input_ids"]), model_max_length)
output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation="only_second")
self.assertEqual(len(output["input_ids"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
truncated_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[:-2] + tokenizer.encode(
seq_1, add_special_tokens=False
)
truncated_second_sequence = (
tokenizer.encode(seq_0, add_special_tokens=False)
+ tokenizer.encode(seq_1, add_special_tokens=False)[:-2]
)
truncated_longest_sequence = (
truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
)
overflow_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[
-(2 + stride) :
] + tokenizer.encode(seq_1, add_special_tokens=False)
overflow_second_sequence = (
tokenizer.encode(seq_0, add_special_tokens=False)
+ tokenizer.encode(seq_1, add_special_tokens=False)[-(2 + stride) :]
)
overflow_longest_sequence = (
overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
information_first_truncated = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
truncated_sequence = information_first_truncated["input_ids"][0]
overflowing_tokens = information_first_truncated["input_ids"][1]
self.assertEqual(len(information_first_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens))
self.assertEqual(overflowing_tokens, overflow_first_sequence)
else:
truncated_sequence = information_first_truncated["input_ids"]
overflowing_tokens = information_first_truncated["overflowing_tokens"]
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq0_tokens[-(2 + stride) :])
information_second_truncated = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_second",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
truncated_sequence = information_second_truncated["input_ids"][0]
overflowing_tokens = information_second_truncated["input_ids"][1]
self.assertEqual(len(information_second_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens))
self.assertEqual(overflowing_tokens, overflow_second_sequence)
else:
truncated_sequence = information_second_truncated["input_ids"]
overflowing_tokens = information_second_truncated["overflowing_tokens"]
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq1_tokens[-(2 + stride) :])
# TODO: FIXME @ArthurZucker
@unittest.skip(
reason="start to fail after # 29473. See https://github.com/huggingface/transformers/pull/29473#pullrequestreview-1945687810"
)
@slow
@require_read_token
def test_encode_decode_fast_slow_all_tokens(self):
if self.rust_tokenizer_class is not None:
pretrained_name = self.from_pretrained_id
slow_tokenizer = self.get_tokenizer(pretrained_name, legacy=False)
with self.subTest(f"{pretrained_name}"):
rust_tokenizer = self.get_rust_tokenizer(pretrained_name, from_slow=True, legacy=False)
input_full_vocab_ids = list(
range(len(slow_tokenizer))
) # TODO let's maybe shuffle this! And run it 4 times. This way we cover more combinations
input_full_vocab_string = rust_tokenizer.convert_tokens_to_string(
rust_tokenizer.convert_ids_to_tokens(input_full_vocab_ids)
)
print(f"Length of the input string that is tested: {len(input_full_vocab_string)}")
for chunk in range(0, len(input_full_vocab_string) - 1024, 1024):
string_to_check = input_full_vocab_string[chunk : chunk + 1024]
with self.subTest(f"{(chunk / len(input_full_vocab_string)) * 100}%"):
slow_encode = slow_tokenizer.encode(string_to_check)
fast_encode = rust_tokenizer.encode(string_to_check)
self.assertEqual(
slow_encode,
fast_encode,
"Hint: the following tokenization diff were obtained for slow vs fast:\n "
f"elements in slow: {set(slow_tokenizer.tokenize(string_to_check)) - set(rust_tokenizer.tokenize(string_to_check))} \nvs\n "
f"elements in fast: {set(rust_tokenizer.tokenize(string_to_check)) - set(slow_tokenizer.tokenize(string_to_check))} \n"
f"string used : {string_to_check}",
)
print(f"Length of the input ids that is tested: {len(input_full_vocab_ids)}")
for chunk in range(0, len(input_full_vocab_ids) - 100, 100):
ids_to_decode = input_full_vocab_ids[chunk : chunk + 100]
with self.subTest(f"{(chunk / len(input_full_vocab_string)) * 100}%"):
self.assertEqual(
slow_tokenizer.decode(
ids_to_decode,
space_between_special_tokens=False,
clean_up_tokenization_spaces=False,
),
rust_tokenizer.decode(
ids_to_decode,
space_between_special_tokens=False,
clean_up_tokenization_spaces=False,
),
f"Hint here are the tokens being decoded.: {slow_tokenizer.convert_ids_to_tokens(ids_to_decode)}",
)
# def test_encode_input_type(self):
# tokenizers = self.get_tokenizers(do_lower_case=False)
# for tokenizer in tokenizers:
# with self.subTest(f"{tokenizer.__class__.__name__}"):
# sequence = "Let's encode this sequence"
# tokens = sequence.split() # tokenizer.tokenize(sequence)
# # input_ids = tokenizer.convert_tokens_to_ids(tokens)
# formatted_input = tokenizer.encode(sequence, add_special_tokens=True, add_prefix_space=False)
# self.assertEqual(
# tokenizer.encode(tokens, is_split_into_words=True, add_special_tokens=True), formatted_input
# )
# # This is not supported with the Rust tokenizers
# # self.assertEqual(tokenizer.encode(input_ids, add_special_tokens=True), formatted_input)
# def test_swap_special_token(self):
# tokenizers = self.get_tokenizers(do_lower_case=False)
# for tokenizer in tokenizers:
# with self.subTest(f"{tokenizer.__class__.__name__}"):
# # Our mask token
# mask = "<mask>"
# # We take a single word in the middle of the vocabulary
# all_tokens = sorted(tokenizer.get_vocab().keys())
# word = tokenizer.decode(tokenizer.encode(all_tokens[len(all_tokens)//2], add_special_tokens=False)[:1])
# sequence_0 = "Encode " + word + " sequence"
# sequence_masked_0 = "Encode " + mask + " sequence"
# sequence_1 = word + " this sequence"
# sequence_masked_1 = mask + " this sequence"
# # Add tokens so that masked token isn't split
# # tokens = [AddedToken(t, lstrip=True, normalized=False) for t in sequence.split()]
# # tokenizer.add_tokens(tokens)
# tokenizer.add_special_tokens(
# {"mask_token": AddedToken(mask, normalized=False)}
# ) # Eat left space on Byte-level BPE tokenizers
# mask_ind = tokenizer.convert_tokens_to_ids(mask)
# # Test first masked sequence
# encoded_0 = tokenizer.encode(sequence_0, add_special_tokens=False)
# encoded_masked = tokenizer.encode(sequence_masked_0, add_special_tokens=False)
# self.assertEqual(len(encoded_masked), len(encoded_0))
# mask_loc = encoded_masked.index(mask_ind)
# encoded_masked[mask_loc] = encoded_0[mask_loc]
# self.assertEqual(encoded_masked, encoded_0)
# # Test second masked sequence
# encoded_1 = tokenizer.encode(sequence_1, add_special_tokens=False)
# encoded_masked = tokenizer.encode(sequence_masked_1, add_special_tokens=False)
# self.assertEqual(len(encoded_masked), len(encoded_1))
# mask_loc = encoded_masked.index(mask_ind)
# encoded_masked[mask_loc] = encoded_1[mask_loc]
# self.assertEqual(encoded_masked, encoded_1)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
# Testing single inputs
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
sequence_0,
add_special_tokens=True,
return_special_tokens_mask=True, # , add_prefix_space=False
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
sequence_1 = "This one too please."
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
sequence_0,
sequence_1,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_padding_side_in_kwargs(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
if self.test_rust_tokenizer:
tokenizer_r = self.get_rust_tokenizer(pretrained_name, padding_side="left", **kwargs)
self.assertEqual(tokenizer_r.padding_side, "left")
tokenizer_r = self.get_rust_tokenizer(pretrained_name, padding_side="right", **kwargs)
self.assertEqual(tokenizer_r.padding_side, "right")
self.assertRaises(
ValueError,
self.rust_tokenizer_class.from_pretrained,
pretrained_name,
padding_side="unauthorized",
**kwargs,
)
if self.test_slow_tokenizer:
tokenizer_p = self.get_tokenizer(pretrained_name, padding_side="left", **kwargs)
self.assertEqual(tokenizer_p.padding_side, "left")
tokenizer_p = self.get_tokenizer(pretrained_name, padding_side="right", **kwargs)
self.assertEqual(tokenizer_p.padding_side, "right")
self.assertRaises(
ValueError,
self.tokenizer_class.from_pretrained,
pretrained_name,
padding_side="unauthorized",
**kwargs,
)
def test_truncation_side_in_kwargs(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
if self.test_rust_tokenizer:
tokenizer_r = self.get_rust_tokenizer(pretrained_name, truncation_side="left", **kwargs)
self.assertEqual(tokenizer_r.truncation_side, "left")
tokenizer_r = self.get_rust_tokenizer(pretrained_name, truncation_side="right", **kwargs)
self.assertEqual(tokenizer_r.truncation_side, "right")
self.assertRaises(
ValueError,
self.rust_tokenizer_class.from_pretrained,
pretrained_name,
truncation_side="unauthorized",
**kwargs,
)
if self.test_slow_tokenizer:
tokenizer_p = self.get_tokenizer(pretrained_name, truncation_side="left", **kwargs)
self.assertEqual(tokenizer_p.truncation_side, "left")
tokenizer_p = self.get_tokenizer(pretrained_name, truncation_side="right", **kwargs)
self.assertEqual(tokenizer_p.truncation_side, "right")
self.assertRaises(
ValueError,
self.tokenizer_class.from_pretrained,
pretrained_name,
truncation_side="unauthorized",
**kwargs,
)
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if necessary
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
self.assertEqual(sequence_length + padding_size, padded_sequence_length)
self.assertEqual(encoded_sequence + [padding_idx] * padding_size, padded_sequence)
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
self.assertEqual(sequence_length + padding_size, padded_sequence_length)
self.assertEqual([padding_idx] * padding_size + encoded_sequence, padded_sequence)
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(sequence, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
self.assertEqual(sequence_length, padded_sequence_right_length)
self.assertEqual(encoded_sequence, padded_sequence_right)
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(sequence, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
self.assertEqual(sequence_length, padded_sequence_left_length)
self.assertEqual(encoded_sequence, padded_sequence_left)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(sequence)
padded_sequence_right_length = len(padded_sequence_right)
self.assertEqual(sequence_length, padded_sequence_right_length)
self.assertEqual(encoded_sequence, padded_sequence_right)
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(sequence, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
self.assertEqual(sequence_length, padded_sequence_left_length)
self.assertEqual(encoded_sequence, padded_sequence_left)
def test_right_and_left_truncation(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence = "This is a test sequence"
# RIGHT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True
truncation_size = 3
tokenizer.truncation_side = "right"
encoded_sequence = tokenizer.encode(sequence, add_special_tokens=False)
sequence_length = len(encoded_sequence)
# Remove EOS/BOS tokens
truncated_sequence = tokenizer.encode(
sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False
)
truncated_sequence_length = len(truncated_sequence)
self.assertEqual(sequence_length, truncated_sequence_length + truncation_size)
self.assertEqual(encoded_sequence[:-truncation_size], truncated_sequence)
# LEFT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True
tokenizer.truncation_side = "left"
sequence_length = len(encoded_sequence)
truncated_sequence = tokenizer.encode(
sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False
)
truncated_sequence_length = len(truncated_sequence)
self.assertEqual(sequence_length, truncated_sequence_length + truncation_size)
self.assertEqual(encoded_sequence[truncation_size:], truncated_sequence)
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_truncation'
sequence_length = len(encoded_sequence)
tokenizer.truncation_side = "right"
truncated_sequence_right = tokenizer.encode(sequence, truncation=True, add_special_tokens=False)
truncated_sequence_right_length = len(truncated_sequence_right)
self.assertEqual(sequence_length, truncated_sequence_right_length)
self.assertEqual(encoded_sequence, truncated_sequence_right)
tokenizer.truncation_side = "left"
truncated_sequence_left = tokenizer.encode(
sequence, truncation="longest_first", add_special_tokens=False
)
truncated_sequence_left_length = len(truncated_sequence_left)
self.assertEqual(sequence_length, truncated_sequence_left_length)
self.assertEqual(encoded_sequence, truncated_sequence_left)
tokenizer.truncation_side = "right"
truncated_sequence_right = tokenizer.encode(sequence, add_special_tokens=False)
truncated_sequence_right_length = len(truncated_sequence_right)
self.assertEqual(sequence_length, truncated_sequence_right_length)
self.assertEqual(encoded_sequence, truncated_sequence_right)
tokenizer.truncation_side = "left"
truncated_sequence_left = tokenizer.encode(sequence, truncation=False, add_special_tokens=False)
truncated_sequence_left_length = len(truncated_sequence_left)
self.assertEqual(sequence_length, truncated_sequence_left_length)
self.assertEqual(encoded_sequence, truncated_sequence_left)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest(reason="No padding token.")
else:
empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer("This", pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} should not be a multiple of 8 when no padding is requested")
# Should also work with truncation
normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
"This",
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
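# A rough illustration of the behaviour exercised above (placeholder ids, assuming a
# hypothetical tokenizer where "This is a sample input" maps to 5 ids and pad_token_id == 0):
# >>> tok("This is a sample input", padding=True, pad_to_multiple_of=8)["input_ids"]
# [11, 12, 13, 14, 15, 0, 0, 0]   # length rounded up to the next multiple of 8
# >>> tok("This", padding=True, truncation=True, max_length=12, pad_to_multiple_of=8)
# ValueError                      # max_length=12 is not a multiple of pad_to_multiple_of=8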
def test_padding_with_attention_mask(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest(reason="No padding token.")
if "attention_mask" not in tokenizer.model_input_names:
self.skipTest(reason="This model does not use attention mask.")
features = [
{"input_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]},
{"input_ids": [1, 2, 3], "attention_mask": [1, 1, 0]},
]
padded_features = tokenizer.pad(features)
if tokenizer.padding_side == "right":
self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]])
else:
self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]])
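# Note: `tokenizer.pad` keeps the attention_mask values provided in the features and only
# appends (right padding) or prepends (left padding) zeros for the new pad positions,
# which is exactly what the two branches above assert.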
@parameterized.expand([(True,), (False,)])
def test_encode_plus_with_padding(self, use_padding_as_call_kwarg: bool):
"""
This test checks that padding works as expected when tokenizing a sequence.
Padding is expected to have no effect when the input is a single sequence and
the padding strategy is not `max_length`. Otherwise it pads to the specified max length
using the tokenizer's `padding_side` attribute. We also check that passing `padding_side`
as a call-time kwarg works the same way as setting the `tokenizer.padding_side` attribute.
"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence = "Sequence"
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_size = 10
padding_idx = tokenizer.pad_token_id
token_type_padding_idx = tokenizer.pad_token_type_id
encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
not_padded_sequence = tokenizer.encode_plus(
sequence,
padding=True,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertEqual(sequence_length, not_padded_sequence_length)
self.assertEqual(input_ids, not_padded_input_ids)
self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)
not_padded_sequence = tokenizer.encode_plus(
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertEqual(sequence_length, not_padded_sequence_length)
self.assertEqual(input_ids, not_padded_input_ids)
self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)
# Test right padding
tokenizer_kwargs_right = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "right"
else:
tokenizer_kwargs_right["padding_side"] = "right"
right_padded_sequence = tokenizer.encode_plus(sequence, **tokenizer_kwargs_right)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertEqual(sequence_length + padding_size, right_padded_sequence_length)
self.assertEqual(input_ids + [padding_idx] * padding_size, right_padded_input_ids)
self.assertEqual(special_tokens_mask + [1] * padding_size, right_padded_special_tokens_mask)
# Test left padding
tokenizer_kwargs_left = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "left"
else:
tokenizer_kwargs_left["padding_side"] = "left"
left_padded_sequence = tokenizer.encode_plus(sequence, **tokenizer_kwargs_left)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertEqual(sequence_length + padding_size, left_padded_sequence_length)
self.assertEqual([padding_idx] * padding_size + input_ids, left_padded_input_ids)
self.assertEqual([1] * padding_size + special_tokens_mask, left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
self.assertEqual(
token_type_ids + [token_type_padding_idx] * padding_size, right_padded_token_type_ids
)
self.assertEqual(
[token_type_padding_idx] * padding_size + token_type_ids, left_padded_token_type_ids
)
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertEqual(attention_mask + [0] * padding_size, right_padded_attention_mask)
self.assertEqual([0] * padding_size + attention_mask, left_padded_attention_mask)
def test_padding_warning_message_fast_tokenizer(self):
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
sequence = "This is a text"
tokenizer_fast = self.get_rust_tokenizer()
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer_fast, sequence)
encoding_fast = tokenizer_fast(sequence)
with self.assertLogs("transformers", level="WARNING") as cm:
tokenizer_fast.pad(encoding_fast)
self.assertEqual(len(cm.records), 1)
self.assertIn(
"Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to"
" encode the text followed by a call to the `pad` method to get a padded encoding.",
cm.records[0].message,
)
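# The pattern the warning recommends is to let `__call__` do the padding in one pass,
# e.g. `tokenizer_fast(sequence, padding=True)`, rather than encoding first and then
# calling `pad` on the result (a usage sketch; not asserted by this test).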
if not self.test_slow_tokenizer:
self.skipTest(reason="test_slow_tokenizer is set to False")
tokenizer_slow = self.get_tokenizer()
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer_slow, sequence)
encoding_slow = tokenizer_slow(sequence)
with self.assertLogs(level="WARNING") as cm:
# We want to assert there are no warnings, but the 'assertLogs' method does not support that.
# Therefore, we are adding a dummy warning, and then we will assert it is the only warning.
logger.warning("Dummy warning")
tokenizer_slow.pad(encoding_slow)
self.assertEqual(len(cm.records), 1)
self.assertIn(
"Dummy warning",
cm.records[0].message,
)
def test_separate_tokenizers(self):
# This tests that tokenizers don't affect one another. Unfortunately, the case where this fails is when
# we're loading an S3 configuration from a pre-trained identifier, and we have no way of testing that today.
tokenizers = self.get_tokenizers(random_argument=True)
new_tokenizers = self.get_tokenizers(random_argument=False)
for tokenizer, new_tokenizer in zip(tokenizers, new_tokenizers):
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertTrue(tokenizer.init_kwargs["random_argument"])
self.assertFalse(new_tokenizer.init_kwargs["random_argument"])
def test_get_vocab(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_dict = tokenizer.get_vocab()
self.assertIsInstance(vocab_dict, dict)
self.assertGreaterEqual(len(tokenizer), len(vocab_dict))
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
tokenizer.add_tokens(["asdfasdfasdfasdf"])
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
def test_conversion_reversible(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab = tokenizer.get_vocab()
for word, ind in vocab.items():
if word == tokenizer.unk_token:
continue
self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind)
self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word)
def test_call(self):
# Tests that all calls wrap to encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
# Test not batched
encoded_sequences_1 = tokenizer.encode_plus(sequences[0])
encoded_sequences_2 = tokenizer(sequences[0])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
encoded_sequences_1 = tokenizer.encode_plus(sequences[0], sequences[1])
encoded_sequences_2 = tokenizer(sequences[0], sequences[1])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
encoded_sequences_1 = tokenizer.batch_encode_plus(sequences)
encoded_sequences_2 = tokenizer(sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched pairs
encoded_sequences_1 = tokenizer.batch_encode_plus(list(zip(sequences, sequences)))
encoded_sequences_2 = tokenizer(sequences, sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
encoded_sequences = [tokenizer.encode_plus(sequence) for sequence in sequences]
encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences_padded = [
tokenizer.encode_plus(sequence, max_length=maximum_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(sequences, padding=True)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
# check 'longest' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=True)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
sequences, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1:
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
# check 'no_padding' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=False)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
sequences, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1:
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@require_tokenizers
def test_added_token_are_matched_longest_first(self):
if not self.test_slow_tokenizer:
self.skipTest(reason="This test is only for slow tokenizers")
tokenizers = self.get_tokenizers(fast=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
try:
tokenizer.add_tokens([AddedToken("extra_id_1")])
tokenizer.add_tokens([AddedToken("extra_id_100")])
except Exception:
# Canine cannot add tokens which are not codepoints
self.skipTest(reason="Cannot add those Added tokens")
# XXX: This used to split on `extra_id_1` first; added tokens are now
# matched longest-first.
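# For illustration: with both tokens registered, "extra_id_100" in the input should come
# back as a single added token rather than being split into "extra_id_1" followed by "00".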
tokens = tokenizer.tokenize("This is some extra_id_100")
self.assertIn("extra_id_100", tokens)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.add_tokens([AddedToken("extra_id_100")])
tokenizer.add_tokens([AddedToken("extra_id_1")])
tokens = tokenizer.tokenize("This is some extra_id_100")
self.assertIn("extra_id_100", tokens)
@require_tokenizers
def test_added_token_serializable(self):
# TODO this is tested 10_000 times....
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
new_token = AddedToken("new_token", lstrip=True)
tokenizer.add_tokens([new_token])
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(tmp_dir_name)
tokenizer.from_pretrained(tmp_dir_name)
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_pretokenized_inputs(self):
# Test when inputs are pretokenized
tokenizers = self.get_tokenizers(do_lower_case=False) # , add_prefix_space=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if hasattr(tokenizer, "add_prefix_space") and not tokenizer.add_prefix_space:
continue
# Prepare a sequence from our tokenizer vocabulary
sequence, ids = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20)
# sequence = " " + sequence # To be sure the byte-level tokenizers are feeling good
token_sequence = sequence.split()
# sequence_no_prefix_space = sequence.strip()
# Test encode for pretokenized inputs
output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer.encode(sequence, add_special_tokens=False)
self.assertEqual(output, output_sequence)
output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer.encode(sequence, add_special_tokens=True)
self.assertEqual(output, output_sequence)
# Test encode_plus for pretokenized inputs
output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=False)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=True)
for key in output:
self.assertEqual(output[key], output_sequence[key])
# Test batch_encode_plus for pretokenized inputs
sequence_batch = [sequence.strip()] * 2 + [sequence.strip() + " " + sequence.strip()]
token_sequence_batch = [s.split() for s in sequence_batch]
sequence_batch_cleaned_up_spaces = [" " + " ".join(s) for s in token_sequence_batch]
output = tokenizer.batch_encode_plus(
token_sequence_batch, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.batch_encode_plus(
sequence_batch_cleaned_up_spaces, add_special_tokens=False
)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.batch_encode_plus(
token_sequence_batch, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.batch_encode_plus(
sequence_batch_cleaned_up_spaces, add_special_tokens=True
)
for key in output:
self.assertEqual(output[key], output_sequence[key])
# Test encode for pretokenized inputs pairs
output = tokenizer.encode(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=False)
self.assertEqual(output, output_sequence)
output = tokenizer.encode(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=True)
self.assertEqual(output, output_sequence)
# Test encode_plus for pretokenized inputs pairs
output = tokenizer.encode_plus(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=False)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.encode_plus(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=True)
for key in output:
self.assertEqual(output[key], output_sequence[key])
# Test batch_encode_plus for pretokenized inputs pairs
sequence_pair_batch = [(sequence.strip(), sequence.strip())] * 2 + [
(sequence.strip() + " " + sequence.strip(), sequence.strip())
]
token_sequence_pair_batch = [tuple(s.split() for s in pair) for pair in sequence_pair_batch]
sequence_pair_batch_cleaned_up_spaces = [
tuple(" " + " ".join(s) for s in pair) for pair in token_sequence_pair_batch
]
output = tokenizer.batch_encode_plus(
token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.batch_encode_plus(
sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False
)
for key in output:
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.batch_encode_plus(
token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.batch_encode_plus(
sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True
)
for key in output:
self.assertEqual(output[key], output_sequence[key])
def test_prepare_for_model(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
string_sequence = "Testing the prepare_for_model method."
ids = tokenizer.encode(string_sequence, add_special_tokens=False)
prepared_input_dict = tokenizer.prepare_for_model(ids, add_special_tokens=True)
input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_batch_encode_plus_overflowing_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
string_sequences = ["Testing the prepare_for_model method.", "Test"]
if tokenizer.pad_token is None:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
tokenizer.batch_encode_plus(
string_sequences, return_overflowing_tokens=True, truncation=True, padding=True, max_length=3
)
def _check_no_pad_token_padding(self, tokenizer, sequences):
# if tokenizer does not have pad_token_id, an error should be thrown
if tokenizer.pad_token_id is None:
with self.assertRaises(ValueError):
if isinstance(sequences, list):
tokenizer.batch_encode_plus(sequences, padding="longest")
else:
tokenizer.encode_plus(sequences, padding=True)
# add pad_token_id to pass subsequent tests
tokenizer.add_special_tokens({"pad_token": "<PAD>"})
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
self.skipTest(f"{tokenizer.__class__.__name__} is not in the MODEL_TOKENIZER")
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
self.skipTest(reason="Model is not an encoder-decoder model or has no set pad token id")
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
if is_using_common_embeddings:
self.assertGreaterEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="pt")
# Ensure that the BatchEncoding.to() method works.
encoded_sequence.to(model.device)
batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
# if self.test_rust_tokenizer:
# fast_tokenizer = self.get_rust_tokenizer()
# encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="pt")
# batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
# # This should not fail
# model(**encoded_sequence_fast)
# model(**batch_encoded_sequence_fast)
@require_torch
@slow
def test_np_encode_plus_sent_to_model(self):
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
self.skipTest(f"{tokenizer.__class__.__name__} is not in the MODEL_TOKENIZER_MAPPING")
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
self.skipTest("Model is not an encoder-decoder model or has no set pad token id")
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="np")
batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="np")
# This is currently here to make ruff happy!
if encoded_sequence is None:
raise ValueError("Cannot convert list to numpy tensor on encode_plus()")
if batch_encoded_sequence is None:
raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus()")
if self.test_rust_tokenizer:
fast_tokenizer = self.get_rust_tokenizer()
encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="np")
batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus(
[sequence, sequence], return_tensors="np"
)
# This is currently here to make ruff happy!
if encoded_sequence_fast is None:
raise ValueError("Cannot convert list to numpy tensor on encode_plus() (fast)")
if batch_encoded_sequence_fast is None:
raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus() (fast)")
@require_torch
def test_prepare_seq2seq_batch(self):
if not self.test_seq2seq:
self.skipTest(reason="test_seq2seq is set to False")
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
batch = tokenizer.prepare_seq2seq_batch(
src_texts=src_text,
tgt_texts=tgt_text,
max_length=3,
max_target_length=10,
return_tensors="pt",
src_lang="en_XX", # this should be ignored (for all but mbart) but not cause an error
)
except NotImplementedError:
self.skipTest(reason="Encountered NotImplementedError calling prepare_seq2seq_batch")
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 10)
# max_target_length will default to max_length if not specified
batch = tokenizer.prepare_seq2seq_batch(
src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 3)
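# (labels have length 3 here because max_target_length was omitted and therefore
# defaulted to max_length=3, as the comment above notes)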
batch_encoder_only = tokenizer.prepare_seq2seq_batch(
src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
)
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
self.assertNotIn("decoder_input_ids", batch_encoder_only)
def test_is_fast(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
# Check is_fast is set correctly
self.assertTrue(tokenizer_r.is_fast)
if self.test_slow_tokenizer:
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
self.assertFalse(tokenizer_p.is_fast)
def test_fast_only_inputs(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
# Ensure None raises an error
self.assertRaises(TypeError, tokenizer_r.tokenize, None)
self.assertRaises(TypeError, tokenizer_r.encode, None)
self.assertRaises(TypeError, tokenizer_r.encode_plus, None)
self.assertRaises(TypeError, tokenizer_r.batch_encode_plus, None)
def test_alignement_methods(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
text = " ".join(words)
batch_size = 3
encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)
batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False)
num_tokens = len(encoding["input_ids"])
last_word_index = len(words) - 1
last_token_index = num_tokens - 1
last_batch_index = batch_size - 1
last_char_index = len(text) - 1
# words, tokens
self.assertEqual(len(encoding.words(0)), num_tokens)
self.assertEqual(max(encoding.words(0)), last_word_index)
self.assertEqual(min(encoding.words(0)), 0)
self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
self.assertEqual(len(encoding.tokens(0)), num_tokens)
# Assert token_to_word
self.assertEqual(encoding.token_to_word(0), 0)
self.assertEqual(encoding.token_to_word(0, 0), 0)
self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
# Assert word_to_tokens
self.assertEqual(encoding.word_to_tokens(0).start, 0)
self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(
batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1
)
# Assert token_to_chars
self.assertEqual(encoding.token_to_chars(0).start, 0)
self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(
batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1
)
# Assert char_to_token
self.assertEqual(encoding.char_to_token(0), 0)
self.assertEqual(encoding.char_to_token(0, 0), 0)
self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
# Assert char_to_word
self.assertEqual(encoding.char_to_word(0), 0)
self.assertEqual(encoding.char_to_word(0, 0), 0)
self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
# Assert word_to_chars
self.assertEqual(encoding.word_to_chars(0).start, 0)
self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1)
self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(
batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1
)
# Assert token_to_sequence
self.assertEqual(encoding.token_to_sequence(num_tokens // 2), 0)
self.assertEqual(encoding.token_to_sequence(0, num_tokens // 2), 0)
self.assertEqual(batch_encoding.token_to_sequence(1, num_tokens // 2), 0)
self.assertEqual(batch_encoding.token_to_sequence(0, num_tokens // 2), 0)
self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, num_tokens // 2), 0)
# Pair of input sequences
words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
text = " ".join(words)
pair_words = ["Amazing", "example", "full", "of", "inspiration"]
pair_text = " ".join(pair_words)
batch_size = 3
index_word_in_first_seq = words.index("inspiration")
index_word_in_pair_seq = pair_words.index("inspiration")
index_char_in_first_seq = text.find("inspiration")
index_char_in_pair_seq = pair_text.find("inspiration")
pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=False)
pair_batch_encoding = tokenizer_r.batch_encode_plus(
[(text, pair_text)] * batch_size, add_special_tokens=False
)
num_tokens = len(encoding["input_ids"])
last_word_index = len(words) - 1
last_token_index = num_tokens - 1
last_batch_index = batch_size - 1
last_char_index = len(text) - 1
# Assert word_to_tokens
self.assertNotEqual(
pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start,
pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
pair_encoding["input_ids"][
pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start
],
pair_encoding["input_ids"][
pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start
],
)
self.assertNotEqual(
pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start,
pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start
],
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start
],
)
# Assert char_to_token
self.assertNotEqual(
pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0),
pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)],
pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)],
)
self.assertNotEqual(
pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0),
pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0)
],
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1)
],
)
# Assert char_to_word
self.assertNotEqual(
pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0),
pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)],
pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)],
)
self.assertNotEqual(
pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0),
pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)],
pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)],
)
# Assert word_to_chars
self.assertNotEqual(
pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start,
pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start],
pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start],
)
self.assertNotEqual(
pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start,
pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start],
pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start],
)
# Assert token_to_sequence
pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=True)
pair_sequence_ids = [
pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding["input_ids"]))
]
self.assertIn(0, pair_sequence_ids)
self.assertIn(1, pair_sequence_ids)
if tokenizer_r.num_special_tokens_to_add(pair=True):
self.assertIn(None, pair_sequence_ids)
pair_batch_encoding = tokenizer_r.batch_encode_plus(
[(text, pair_text)] * batch_size, add_special_tokens=True
)
pair_batch_sequence_ids = [
pair_batch_encoding.token_to_sequence(1, i)
for i in range(len(pair_batch_encoding["input_ids"][0]))
]
self.assertIn(0, pair_batch_sequence_ids)
self.assertIn(1, pair_batch_sequence_ids)
if tokenizer_r.num_special_tokens_to_add(pair=True):
self.assertIn(None, pair_batch_sequence_ids)
def test_tokenization_python_rust_equals(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
# Ensure basic input match
input_p = tokenizer_p.encode_plus(self._data)
input_r = tokenizer_r.encode_plus(self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
# Ensure truncation match
input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_num_special_tokens_to_add_equal(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(
tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False)
)
self.assertEqual(
tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True)
)
def test_max_length_equal(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence)
self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair)
def test_special_tokens_map_equal(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
# sometimes the tokenizer saved online is not the same
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
# Assert the set of special tokens match.
self.assertSequenceEqual(
tokenizer_p.special_tokens_map.items(),
tokenizer_r.special_tokens_map.items(),
)
def test_add_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
vocab_size = len(tokenizer_r)
self.assertEqual(tokenizer_r.add_tokens(""), 0)
self.assertEqual(tokenizer_r.add_tokens("testoken"), 1)
self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2)
self.assertEqual(len(tokenizer_r), vocab_size + 3)
self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
self.assertRaises(
AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"}
)
self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
self.assertEqual(
tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
)
self.assertIn("<testtoken3>", tokenizer_r.special_tokens_map["additional_special_tokens"])
self.assertIsInstance(tokenizer_r.special_tokens_map["additional_special_tokens"], list)
self.assertGreaterEqual(len(tokenizer_r.special_tokens_map["additional_special_tokens"]), 2)
self.assertEqual(len(tokenizer_r), vocab_size + 8)
def test_offsets_mapping(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
text = "Wonderful no inspiration example with subtoken"
pair = "Along with an awesome pair"
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert the special_tokens_mask flags exactly the `added_tokens` special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
tokens_with_offsets = tokenizer_r.encode_plus(
text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert the special_tokens_mask flags exactly the `added_tokens` special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.get_rust_tokenizer(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
else:
self.skipTest(reason="No expected framework (PT) found")
if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
self.skipTest(reason="This tokenizer has no padding token set, or pad_token_id < 0")
tokens = tokenizer.encode_plus(
"HuggingFace is solving NLP one commit at a time",
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
# Mono sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
# Multi sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
def test_compare_pretokenized_inputs(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
if hasattr(tokenizer_p, "add_prefix_space") and not tokenizer_p.add_prefix_space:
continue # Too hard to test for now
# Input string
pretokenized_input_simple = "This is a sample input".split()
pretokenized_input_pair = "This is a sample pair".split()
# Test encode for pretokenized inputs
output_r = tokenizer_r.encode(
pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False
)
output_p = tokenizer_p.encode(
pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False
)
self.assertEqual(output_p, output_r)
kwargs = {
"is_split_into_words": True,
# "return_token_type_ids": True, # Use the defaults for each tokenizers
# "return_attention_mask": True, # Use the defaults for each tokenizers
"return_overflowing_tokens": False,
"return_special_tokens_mask": True,
"return_offsets_mapping": False, # Not implemented in python tokenizers
# "add_special_tokens": False,
}
batch_kwargs = {
"is_split_into_words": True,
# "return_token_type_ids": True, # Use the defaults for each tokenizers
# "return_attention_mask": True, # Use the defaults for each tokenizers
"return_overflowing_tokens": False,
"return_special_tokens_mask": True,
"return_offsets_mapping": False, # Not implemented in python tokenizers
# "add_special_tokens": False,
}
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs)
for key in output_p:
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair]
output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
for key in output_p:
self.assertEqual(output_p[key], output_r[key])
# Test encode for pretokenized inputs pairs
output_r = tokenizer_r.encode(
pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True
)
output_p = tokenizer_p.encode(
pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True
)
self.assertEqual(output_p, output_r)
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
for key in output_p:
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [
pretokenized_input_simple + pretokenized_input_pair,
pretokenized_input_pair,
]
output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
for key in output_p:
self.assertEqual(output_p[key], output_r[key])
def test_create_token_type_ids(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
input_simple = [1, 2, 3]
input_pair = [1, 2, 3]
# Generate output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_build_inputs_with_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
# # Input string
# input_simple = tokenizer_p.tokenize("This is a sample input", add_special_tokens=False)
# input_pair = tokenizer_p.tokenize("This is a sample pair", add_special_tokens=False)
# # Generate output
# output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
# output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
# self.assertEqual(output_p, output_r)
# # Generate pair output
# output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
# output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
# self.assertEqual(output_p, output_r)
input_pairs = [
("", ""),
("", "This is a sample pair"),
("This is a sample input", ""),
("This is a sample input", "This is a sample pair"),
]
for sample_input, sample_pair in input_pairs:
# Input tokens id
input_simple = tokenizer_p.encode(sample_input, add_special_tokens=False)
input_pair = tokenizer_p.encode(sample_pair, add_special_tokens=False)
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_padding(self, max_length=50):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
# Encode - Simple input
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode("This is a simple input", padding="longest")
input_p = tokenizer_p.encode("This is a simple input", padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode - Pair input
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True)
input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode_plus - Simple input
input_r = tokenizer_r.encode_plus(
"This is a simple input", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], padding="longest"
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], padding=True
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Batch_encode_plus - Pair input
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is a input 1")
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_p.encode_plus("This is a input 1")
input_p = tokenizer_p.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is a input 1")
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus("This is a input 1")
input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_p.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_p.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_p.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
                # Test padding nested empty lists (in some use cases, there are no token ids in the `input_ids` list).
input_r = tokenizer_r.pad({"input_ids": [[], []]}, max_length=max_length, padding="max_length")
input_p = tokenizer_p.pad({"input_ids": [[], []]}, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_padding_different_model_input_name(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
                input_p = tokenizer_p.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
# rename encoded batch to "inputs"
input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
del input_r[tokenizer_r.model_input_names[0]]
input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
del input_p[tokenizer_p.model_input_names[0]]
# Renaming `input_ids` to `inputs`
tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]
input_r = tokenizer_r.pad(input_r, padding="longest")
input_p = tokenizer_r.pad(input_p, padding="longest")
max_length = len(input_p["inputs"][0])
self.assert_batch_padded_input_match(
input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
)
def test_save_pretrained(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# make sure that all ".json" files are saved in the correct format
for file_path in tokenizer_r_files + tokenizer_p_files:
if os.path.exists(file_path) and file_path.endswith(".json"):
check_json_file_has_correct_format(file_path)
                # Checks that the same files are saved, plus the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=True
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks that the same files are saved
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=False
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
def test_embeded_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(
sentence,
add_special_tokens=True,
)
tokens_p = tokenizer_p.encode_plus(
sentence,
add_special_tokens=True,
)
for key in tokens_p:
self.assertEqual(tokens_r[key], tokens_p[key])
if "token_type_ids" in tokens_r:
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
# pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True)
for text in ["", " "]:
# tokenize()
no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True)
self.assertEqual(
len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add
)
# encode()
no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True)
self.assertEqual(
len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add
)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True)
for key in no_special_tokens:
self.assertEqual(
len(no_special_tokens[key]),
len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
)
                    # batch_encode_plus()
no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True)
for key in no_special_tokens:
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
def test_compare_prepare_for_model(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
self.skipTest(reason="test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
string_sequence = "Asserting that both tokenizers are equal"
python_output = tokenizer_p.prepare_for_model(
tokenizer_p.encode(string_sequence, add_special_tokens=False)
)
rust_output = tokenizer_r.prepare_for_model(
tokenizer_r.encode(string_sequence, add_special_tokens=False)
)
for key in python_output:
self.assertEqual(python_output[key], rust_output[key])
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.get_rust_tokenizer(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
r_output = tokenizer_r.encode("Hey this is a <special> token")
special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
# in rust fast, you lose the information of the AddedToken when initializing with `additional_special_tokens`
tokenizer_cr = self.get_rust_tokenizer(
pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
)
tokenizer_p = self.get_tokenizer(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
p_output = tokenizer_p.encode("Hey this is a <special> token")
cr_output = tokenizer_cr.encode("Hey this is a <special> token")
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        # This test no longer supports rust tokenizers, because the only file that should be looked
        # at by the fast tokenizer with the new saving format is `tokenizer_config.json`.
        # The previous behaviour was also very strange: a fast tokenizer should not save 3 files, but just one,
        # and a slow tokenizer can never be built from a fast one.
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir)
# only legacy save will check this
tokenizer_path = "tokenizer_config.json"
with open(os.path.join(tmp_dir, tokenizer_path), encoding="utf-8") as json_file:
tokenizer_config = json.load(json_file)
tokenizer_config["additional_special_tokens"] = ["an_additional_special_token"]
with open(os.path.join(tmp_dir, tokenizer_path), "w", encoding="utf-8") as outfile:
json.dump(tokenizer_config, outfile)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
# TODO ArthurZ ... Ok so for legacy we have to support this I guess..... (special_tokens_map + additional)
tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
)
self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())
self.assertEqual(
["an_additional_special_token"],
tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
),
)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = [AddedToken("a_new_additional_special_token", lstrip=True)]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir,
additional_special_tokens=new_added_tokens,
)
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
self.assertEqual(
["a_new_additional_special_token"],
tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
),
)
def test_training_new_tokenizer(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
tokenizer = self.get_rust_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
# Test we can use the new tokenizer with something not seen during training
inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "This is the first sentence"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
# We check that the parameters of the tokenizer remained the same
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
        # Assert the set of special tokens matches, as we didn't ask to change them
self.assertSequenceEqual(
tokenizer.all_special_tokens_extended,
new_tokenizer.all_special_tokens_extended,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
tokenizer = self.get_rust_tokenizer()
# Test with a special tokens map
class_signature = inspect.signature(tokenizer.__class__)
if "cls_token" in class_signature.parameters:
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
)
cls_id = new_tokenizer.get_vocab()["<cls>"]
self.assertEqual(new_tokenizer.cls_token, "<cls>")
self.assertEqual(new_tokenizer.cls_token_id, cls_id)
# Create a new mapping from the special tokens defined in the original tokenizer
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
special_tokens_map = {}
for token in special_tokens_list:
if getattr(tokenizer, token) is not None:
special_token = getattr(tokenizer, token)
special_tokens_map[special_token] = f"{special_token}a"
# Train new tokenizer
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
)
# Check the changes
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, token) is None:
continue
special_token = getattr(tokenizer, token)
if special_token in special_tokens_map:
new_special_token = getattr(new_tokenizer, token)
self.assertEqual(special_tokens_map[special_token], new_special_token)
new_id = new_tokenizer.get_vocab()[new_special_token]
self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
# Check if the AddedToken / string format has been kept
for special_token in tokenizer.all_special_tokens_extended:
if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
elif isinstance(special_token, AddedToken):
# The special token must appear in the list of the new tokenizer as an object of type AddedToken with
# the same parameters as the old AddedToken except the content that the user has requested to change.
special_token_str = special_token.content
new_special_token_str = special_tokens_map[special_token_str]
find = False
for candidate in new_tokenizer.all_special_tokens_extended:
if (
isinstance(candidate, AddedToken)
and candidate.content == new_special_token_str
and candidate.lstrip == special_token.lstrip
and candidate.rstrip == special_token.rstrip
and candidate.normalized == special_token.normalized
and candidate.single_word == special_token.single_word
):
find = True
break
special_token.content = new_special_token_str
self.assertTrue(
find,
f"'{special_token.__repr__()}' should appear as an `AddedToken` in the all_special_tokens_extended = "
f"{[k for k in new_tokenizer.all_special_tokens_extended if str(k) == new_special_token_str]} but it is missing"
", this means that the new tokenizers did not keep the `rstrip`, `lstrip`, `normalized` etc attributes.",
)
elif special_token not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token.__repr__()}' should be in {new_tokenizer.all_special_tokens_extended}",
)
else:
# The special token must appear in the list of the new tokenizer as an object of type string.
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)
# Test we can use the new tokenizer with something not seen during training
inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "This is the first sentence"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
def test_tokenizer_mismatch_warning(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
with self.assertLogs("transformers", level="WARNING") as cm:
try:
if self.tokenizer_class == BertTokenizer:
AlbertTokenizer.from_pretrained(pretrained_name)
else:
BertTokenizer.from_pretrained(pretrained_name)
except OSError as e:
                        # Some tokenizers will raise an error before reaching the logged warning because there are no
                        # corresponding files to load
error_message = str(e)
except (TypeError, AttributeError):
# Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned,
# here we just check that the warning has been logged before the error is raised
pass
finally:
logged_msg_target = (
"The tokenizer class you load from this checkpoint is not the same type as the class "
"this function is called from."
)
raised_error_msg_target = "Can't load tokenizer for"
                        self.assertTrue(
                            cm.records[0].message.startswith(logged_msg_target)
                            if len(cm.records) > 0
                            else raised_error_msg_target in error_message
                        )
try:
if self.rust_tokenizer_class == BertTokenizerFast:
AlbertTokenizerFast.from_pretrained(pretrained_name)
else:
BertTokenizerFast.from_pretrained(pretrained_name)
except (TypeError, AttributeError):
# Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned,
# here we just check that the warning has been logged before the error is raised
pass
finally:
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class"
" this function is called from."
)
)
@require_torch
def test_saving_tokenizer_trainer(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
with tempfile.TemporaryDirectory() as tmp_dir:
# Save the fast tokenizer files in a temporary directory
tokenizer_old = self.get_rust_tokenizer(pretrained_name, **kwargs, use_fast=True)
tokenizer_old.save_pretrained(tmp_dir, legacy_format=False) # save only fast version
# Initialize toy model for the trainer
model = nn.Module()
# Load tokenizer from a folder without legacy files
tokenizer = self.rust_tokenizer_class.from_pretrained(tmp_dir)
training_args = TrainingArguments(output_dir=tmp_dir, do_train=True, no_cuda=True)
trainer = Trainer(model=model, args=training_args, processing_class=tokenizer)
# Should not raise an error
trainer.save_model(os.path.join(tmp_dir, "checkpoint"))
self.assertIn("tokenizer.json", os.listdir(os.path.join(tmp_dir, "checkpoint")))
def test_convert_tokens_to_string_format(self):
tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokens = ["this", "is", "a", "test"]
string = tokenizer.convert_tokens_to_string(tokens)
self.assertIsInstance(string, str)
def test_save_slow_from_fast_and_reload_fast(self):
if not self.test_slow_tokenizer or not self.test_rust_tokenizer:
# we need both slow and fast versions
self.skipTest(reason="test_rust_tokenizer or test_slow_tokenizer is set to False")
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
with tempfile.TemporaryDirectory() as tmp_dir_1:
# Here we check that even if we have initialized a fast tokenizer with a tokenizer_file we can
# still save only the slow version and use these saved files to rebuild a tokenizer
tokenizer_fast_old_1 = self.get_rust_tokenizer(pretrained_name, **kwargs, use_fast=True)
tokenizer_file = os.path.join(tmp_dir_1, "tokenizer.json")
tokenizer_fast_old_1.backend_tokenizer.save(tokenizer_file)
tokenizer_fast_old_2 = self.get_rust_tokenizer(
pretrained_name, **kwargs, use_fast=True, tokenizer_file=tokenizer_file
)
tokenizer_fast_old_2.save_pretrained(tmp_dir_1, legacy_format=True) # save only slow version
tokenizer_slow = self.tokenizer_class.from_pretrained(tmp_dir_1)
with tempfile.TemporaryDirectory() as tmp_dir_2:
tokenizer_slow.save_pretrained(tmp_dir_2)
# Should not raise an error
self.rust_tokenizer_class.from_pretrained(tmp_dir_2)
def test_split_special_tokens(self):
if not self.test_slow_tokenizer:
self.skipTest(reason="test_slow_tokenizer is set to False")
        # Tests the expected appearance (or absence) of special tokens in the encoded output;
        # explicit values are not tested because tokenization is model-dependent and can change
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
special_token = "<my_new_token>"
special_sentence = f"Hey this is a {special_token} token"
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_rust = self.get_rust_tokenizer(
pretrained_name, additional_special_tokens=[special_token], split_special_tokens=True, **kwargs
)
tokenizer_py = self.get_tokenizer(
pretrained_name, additional_special_tokens=[special_token], split_special_tokens=True, **kwargs
)
special_token_id = tokenizer_py.convert_tokens_to_ids(special_token)
encoded_special_token_unsplit = tokenizer_py.encode(
special_token, add_special_tokens=False, split_special_tokens=False
)
self.assertTrue(special_token_id in encoded_special_token_unsplit)
encoded_special_token_split = tokenizer_py.encode(special_token, add_special_tokens=False)
self.assertTrue(special_token_id not in encoded_special_token_split)
py_tokens_output = tokenizer_py.tokenize(special_sentence)
rust_tokens_output = tokenizer_rust.tokenize(special_sentence)
self.assertTrue(special_token not in py_tokens_output)
self.assertTrue(special_token not in rust_tokens_output)
py_tokens_output_unsplit = tokenizer_py.tokenize(special_sentence, split_special_tokens=False)
rust_tokens_output_unsplit = tokenizer_rust.tokenize(special_sentence, split_special_tokens=False)
self.assertTrue(special_token in py_tokens_output_unsplit)
self.assertTrue(special_token in rust_tokens_output_unsplit)
py_tokens_output = tokenizer_py(special_sentence)
rust_tokens_output = tokenizer_rust(special_sentence)
self.assertTrue(special_token_id not in py_tokens_output)
self.assertTrue(special_token_id not in rust_tokens_output)
tmp_dir = tempfile.mkdtemp()
try:
tokenizer_py.save_pretrained(tmp_dir)
fast_from_saved = self.tokenizer_class.from_pretrained(tmp_dir)
finally:
shutil.rmtree(tmp_dir)
output_tokens_reloaded_split = fast_from_saved.tokenize(special_sentence)
self.assertTrue(special_token not in output_tokens_reloaded_split)
output_tokens_reloaded_unsplit = fast_from_saved.tokenize(special_sentence, split_special_tokens=False)
self.assertTrue(special_token in output_tokens_reloaded_unsplit)
def test_added_tokens_serialization(self):
# Utility to test the added vocab
def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir):
tokenizer = tokenizer_class.from_pretrained(temp_dir)
self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
self.assertTrue(all(item in tokenizer.added_tokens_decoder.items() for item in expected.items()))
return tokenizer
new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                # Load a slow tokenizer from the hub, initialized with the new token so the fast tokenizer also includes it
tokenizer = self.get_tokenizer(pretrained_name, eos_token=new_eos)
EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder
with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"):
self.assertEqual(tokenizer._special_tokens_map["eos_token"], new_eos)
self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values()))
with tempfile.TemporaryDirectory() as tmp_dir_2:
tokenizer.save_pretrained(tmp_dir_2)
with self.subTest(
"Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class"
):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2
)
if self.rust_tokenizer_class is not None:
with self.subTest(
"Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class"
):
tokenizer_fast = _test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2
)
with tempfile.TemporaryDirectory() as tmp_dir_3:
tokenizer_fast.save_pretrained(tmp_dir_3)
with self.subTest(
"Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class"
):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
)
with self.subTest(
"Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class"
):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
)
with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub)"):
if self.rust_tokenizer_class is not None:
tokenizer_fast = self.get_rust_tokenizer(pretrained_name, eos_token=new_eos)
self.assertEqual(tokenizer_fast._special_tokens_map["eos_token"], new_eos)
self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
                        # We can't test the following because, for backward compatibility, we kept the default rstrip/lstrip in slow but not in fast. To revisit once normalization is aligned.
with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
# Fast tokenizer may have user_defined_symbols and control_symbols added, unlike slow
self.assertTrue(
all(
item in tokenizer.added_tokens_decoder.items()
for item in EXPECTED_ADDED_TOKENS_DECODER.items()
)
)
EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
with tempfile.TemporaryDirectory() as tmp_dir_4:
tokenizer_fast.save_pretrained(tmp_dir_4)
with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4
)
with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4
)
def test_special_token_addition(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
# Create tokenizer and add an additional special token
tokenizer_1 = tokenizer.from_pretrained(pretrained_name)
tokenizer_1.add_special_tokens({"additional_special_tokens": ["<tok>"]})
self.assertEqual(tokenizer_1.additional_special_tokens, ["<tok>"])
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_1.save_pretrained(tmp_dir)
# Load the above tokenizer and add the same special token a second time
tokenizer_2 = tokenizer.from_pretrained(pretrained_name)
tokenizer_2.add_special_tokens({"additional_special_tokens": ["<tok>"]})
self.assertEqual(tokenizer_2.additional_special_tokens, ["<tok>"])
tokenizer_2.add_special_tokens({"additional_special_tokens": ["<tok>", "<other>"]})
self.assertEqual(tokenizer_2.additional_special_tokens, ["<tok>", "<other>"])
tokenizer_2.add_special_tokens({"additional_special_tokens": ["<other>", "<another>"]})
self.assertEqual(tokenizer_2.additional_special_tokens, ["<other>", "<another>"])
tokenizer_2.add_special_tokens(
{"additional_special_tokens": ["<tok>"]},
replace_additional_special_tokens=False,
)
self.assertEqual(tokenizer_2.additional_special_tokens, ["<other>", "<another>", "<tok>"])
def test_tokenizer_initialization_with_conflicting_key(self):
get_tokenizer_func = self.get_rust_tokenizer if self.test_rust_tokenizer else self.get_tokenizer
with self.assertRaises(AttributeError, msg="conflicts with the method"):
get_tokenizer_func(add_special_tokens=True)
with self.assertRaises(AttributeError, msg="conflicts with the method"):
get_tokenizer_func(get_vocab=True)
@parameterized.expand([(True,), (False,)])
def test_rust_tokenizer_add_prefix_space(self, add_prefix_space):
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
for tokenizer, pretrained_name, _ in self.tokenizers_list:
fast_tokenizer = tokenizer.from_pretrained(pretrained_name, add_prefix_space=add_prefix_space)
self.assertEqual(fast_tokenizer.add_prefix_space, add_prefix_space)
            # Only the ByteLevel pre-tokenizer has the `add_prefix_space` attribute; we have to ensure that it's set correctly
if hasattr(fast_tokenizer.backend_tokenizer.pre_tokenizer, "add_prefix_space"):
self.assertEqual(fast_tokenizer.backend_tokenizer.pre_tokenizer.add_prefix_space, add_prefix_space)
| transformers/tests/test_tokenization_common.py/0 | {
"file_path": "transformers/tests/test_tokenization_common.py",
"repo_id": "transformers",
"token_count": 121216
} | 528 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test is meant to be run on an instance with TPUs like this:
#
# python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py
#
# Replace 8 with the number of TPU cores you have.
#
import sys
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
def __init__(self, length: int = 101):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i) -> int:
return i
class DummyDataCollator:
def __call__(self, features):
return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
self.fc = nn.Linear(120, 80)
def forward(self, input_ids, labels=None):
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
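# A rough sketch of what the dummy pieces above produce (illustrative values, not executed here):
#   DummyDataset(4)                      -> items 0, 1, 2, 3
#   DummyDataCollator()([0, 1])          -> {"input_ids": tensor([0, 1]), "labels": tensor([0, 1])}
#   DummyModel()(input_ids, labels=...)  -> (loss=0.0, input_ids), i.e. the "predictions" are the sample indices
# so a successful distributed evaluation should return the sequence 0..len(dataset)-1 in order,
# which is exactly what `compute_metrics` below checks.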
def main():
parser = HfArgumentParser((TrainingArguments,))
sys.argv += ["--output_dir", "./examples"]
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
f"tpu_num_cores: {training_args.tpu_num_cores}",
)
# Essentially, what we want to verify in the distributed case is
# that we get all samples back, in the right order.
# (this is crucial for prediction for instance)
for dataset_length in [1001, 256, 15]:
dataset = DummyDataset(dataset_length)
def compute_metrics(p: EvalPrediction) -> dict:
sequential = list(range(len(dataset)))
success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
return {"success": success}
trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = 2
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
logger.info("🔥 All distributed tests successful")
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| transformers/tests/trainer/test_trainer_tpu.py/0 | {
"file_path": "transformers/tests/trainer/test_trainer_tpu.py",
"repo_id": "transformers",
"token_count": 1644
} | 529 |
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from unittest.mock import patch
from transformers.testing_utils import CaptureStd, require_torch
class CLITest(unittest.TestCase):
@patch("sys.argv", ["fakeprogrampath", "env"])
def test_cli_env(self):
# test transformers env
import transformers.commands.transformers_cli
with CaptureStd() as cs:
transformers.commands.transformers_cli.main()
self.assertIn("Python version", cs.out)
self.assertIn("Platform", cs.out)
self.assertIn("Using distributed or parallel set-up in script?", cs.out)
@require_torch
@patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"])
def test_cli_download(self):
import transformers.commands.transformers_cli
        # Remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots"))
@require_torch
@patch(
"sys.argv",
[
"fakeprogrampath",
"download",
"hf-internal-testing/test_dynamic_model_with_tokenizer",
"--trust-remote-code",
"--cache-dir",
"/tmp",
],
)
def test_cli_download_trust_remote(self):
import transformers.commands.transformers_cli
        # Remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs"))
self.assertTrue(
os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots")
)
| transformers/tests/utils/test_cli.py/0 | {
"file_path": "transformers/tests/utils/test_cli.py",
"repo_id": "transformers",
"token_count": 1241
} | 530 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfArgumentParserTest(unittest.TestCase):
def test_set_level(self):
logger = logging.get_logger()
# the current default level is logging.WARNING
level_origin = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
# restore to the original level
logging.set_verbosity(level_origin)
def test_integration(self):
level_origin = logging.get_verbosity()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
msg = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, "")
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# restore to the original level
logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="error")
def test_env_override(self):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_ = logging.get_logger("transformers.models.bart.tokenization_bart")
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
env_level = logging.log_levels[env_level_str]
current_level = logging.get_verbosity()
self.assertEqual(
env_level,
current_level,
f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
)
# restore to the original level
os.environ["TRANSFORMERS_VERBOSITY"] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error")
def test_env_invalid_override(self):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
logger = logging.logging.getLogger()
with CaptureLogger(logger) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart")
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
# no need to restore as nothing was changed
def test_advisory_warnings(self):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
msg = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
# nothing should be logged as env var disables this method
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, "")
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| transformers/tests/utils/test_logging.py/0 | {
"file_path": "transformers/tests/utils/test_logging.py",
"repo_id": "transformers",
"token_count": 2007
} | 531 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/falcon_mamba/selective_scan_with_ln_interface.py",
"kernels/falcon_mamba/__init__.py",
"kernels/__init__.py",
"models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
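# Typical invocations, assuming the script is run from the repository root (the paths are an assumption about
# the local checkout; see the argument parser below):
#   python utils/check_build.py              # checks the `build/lib/transformers` folder produced by a build
#   python utils/check_build.py --check_lib  # checks the installed `transformers` package instead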
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
args = parser.parse_args()
if args.check_lib:
transformers_module = importlib.import_module("transformers")
transformers_path = Path(transformers_module.__file__).parent
else:
transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| transformers/utils/check_build.py/0 | {
"file_path": "transformers/utils/check_build.py",
"repo_id": "transformers",
"token_count": 619
} | 532 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def normalize_test_line(line):
line = line.strip()
# Normalize SKIPPED/XFAIL/etc with path:line and reason
match = re.match(r"^(SKIPPED|XFAIL|XPASS|EXPECTEDFAIL)\s+\[?\d*\]?\s*(\S+:\d+)", line)
if match:
status, location = match.groups()
return f"{status} {location}"
# Normalize ERROR/FAILED lines with optional message
if line.startswith("ERROR") or line.startswith("FAILED"):
return re.split(r"\s+-\s+", line)[0].strip()
return line
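# Illustrative behaviour of `normalize_test_line` on hypothetical pytest summary lines (not taken from a real run):
#   "SKIPPED [1] tests/models/foo/test_foo.py:42: requires CUDA" -> "SKIPPED tests/models/foo/test_foo.py:42"
#   "FAILED tests/test_bar.py::test_baz - AssertionError: boom"  -> "FAILED tests/test_bar.py::test_baz"
#   any other line is returned stripped but otherwise unchanged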
def parse_summary_file(file_path):
test_set = set()
with open(file_path, "r", encoding="utf-8") as f:
in_summary = False
for line in f:
if line.strip().startswith("==="):
in_summary = not in_summary
continue
if in_summary:
stripped = line.strip()
if stripped:
normalized = normalize_test_line(stripped)
test_set.add(normalized)
return test_set
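# A minimal sketch of the `summary_short.txt` layout assumed by `parse_summary_file` (hypothetical content):
#   ====================== short test summary info ======================
#   FAILED tests/test_bar.py::test_baz - AssertionError: boom
#   SKIPPED [1] tests/models/foo/test_foo.py:42: requires CUDA
#   ====================== 1 failed, 1 skipped in 3.21s =================
# Only the lines between the "===" delimiters are collected (after normalization) into the returned set.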
def compare_job_sets(job_set1, job_set2):
all_job_names = sorted(set(job_set1) | set(job_set2))
report_lines = []
for job_name in all_job_names:
file1 = job_set1.get(job_name)
file2 = job_set2.get(job_name)
tests1 = parse_summary_file(file1) if file1 else set()
tests2 = parse_summary_file(file2) if file2 else set()
added = tests2 - tests1
removed = tests1 - tests2
if added or removed:
report_lines.append(f"=== Diff for job: {job_name} ===")
if removed:
report_lines.append("--- Absent in current run:")
for test in sorted(removed):
report_lines.append(f" - {test}")
if added:
report_lines.append("+++ Appeared in current run:")
for test in sorted(added):
report_lines.append(f" + {test}")
report_lines.append("") # blank line
return "\n".join(report_lines) if report_lines else "No differences found."
# Example usage:
# job_set_1 = {
# "albert": "prev/multi-gpu_run_models_gpu_models/albert_test_reports/summary_short.txt",
# "bloom": "prev/multi-gpu_run_models_gpu_models/bloom_test_reports/summary_short.txt",
# }
# job_set_2 = {
# "albert": "curr/multi-gpu_run_models_gpu_models/albert_test_reports/summary_short.txt",
# "bloom": "curr/multi-gpu_run_models_gpu_models/bloom_test_reports/summary_short.txt",
# }
# report = compare_job_sets(job_set_1, job_set_2)
# print(report)
| transformers/utils/compare_test_runs.py/0 | {
"file_path": "transformers/utils/compare_test_runs.py",
"repo_id": "transformers",
"token_count": 1348
} | 533 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Transformers library in the repository `huggingface/transformers-metadata`.
Usage for an update (as used by the GitHub action `update_metadata`):
```bash
python utils/update_metadata.py --token <token> --commit_sha <commit_sha>
```
Usage to check all pipelines are properly defined in the constant `PIPELINE_TAGS_AND_AUTO_MODELS` of this script, so
that new pipelines are properly added as metadata (as used in `make repo-consistency`):
```bash
python utils/update_metadata.py --check-only
```
"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration|ForRetrieval)")
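# A rough illustration of how these regexes classify exported class names (examples are illustrative):
#   "TFBertModel"   -> _re_tf_models,   captured prefix "Bert"
#   "FlaxBertModel" -> _re_flax_models, captured prefix "Bert"
#   "BertModel"     -> _re_pt_models,   captured prefix "Bert"
# Note that "TFBertModel" would also match _re_pt_models (with prefix "TFBert"), hence the else-branch
# ordering mentioned above.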
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("image-feature-extraction", "MODEL_FOR_IMAGE_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("image-text-to-text", "MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES", "AutoModelForImageTextToText"),
("image-to-image", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "AutoModelForImageToImage"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
("text-to-audio", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES", "AutoModelForTextToSpectrogram"),
("text-to-audio", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES", "AutoModelForTextToWaveform"),
]
def camel_case_split(identifier: str) -> list[str]:
"""
Split a camel-cased name into words.
Args:
identifier (`str`): The camel-cased name to parse.
Returns:
`List[str]`: The list of words in the identifier (as separated by capital letters).
Example:
```py
>>> camel_case_split("CamelCasedClass")
["Camel", "Cased", "Class"]
```
"""
# Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
return [m.group(0) for m in matches]
def get_frameworks_table() -> pd.DataFrame:
"""
Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
modules.
"""
# CONFIG_MAPPING_NAMES maps model types to config class names; invert it to map class-name prefixes to model types.
config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
model_prefix_to_model_type = {
config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
pt_models = collections.defaultdict(bool)
tf_models = collections.defaultdict(bool)
flax_models = collections.defaultdict(bool)
# Let's look through all transformers objects (once) and find out whether each model is supported by a given backend.
for attr_name in dir(transformers_module):
lookup_dict = None
if _re_tf_models.match(attr_name) is not None:
lookup_dict = tf_models
attr_name = _re_tf_models.match(attr_name).groups()[0]
elif _re_flax_models.match(attr_name) is not None:
lookup_dict = flax_models
attr_name = _re_flax_models.match(attr_name).groups()[0]
elif _re_pt_models.match(attr_name) is not None:
lookup_dict = pt_models
attr_name = _re_pt_models.match(attr_name).groups()[0]
if lookup_dict is not None:
while len(attr_name) > 0:
if attr_name in model_prefix_to_model_type:
lookup_dict[model_prefix_to_model_type[attr_name]] = True
break
# Try again after removing the last word in the name
attr_name = "".join(camel_case_split(attr_name)[:-1])
all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
all_models = list(all_models)
all_models.sort()
data = {"model_type": all_models}
data["pytorch"] = [pt_models[t] for t in all_models]
data["tensorflow"] = [tf_models[t] for t in all_models]
data["flax"] = [flax_models[t] for t in all_models]
# Now let's find the right processing class for each model. In order, we check if there is a Processor, then a
# Tokenizer, then an ImageProcessor, then a FeatureExtractor.
processors = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
processors[t] = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
processors[t] = "AutoTokenizer"
elif t in transformers_module.models.auto.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES:
processors[t] = "AutoImageProcessor"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
processors[t] = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
processors[t] = "AutoTokenizer"
data["processor"] = [processors[t] for t in all_models]
return pd.DataFrame(data)
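# The resulting frame has one row per model type, e.g. (illustrative values only):
#   model_type  pytorch  tensorflow  flax   processor
#   albert      True     True        True   AutoTokenizer
#   ...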
def update_pipeline_and_auto_class_table(table: dict[str, tuple[str, str]]) -> dict[str, tuple[str, str]]:
"""
Update the table mapping models to pipelines and auto classes, keeping old keys even if the corresponding models no longer exist.
Args:
table (`Dict[str, Tuple[str, str]]`):
The existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with
which they should be used.
Returns:
`Dict[str, Tuple[str, str]]`: The updated table in the same format.
"""
auto_modules = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
# The type of pipeline may not exist in this framework
if not hasattr(module, mapping):
continue
# First extract all model_names
model_names = []
for name in getattr(module, mapping).values():
if isinstance(name, str):
model_names.append(name)
else:
model_names.extend(list(name))
# Add pipeline tag and auto model class for those models
table.update(dict.fromkeys(model_names, (pipeline_tag, cls)))
return table
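# After this pass, an entry looks like, e.g.:
#   table["BertForSequenceClassification"] == ("text-classification", "AutoModelForSequenceClassification")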
def update_metadata(token: str, commit_sha: str):
"""
Update the metadata for the Transformers repo in `huggingface/transformers-metadata`.
Args:
token (`str`): A valid token giving write access to `huggingface/transformers-metadata`.
commit_sha (`str`): The commit SHA on Transformers corresponding to this update.
"""
frameworks_table = get_frameworks_table()
frameworks_dataset = Dataset.from_pandas(frameworks_table)
resolved_tags_file = hf_hub_download(
"huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
)
tags_dataset = Dataset.from_json(resolved_tags_file)
table = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(tags_dataset))
}
table = update_pipeline_and_auto_class_table(table)
# Sort the model classes so that nondeterministic ordering does not create spurious update commits.
model_classes = sorted(table.keys())
tags_table = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
}
)
tags_dataset = Dataset.from_pandas(tags_table)
hub_frameworks_json = hf_hub_download(
repo_id="huggingface/transformers-metadata",
filename="frameworks.json",
repo_type="dataset",
token=token,
)
with open(hub_frameworks_json) as f:
hub_frameworks_json = f.read()
hub_pipeline_tags_json = hf_hub_download(
repo_id="huggingface/transformers-metadata",
filename="pipeline_tags.json",
repo_type="dataset",
token=token,
)
with open(hub_pipeline_tags_json) as f:
hub_pipeline_tags_json = f.read()
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
with open(os.path.join(tmp_dir, "frameworks.json")) as f:
frameworks_json = f.read()
with open(os.path.join(tmp_dir, "pipeline_tags.json")) as f:
pipeline_tags_json = f.read()
frameworks_equal = hub_frameworks_json == frameworks_json
hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
if frameworks_equal and hub_pipeline_tags_equal:
print("No updates on the Hub, not pushing the metadata files.")
return
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
commit_message = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata",
folder_path=tmp_dir,
repo_type="dataset",
token=token,
commit_message=commit_message,
)
def check_pipeline_tags():
"""
Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant of this script.
"""
in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
missing = []
for key in pipeline_tasks:
if key not in in_table:
model = pipeline_tasks[key]["pt"]
if isinstance(model, (list, tuple)):
model = model[0]
model = model.__name__
if model not in in_table.values():
missing.append(key)
if len(missing) > 0:
msg = ", ".join(missing)
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
f"`utils/update_metadata.py`: {msg}. Please add them!"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| transformers/utils/update_metadata.py/0 | {
"file_path": "transformers/utils/update_metadata.py",
"repo_id": "transformers",
"token_count": 6424
} | 534 |
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.10
hooks:
- id: ruff-check
types_or: [ python, pyi ]
args: [ --fix ]
- id: ruff-format
types_or: [ python, pyi ]
# - repo: https://github.com/codespell-project/codespell
# rev: v2.1.0
# hooks:
# - id: codespell
# args:
# - --ignore-words-list=nd,reacher,thist,ths,magent,ba
# - --skip=docs/css/termynal.css,docs/js/termynal.js
| trl/.pre-commit-config.yaml/0 | {
"file_path": "trl/.pre-commit-config.yaml",
"repo_id": "trl",
"token_count": 250
} | 535 |
# Callbacks
## SyncRefModelCallback
[[autodoc]] SyncRefModelCallback
## RichProgressCallback
[[autodoc]] RichProgressCallback
## WinRateCallback
[[autodoc]] WinRateCallback
## LogCompletionsCallback
[[autodoc]] LogCompletionsCallback
## MergeModelCallback
[[autodoc]] MergeModelCallback
## BEMACallback
[[autodoc]] BEMACallback
| trl/docs/source/callbacks.md/0 | {
"file_path": "trl/docs/source/callbacks.md",
"repo_id": "trl",
"token_count": 110
} | 536 |
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl_banner_dark.png">
</div>
# TRL - Transformer Reinforcement Learning
TRL is a full-stack library providing a set of tools to train transformer language models with methods such as Supervised Fine-Tuning (SFT), Group Relative Policy Optimization (GRPO), Direct Preference Optimization (DPO), Reward Modeling, and more.
The library is integrated with 🤗 [transformers](https://github.com/huggingface/transformers).
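As a quick taste of the API, here is a minimal, illustrative SFT sketch (the dataset and model names below are placeholders; see the Quickstart for the full walkthrough):
```python
from datasets import load_dataset
from trl import SFTTrainer

# Placeholder dataset and model; swap in your own.
dataset = load_dataset("trl-lib/Capybara", split="train")

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=dataset,
)
trainer.train()
```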
## 🎉 What's New
**✨ OpenAI GPT OSS Support**: TRL now fully supports fine-tuning the latest [OpenAI GPT OSS models](https://huggingface.co/collections/openai/gpt-oss-68911959590a1634ba11c7a4)! Check out the following resources:
- [OpenAI Cookbook](https://cookbook.openai.com/articles/gpt-oss/fine-tune-transfomers)
- [GPT OSS recipes](https://github.com/huggingface/gpt-oss-recipes)
- [Our example script](https://github.com/huggingface/trl/blob/main/examples/scripts/sft_gpt_oss.py)
You can also explore TRL-related models, datasets, and demos in the [TRL Hugging Face organization](https://huggingface.co/trl-lib).
## Learn
Learn post-training with TRL and other libraries in 🤗 [smol course](https://github.com/huggingface/smol-course).
## Contents
The documentation is organized into the following sections:
- **Getting Started**: installation and quickstart guide.
- **Conceptual Guides**: dataset formats, training FAQ, and understanding logs.
- **How-to Guides**: reducing memory usage, speeding up training, distributing training, etc.
- **Integrations**: DeepSpeed, Liger Kernel, PEFT, etc.
- **Examples**: example overview, community tutorials, etc.
- **API**: trainers, utils, etc.
## Blog posts
<div class="mt-10">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-vlm-alignment">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/trl_vlm/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on August 7, 2025</p>
<p class="text-gray-700">Vision Language Model Alignment in TRL ⚡️</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/vllm-colocate">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/vllm-colocate/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on June 3, 2025</p>
<p class="text-gray-700">NO GPU left behind: Unlocking Efficiency with Co-located vLLM in TRL</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/liger-grpo">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/liger-grpo/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on May 25, 2025</p>
<p class="text-gray-700">🐯 Liger GRPO meets TRL</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/open-r1">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/open-r1/thumbnails.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on January 28, 2025</p>
<p class="text-gray-700">Open-R1: a fully open reproduction of DeepSeek-R1</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/dpo_vlm">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/dpo_vlm/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on July 10, 2024</p>
<p class="text-gray-700">Preference Optimization for Vision Language Models with TRL</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/putting_rl_back_in_rlhf_with_rloo">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/putting_rl_back_in_rlhf_with_rloo/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on June 12, 2024</p>
<p class="text-gray-700">Putting RL back in RLHF</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-ddpo">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/166_trl_ddpo/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on September 29, 2023</p>
<p class="text-gray-700">Finetune Stable Diffusion Models with DDPO via TRL</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/dpo-trl">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/157_dpo_trl/dpo_thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on August 8, 2023</p>
<p class="text-gray-700">Fine-tune Llama 2 with DPO</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/stackllama">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/138_stackllama/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on April 5, 2023</p>
<p class="text-gray-700">StackLLaMA: A hands-on guide to train LLaMA with RLHF</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-peft">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/133_trl_peft/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on March 9, 2023</p>
<p class="text-gray-700">Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/rlhf">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/120_rlhf/thumbnail.png" alt="thumbnail" class="mt-0">
<p class="text-gray-500 text-sm">Published on December 9, 2022</p>
<p class="text-gray-700">Illustrating Reinforcement Learning from Human Feedback</p>
</a>
</div>
</div>
| trl/docs/source/index.md/0 | {
"file_path": "trl/docs/source/index.md",
"repo_id": "trl",
"token_count": 2548
} | 537 |
# Examples
Please check out https://huggingface.co/docs/trl/example_overview for documentation on our examples. | trl/examples/README.md/0 | {
"file_path": "trl/examples/README.md",
"repo_id": "trl",
"token_count": 30
} | 538 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
from huggingface_hub import ModelCard
from transformers import HfArgumentParser
@dataclass
class ScriptArguments:
r"""
Arguments for the script.
Args:
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether to push the dataset to the Hugging Face Hub.
repo_id (`str`, *optional*, defaults to `"trl-lib/tldr"`):
Hugging Face repository ID to push the dataset to.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of workers to use for dataset processing.
"""
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether to push the dataset to the Hugging Face Hub."},
)
repo_id: str = field(
default="trl-lib/tldr",
metadata={"help": "Hugging Face repository ID to push the dataset to."},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of workers to use for dataset processing."},
)
def to_prompt_completion(example):
tldr_format_str = "SUBREDDIT: r/{subreddit}\n\nTITLE: {title}\n\nPOST: {post}\n\nTL;DR:"
prompt = tldr_format_str.format(subreddit=example["subreddit"], title=example["title"], post=example["post"])
completion = " " + example["summary"] # Add a space to separate the prompt from the completion
return {"prompt": prompt, "completion": completion}
model_card = ModelCard("""
---
tags: [trl]
---
# TL;DR Dataset
## Summary
The TL;DR dataset is a processed version of Reddit posts, specifically curated to train models using the [TRL library](https://github.com/huggingface/trl) for summarization tasks. It leverages the common practice on Reddit where users append "TL;DR" (Too Long; Didn't Read) summaries to lengthy posts, providing a rich source of paired text data for training summarization models.
## Data Structure
- **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard)
- **Type**: [Prompt-completion](https://huggingface.co/docs/trl/main/dataset_formats#prompt-completion)
Columns:
- `"prompt"`: The unabridged Reddit post.
- `"completion"`: The concise "TL;DR" summary appended by the author.
This structure enables models to learn the relationship between detailed content and its abbreviated form, enhancing their summarization capabilities.
## Generation script
The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/tldr.py).
""")
if __name__ == "__main__":
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
# Filtered reddit TL;DR dataset from https://github.com/openai/summarize-from-feedback?tab=readme-ov-file#reddit-tldr-dataset
data_files = {
"train": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/train.jsonl",
"validation": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/valid.jsonl",
"test": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/test.jsonl",
}
dataset = load_dataset("json", data_files=data_files)
dataset = dataset.map(
to_prompt_completion,
num_proc=script_args.dataset_num_proc,
remove_columns=["id", "subreddit", "title", "post", "summary"],
)
if script_args.push_to_hub:
dataset.push_to_hub(script_args.repo_id)
model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
| trl/examples/datasets/tldr.py/0 | {
"file_path": "trl/examples/datasets/tldr.py",
"repo_id": "trl",
"token_count": 1504
} | 539 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional, Union
import evaluate
import numpy as np
import torch
import torch.nn as nn
from datasets import load_dataset
from peft import LoraConfig, TaskType, get_peft_model
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizerBase,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
from transformers.utils import PaddingStrategy
# Define and parse arguments.
@dataclass
class ScriptArguments:
"""
These arguments vary depending on how many GPUs you have, what their capacity and features are, and what size model you want to train.
"""
local_rank: Optional[int] = field(default=-1, metadata={"help": "Used for multi-gpu"})
resume_from_checkpoint: Optional[bool] = field(
default=False,
metadata={"help": "If you want to resume training where it left off."},
)
deepspeed: Optional[str] = field(
default=None,
metadata={
"help": "Path to deepspeed config if using deepspeed. You may need this if the model that you want to train doesn't fit on a single GPU."
},
)
per_device_train_batch_size: Optional[int] = field(default=4)
per_device_eval_batch_size: Optional[int] = field(default=1)
gradient_accumulation_steps: Optional[int] = field(default=1)
learning_rate: Optional[float] = field(default=2e-5)
weight_decay: Optional[float] = field(default=0.001)
model_name: Optional[str] = field(
default="gpt2",
metadata={
"help": "The model that you want to train from the Hugging Face hub. E.g. gpt2, gpt2-xl, bert, etc."
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "The tokenizer for your model, if left empty will use the default for your model",
},
)
bf16: Optional[bool] = field(
default=True,
metadata={
"help": "This essentially cuts the training time in half if you want to sacrifice a little precision and have a supported GPU."
},
)
num_train_epochs: Optional[int] = field(
default=1,
metadata={"help": "The number of training epochs for the reward model."},
)
train_subset: Optional[int] = field(
default=100000,
metadata={"help": "The size of the subset of the training data to use"},
)
eval_subset: Optional[int] = field(
default=50000,
metadata={"help": "The size of the subset of the eval data to use"},
)
gradient_checkpointing: Optional[bool] = field(
default=False,
metadata={"help": "Enables gradient checkpointing."},
)
optim: Optional[str] = field(
default="adamw_hf",
metadata={"help": "The optimizer to use."},
)
lr_scheduler_type: Optional[str] = field(
default="linear",
metadata={"help": "The lr scheduler"},
)
max_length: Optional[int] = field(default=512)
eval_first_step: Optional[bool] = field(
default=False,
metadata={"help": "Whether to run eval after the first step"},
)
seed: Optional[int] = field(
default=0, metadata={"help": "Random seed that will be set at the beginning of training."}
)
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
set_seed(script_args.seed)
# Load the human stack-exchange-paired dataset for tuning the reward model.
train_dataset = load_dataset(
"lvwerra/stack-exchange-paired", data_dir="data/reward", split="train", verification_mode="no_checks"
)
if script_args.train_subset > 0:
train_dataset = train_dataset.select(range(script_args.train_subset))
eval_dataset = load_dataset(
"lvwerra/stack-exchange-paired", data_dir="data/evaluation", split="train", verification_mode="no_checks"
)
if script_args.eval_subset > 0:
eval_dataset = eval_dataset.select(range(script_args.eval_subset))
# Define the training args. Needs to be done before the model is loaded if you are using deepspeed.
model_name_split = script_args.model_name.split("/")[-1]
output_name = (
f"{model_name_split}_peft_stack-exchange-paired_rmts__{script_args.train_subset}_{script_args.learning_rate}"
)
training_args = TrainingArguments(
output_dir=output_name,
learning_rate=script_args.learning_rate,
per_device_train_batch_size=script_args.per_device_train_batch_size,
per_device_eval_batch_size=script_args.per_device_eval_batch_size,
num_train_epochs=script_args.num_train_epochs,
weight_decay=script_args.weight_decay,
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
gradient_accumulation_steps=script_args.gradient_accumulation_steps,
gradient_checkpointing=script_args.gradient_checkpointing,
deepspeed=script_args.deepspeed,
local_rank=script_args.local_rank,
remove_unused_columns=False,
label_names=[],
bf16=script_args.bf16,
logging_strategy="steps",
optim=script_args.optim,
lr_scheduler_type=script_args.lr_scheduler_type,
seed=script_args.seed,
)
# Load the value-head model and tokenizer.
tokenizer_name = script_args.tokenizer_name if script_args.tokenizer_name is not None else script_args.model_name
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_auth_token=True)
tokenizer.pad_token = tokenizer.eos_token
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
model = AutoModelForSequenceClassification.from_pretrained(
script_args.model_name, num_labels=1, torch_dtype=torch.bfloat16
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
# Need to do this for gpt2, because it doesn't have an official pad token.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id
model.config.use_cache = not script_args.gradient_checkpointing
num_proc = 24 # Can adjust to be higher if you have more processors.
original_columns = train_dataset.column_names
# Turn the dataset into pairs of question + answer texts, where the "j" text contains the preferred answer and the "k" text contains the other.
# Then tokenize the dataset.
def preprocess_function(examples):
new_examples = {
"input_ids_j": [],
"attention_mask_j": [],
"input_ids_k": [],
"attention_mask_k": [],
}
for question, response_j, response_k in zip(examples["question"], examples["response_j"], examples["response_k"]):
tokenized_j = tokenizer("Question: " + question + "\n\nAnswer: " + response_j, truncation=True)
tokenized_k = tokenizer("Question: " + question + "\n\nAnswer: " + response_k, truncation=True)
new_examples["input_ids_j"].append(tokenized_j["input_ids"])
new_examples["attention_mask_j"].append(tokenized_j["attention_mask"])
new_examples["input_ids_k"].append(tokenized_k["input_ids"])
new_examples["attention_mask_k"].append(tokenized_k["attention_mask"])
return new_examples
# preprocess the dataset and filter out QAs that are longer than script_args.max_length
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=num_proc,
remove_columns=original_columns,
)
train_dataset = train_dataset.filter(
lambda x: len(x["input_ids_j"]) <= script_args.max_length and len(x["input_ids_k"]) <= script_args.max_length,
num_proc=num_proc,
)
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=num_proc,
remove_columns=original_columns,
)
eval_dataset = eval_dataset.filter(
lambda x: len(x["input_ids_j"]) <= script_args.max_length and len(x["input_ids_k"]) <= script_args.max_length,
num_proc=num_proc,
)
# We need to define a special data collator that batches the data in our j vs k format.
@dataclass
class RewardDataCollatorWithPadding:
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pt"
def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]:
features_j = []
features_k = []
for feature in features:
features_j.append(
{
"input_ids": feature["input_ids_j"],
"attention_mask": feature["attention_mask_j"],
}
)
features_k.append(
{
"input_ids": feature["input_ids_k"],
"attention_mask": feature["attention_mask_k"],
}
)
batch_j = self.tokenizer.pad(
features_j,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
batch_k = self.tokenizer.pad(
features_k,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
batch = {
"input_ids_j": batch_j["input_ids"],
"attention_mask_j": batch_j["attention_mask"],
"input_ids_k": batch_k["input_ids"],
"attention_mask_k": batch_k["attention_mask"],
"return_loss": True,
}
return batch
# Define the metric that we'll use for validation.
accuracy = evaluate.load("accuracy")
def compute_metrics(eval_pred):
predictions, _ = eval_pred
# Here, predictions is rewards_j and rewards_k.
# We want to see how much of the time rewards_j > rewards_k.
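# Since the predictions are stacked as (rewards_j, rewards_k), argmax over axis 0 returns 0 whenever
# rewards_j > rewards_k. The "j" answer is always the preferred one, so the reference labels are all
# zeros and the accuracy is the fraction of pairs ranked correctly.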
predictions = np.argmax(predictions, axis=0)
labels = np.zeros(predictions.shape)
return accuracy.compute(predictions=predictions, references=labels)
class RewardTrainer(Trainer):
# Define how to compute the reward loss. We use the InstructGPT pairwise logloss: https://huggingface.co/papers/2203.02155
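# In other words: loss = -E[log(sigmoid(r_j - r_k))], where r_j is the reward of the preferred answer
# and r_k is the reward of the rejected one.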
def compute_loss(self, model, inputs, return_outputs=False):
rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
if return_outputs:
return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
return loss
# Train the model, woohoo.
trainer = RewardTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
data_collator=RewardDataCollatorWithPadding(tokenizer=tokenizer),
)
if script_args.eval_first_step:
class EvaluateFirstStepCallback(TrainerCallback):
def on_step_end(self, args, state, control, **kwargs):
if state.global_step == 1:
control.should_evaluate = True
trainer.add_callback(EvaluateFirstStepCallback())
trainer.train(script_args.resume_from_checkpoint)
print("Saving last checkpoint of the model")
model.save_pretrained(output_name + "_peft_last_checkpoint")
| trl/examples/research_projects/stack_llama/scripts/reward_modeling.py/0 | {
"file_path": "trl/examples/research_projects/stack_llama/scripts/reward_modeling.py",
"repo_id": "trl",
"token_count": 4684
} | 540 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# "peft",
# "Pillow>=9.4.0",
# ]
# ///
"""
Without dataset streaming:
```
accelerate launch examples/scripts/dpo_vlm.py \
--dataset_name HuggingFaceH4/rlaif-v_formatted \
--model_name_or_path Qwen/Qwen2.5-VL-3B-Instruct \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 32 \
--dataset_num_proc 32 \
--output_dir dpo_idefics_rlaif-v \
--torch_dtype bfloat16 \
--gradient_checkpointing \
--use_peft \
--lora_target_modules=all-linear \
--report_to wandb
```
With dataset streaming:
```
accelerate launch examples/scripts/dpo_vlm.py \
--dataset_name HuggingFaceH4/rlaif-v_formatted \
--dataset_streaming \
--model_name_or_path Qwen/Qwen2.5-VL-3B-Instruct \
--per_device_train_batch_size 2 \
--max_steps 100 \
--gradient_accumulation_steps 32 \
--dataset_num_proc 32 \
--output_dir dpo_idefics_rlaif-v \
--torch_dtype bfloat16 \
--gradient_checkpointing \
--use_peft \
--lora_target_modules=all-linear \
--report_to wandb
```
"""
import torch
from datasets import load_dataset
from transformers import AutoModelForImageTextToText, AutoProcessor
from trl import (
DPOConfig,
DPOTrainer,
ModelConfig,
ScriptArguments,
TrlParser,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
)
if __name__ == "__main__":
parser = TrlParser((ScriptArguments, DPOConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_and_config()
################
# Model & Tokenizer
################
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
revision=model_args.model_revision,
attn_implementation=model_args.attn_implementation,
torch_dtype=torch_dtype,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
model = AutoModelForImageTextToText.from_pretrained(
model_args.model_name_or_path,
trust_remote_code=model_args.trust_remote_code,
**model_kwargs,
)
peft_config = get_peft_config(model_args)
if peft_config is None:
ref_model = AutoModelForImageTextToText.from_pretrained(
model_args.model_name_or_path,
trust_remote_code=model_args.trust_remote_code,
**model_kwargs,
)
else:
ref_model = None
processor = AutoProcessor.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, do_image_splitting=False
)
tokenizer = processor.tokenizer
# Set up the chat template
if model.config.model_type == "idefics2":
pass # the processor already has a valid chat template
elif model.config.model_type == "paligemma":
processor.chat_template = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}<|im_start|>{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] if item['type'] == 'text' %}{{ item['text'] }}<|im_end|>{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}"""
elif model.config.model_type == "llava":
processor.chat_template = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}"""
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
if script_args.ignore_bias_buffers:
# torch distributed hack
model._ddp_params_and_buffers_to_ignore = [
name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
]
################
# Dataset
################
dataset = load_dataset(
script_args.dataset_name,
name=script_args.dataset_config,
streaming=script_args.dataset_streaming,
)
################
# Training
################
trainer = DPOTrainer(
model,
ref_model,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
processing_class=processor,
peft_config=peft_config,
)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
| trl/examples/scripts/dpo_vlm.py/0 | {
"file_path": "trl/examples/scripts/dpo_vlm.py",
"repo_id": "trl",
"token_count": 2346
} | 541 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from datetime import date
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument("--slack_channel_name", default="trl-push-examples-ci")
parser.add_argument("--text_file_name", required=True)
def main(text_file_name, slack_channel_name=None):
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
message = ""
if os.path.isfile(text_file_name):
final_results = {}
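# Each line of the results file is expected to look like "<status_code>,<path/to/config>.yaml",
# e.g. "0,examples/accelerate_configs/multi_gpu.yaml" (hypothetical path), where 0 means the
# example passed and any non-zero value marks a failure.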
try:
with open(text_file_name) as file:
for line in file:
result, config_name = line.strip().split(",")
config_name = config_name.split("/")[-1].split(".yaml")[0]
final_results[config_name] = int(result)
except Exception as e:
logger.error(f"Error reading file {text_file_name}: {str(e)}")
final_results = {}
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": "🌞 There were no failures on the example tests!"
if not len(final_results) == 0
else "Something went wrong there is at least one empty file - please check GH action results.",
"emoji": True,
},
}
total_num_failed = sum(final_results.values())
else:
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": "❌ Something is wrong with the workflow please check ASAP!"
"Something went wrong there is no text file being produced. Please check ASAP.",
"emoji": True,
},
}
total_num_failed = 0
test_type_name = text_file_name.replace(".txt", "").replace("temp_results_", "").replace("_", " ").title()
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "🤗 Results of the {} TRL {} example tests.".format(
os.environ.get("TEST_TYPE", ""), test_type_name
),
},
},
]
if total_num_failed > 0:
message += f"{total_num_failed} failed tests for example tests!"
for test_name, failed in final_results.items():
failed_table = tabulate(
[[test_name, "✅" if not failed else "❌"]],
headers=["Test Name", "Status"],
showindex="always",
tablefmt="grid",
maxcolwidths=[12],
)
message += "\n```\n" + failed_table + "\n```"
print(f"### {message}")
else:
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
try:
from slack_sdk import WebClient
except ImportError:
logger.error("slack_sdk is not installed. Please install it to use Slack integration.")
return
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {"type": "mrkdwn", "text": message},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"On Push - main {os.environ.get('TEST_TYPE')} test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
try:
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
response = client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if response["ok"]:
logger.info("Message sent successfully to Slack.")
else:
logger.error(f"Failed to send message to Slack: {response['error']}")
except Exception as e:
logger.error(f"Error sending message to Slack: {str(e)}")
if __name__ == "__main__":
args = parser.parse_args()
main(args.text_file_name, args.slack_channel_name)
| trl/scripts/log_example_reports.py/0 | {
"file_path": "trl/scripts/log_example_reports.py",
"repo_id": "trl",
"token_count": 2644
} | 542 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
from io import StringIO
from unittest.mock import patch
import yaml
from .testing_utils import TrlTestCase
@unittest.skipIf(
sys.version_info < (3, 10),
"Transformers' generation codebase uses a Python >3.10 syntax (`str | None`), which seems to cause the CLI tests "
"to fail on Python <3.10.", # let's say it's a known issue, but not expected to be fixed, because too niche
)
class TestCLI(TrlTestCase):
def test_dpo(self):
from trl.cli import main
command = f"trl dpo --output_dir {self.tmp_dir} --model_name_or_path trl-internal-testing/tiny-Qwen2ForCausalLM-2.5 --dataset_name trl-internal-testing/zen --dataset_config standard_preference --report_to none"
with patch("sys.argv", command.split(" ")):
main()
def test_dpo_multiple_loss_types(self):
from trl.cli import main
command = f"trl dpo --output_dir {self.tmp_dir} --model_name_or_path trl-internal-testing/tiny-Qwen2ForCausalLM-2.5 --dataset_name trl-internal-testing/zen --dataset_config standard_preference --report_to none --loss_type sigmoid bco_pair --loss_weights 1.0 0.5"
with patch("sys.argv", command.split(" ")):
main()
@patch("sys.stdout", new_callable=StringIO)
def test_env(self, mock_stdout):
from trl.cli import main
command = "trl env"
with patch("sys.argv", command.split(" ")):
main()
self.assertIn("TRL version: ", mock_stdout.getvalue().strip())
def test_grpo(self):
from trl.cli import main
command = f"trl grpo --output_dir {self.tmp_dir} --model_name_or_path trl-internal-testing/tiny-Qwen2ForCausalLM-2.5 --reward_model_name_or_path trl-internal-testing/tiny-Qwen2ForSequenceClassification-2.5 --dataset_name trl-internal-testing/zen --dataset_config standard_prompt_only --num_generations 4 --max_completion_length 32 --report_to none"
with patch("sys.argv", command.split(" ")):
main()
def test_kto(self):
from trl.cli import main
command = f"trl kto --output_dir {self.tmp_dir} --model_name_or_path trl-internal-testing/tiny-Qwen2ForCausalLM-2.5 --dataset_name trl-internal-testing/zen --dataset_config standard_unpaired_preference --report_to none"
with patch("sys.argv", command.split(" ")):
main()
def test_sft(self):
from trl.cli import main
command = f"trl sft --output_dir {self.tmp_dir} --model_name_or_path trl-internal-testing/tiny-Qwen2ForCausalLM-2.5 --dataset_name trl-internal-testing/zen --dataset_config standard_language_modeling --report_to none"
with patch("sys.argv", command.split(" ")):
main()
def test_sft_config_file(self):
from trl.cli import main
output_dir = os.path.join(self.tmp_dir, "output")
# Create a temporary config file
config_path = os.path.join(self.tmp_dir, "config.yaml")
config_content = {
"model_name_or_path": "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
"dataset_name": "trl-internal-testing/zen",
"dataset_config": "standard_language_modeling",
"report_to": "none",
"output_dir": output_dir,
"lr_scheduler_type": "cosine_with_restarts",
}
with open(config_path, "w") as config_file:
yaml.dump(config_content, config_file)
# Test the CLI with config file
command = f"trl sft --config {config_path}"
with patch("sys.argv", command.split(" ")):
main()
# Verify that output directory was created
self.assertTrue(os.path.exists(output_dir))
if __name__ == "__main__":
unittest.main()
| trl/tests/test_cli.py/0 | {
"file_path": "trl/tests/test_cli.py",
"repo_id": "trl",
"token_count": 1737
} | 543 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import random
import shutil
import tempfile
import unittest
import warnings
import torch
from transformers import is_bitsandbytes_available, is_comet_available, is_sklearn_available, is_wandb_available
from transformers.testing_utils import torch_device
from transformers.utils import is_rich_available
from trl import BaseBinaryJudge, BasePairwiseJudge
from trl.import_utils import (
is_diffusers_available,
is_joblib_available,
is_llm_blender_available,
is_mergekit_available,
is_vllm_available,
)
# transformers.testing_utils contains a require_bitsandbytes function, but relies on pytest markers which we don't use
# in our test suite. We therefore need to implement our own version of this function.
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires bitsandbytes. Skips the test if bitsandbytes is not available.
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case)
def require_comet(test_case):
"""
Decorator marking a test that requires Comet. Skips the test if Comet is not available.
"""
return unittest.skipUnless(is_comet_available(), "test requires comet_ml")(test_case)
def require_diffusers(test_case):
"""
Decorator marking a test that requires diffusers. Skips the test if diffusers is not available.
"""
return unittest.skipUnless(is_diffusers_available(), "test requires diffusers")(test_case)
def require_llm_blender(test_case):
"""
Decorator marking a test that requires llm-blender. Skips the test if llm-blender is not available.
"""
return unittest.skipUnless(is_llm_blender_available(), "test requires llm-blender")(test_case)
def require_mergekit(test_case):
"""
Decorator marking a test that requires mergekit. Skips the test if mergekit is not available.
"""
return unittest.skipUnless(is_mergekit_available(), "test requires mergekit")(test_case)
def require_rich(test_case):
"""
Decorator marking a test that requires rich. Skips the test if rich is not available.
"""
return unittest.skipUnless(is_rich_available(), "test requires rich")(test_case)
def require_sklearn(test_case):
"""
Decorator marking a test that requires sklearn. Skips the test if sklearn is not available.
"""
return unittest.skipUnless(is_sklearn_available() and is_joblib_available(), "test requires sklearn")(test_case)
def require_vllm(test_case):
"""
Decorator marking a test that requires vllm. Skips the test if vllm is not available.
"""
return unittest.skipUnless(is_vllm_available(), "test requires vllm")(test_case)
def require_no_wandb(test_case):
"""
Decorator marking a test that requires no wandb. Skips the test if wandb is available.
"""
return unittest.skipUnless(not is_wandb_available(), "test requires no wandb")(test_case)
def require_3_accelerators(test_case):
"""
Decorator marking a test that requires at least 3 accelerators. Skips the test if 3 accelerators are not available.
"""
torch_accelerator_module = getattr(torch, torch_device, torch.cuda)
return unittest.skipUnless(
torch_accelerator_module.device_count() >= 3, f"test requires at least 3 {torch_device}s"
)(test_case)
class RandomBinaryJudge(BaseBinaryJudge):
"""
Random binary judge, for testing purposes.
"""
def judge(self, prompts, completions, gold_completions=None, shuffle_order=True):
return [random.choice([0, 1, -1]) for _ in range(len(prompts))]
class RandomPairwiseJudge(BasePairwiseJudge):
"""
Random pairwise judge, for testing purposes.
"""
def judge(self, prompts, completions, shuffle_order=True, return_scores=False):
if not return_scores:
return [random.randint(0, len(completion) - 1) for completion in completions]
else:
return [random.random() for _ in range(len(prompts))]
class TrlTestCase(unittest.TestCase):
"""
Base test case for TRL tests. Sets up a temporary directory for testing.
"""
def setUp(self):
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
super().tearDown()
def ignore_warnings(message: str = None, category: type[Warning] = Warning) -> callable:
"""
Decorator to ignore warnings with a specific message and/or category.
Args:
message (`str`, *optional*, defaults to `None`):
Regex pattern for the warning message to ignore. If `None`, all messages are ignored.
category (`type[Warning]`, *optional*, defaults to `Warning`):
Warning class to ignore. Defaults to `Warning`, which ignores all warnings.
"""
def decorator(test_func):
@functools.wraps(test_func)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=message, category=category)
return test_func(*args, **kwargs)
return wrapper
return decorator
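# Example usage (illustrative):
#
#     @ignore_warnings(message=".*deprecated.*", category=DeprecationWarning)
#     def test_something(self):
#         ...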
| trl/tests/testing_utils.py/0 | {
"file_path": "trl/tests/testing_utils.py",
"repo_id": "trl",
"token_count": 1959
} | 544 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import base64
import logging
import socket
import time
from io import BytesIO
from typing import Optional, Union
from urllib.parse import urlparse
import torch
from torch import nn
from ..import_utils import is_requests_available, is_vllm_ascend_available, is_vllm_available
if is_requests_available():
import requests
from requests import ConnectionError
if is_vllm_available():
from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
from vllm.distributed.utils import StatelessProcessGroup
if is_vllm_ascend_available():
from vllm_ascend.distributed.device_communicators.pyhccl import PyHcclCommunicator as PyNcclCommunicator
logger = logging.getLogger(__name__)
class VLLMClient:
"""
A client class to interact with a vLLM server.
This class provides methods to generate completions, initialize and manage weight update groups, and update model
weights in a distributed setting. Before using it, start the vLLM server with `trl vllm-serve`.
Args:
base_url (`str` or `None`, *optional*, defaults to `None`):
Base URL for the vLLM server (e.g., `"http://localhost:8000"`). If provided, `host` and `server_port` are
ignored.
host (`str`, *optional*, defaults to `"0.0.0.0"`):
IP address of the vLLM server. Ignored if `base_url` is provided.
server_port (`int`, *optional*, defaults to `8000`):
Port number of the vLLM server. Ignored if `base_url` is provided.
group_port (`int`, *optional*, defaults to `51216`):
Port number for the weight update group.
connection_timeout (`float`, *optional*, defaults to `0.0`):
Total timeout duration in seconds to wait for the server to be up. If the server is not up after the
timeout, a `ConnectionError` is raised.
Examples:
Run the vLLM server with the model `Qwen/Qwen2.5-7B`:
```
$ trl vllm-serve --model Qwen/Qwen2.5-7B
...
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
```
Use the client to generate completions and update model weights:
```python
>>> from trl.extras.vllm_client import VLLMClient
>>> client = VLLMClient()
>>> client.generate(["Hello, AI!", "Tell me a joke"])
[[2980, 498, 1492, 752, 448, 264, 13027, 8645, 30, 358, 2776, 4460, 311, 3270, 264, 2025],
[911, 7988, 1251, 382, 3838, 653, 498, 1618, 4325, 879, 2581, 20027, 264, 21428, 30, 362]]
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-7B", device_map="cuda")
>>> client.init_communicator(device="cuda")
>>> client.update_model_params(model)
```
There are several ways to initialize the client:
```python
VLLMClient(base_url="http://localhost:8000")
VLLMClient(base_url="http://192.168.1.100:8000")
VLLMClient(host="localhost", server_port=8000)
VLLMClient(host="192.168.1.100", server_port=8000)
```
"""
def __init__(
self,
base_url: Optional[str] = None,
host: str = "0.0.0.0",
server_port: int = 8000,
group_port: int = 51216,
connection_timeout: float = 0.0,
):
if not is_requests_available():
raise ImportError("requests is not installed. Please install it with `pip install requests`.")
if not is_vllm_available():
raise ImportError("vLLM is not installed. Please install it with `pip install vllm`.")
self.session = requests.Session()
if base_url is not None:
# Parse the base_url to extract host and port
parsed_url = urlparse(base_url)
self.host = socket.gethostbyname(parsed_url.hostname)
scheme = parsed_url.scheme or "http"
self.base_url = f"{scheme}://{parsed_url.netloc}{parsed_url.path}"
else:
self.host = host
self.server_port = server_port
self.base_url = f"http://{self.host}:{self.server_port}"
self.group_port = group_port
self.check_server(connection_timeout) # check server and fail after timeout
def check_server(self, total_timeout: float = 0.0, retry_interval: float = 2.0):
"""
Check server availability with retries on failure, within a total timeout duration. If the server is not up
after the total timeout duration, raise a `ConnectionError`.
Args:
retry_interval (`float`, *optional*, defaults to `2.0`):
Interval in seconds between retries.
total_timeout (`float`, *optional*, defaults to `0.0`):
Total timeout duration in seconds.
"""
url = f"{self.base_url}/health/"
start_time = time.time() # Record the start time
while True:
try:
response = requests.get(url)
except requests.exceptions.RequestException as exc:
# Check if the total timeout duration has passed
elapsed_time = time.time() - start_time
if elapsed_time >= total_timeout:
raise ConnectionError(
f"The vLLM server can't be reached at {self.base_url} after {total_timeout} seconds. Make "
"sure the server is running by running `trl vllm-serve`."
) from exc
else:
if response.status_code == 200:
if "X-Forwarded-For" in response.headers:
self.host = response.headers["X-Forwarded-For"]
logger.info("Server is up!")
return None
# Retry logic: wait before trying again
logger.info(f"Server is not up yet. Retrying in {retry_interval} seconds...")
time.sleep(retry_interval)
def generate(
self,
prompts: list[str],
images: Optional[list] = None,
n: int = 1,
repetition_penalty: float = 1.0,
temperature: float = 1.0,
top_p: float = 1.0,
top_k: int = -1,
min_p: float = 0.0,
max_tokens: int = 16,
guided_decoding_regex: Optional[str] = None,
generation_kwargs: Optional[dict] = None,
) -> list[list[int]]:
"""
Generates model completions for the provided prompts.
Args:
prompts (`list[str]`):
List of text prompts for which the model will generate completions.
images (`list[PIL.Image]` or `None`, *optional*, defaults to `None`):
List of PIL Images to send along with the prompts.
n (`int`, *optional*, defaults to `1`):
Number of completions to generate for each prompt.
repetition_penalty (`float`, *optional*, defaults to `1.0`):
Parameter for repetition penalty. 1.0 means no penalty.
temperature (`float`, *optional*, defaults to `1.0`):
Temperature parameter for sampling. Higher values increase diversity.
top_p (`float`, *optional*, defaults to `1.0`):
                Top-p sampling parameter. `1.0` means no truncation.
top_k (`int`, *optional*, defaults to `-1`):
Top-k sampling parameter. `-1` means no truncation.
min_p (`float`, *optional*, defaults to `0.0`):
Minimum probability for sampling.
max_tokens (`int`, *optional*, defaults to `16`):
Maximum number of tokens to generate for each prompt.
guided_decoding_regex (`str` or `None`, *optional*, defaults to `None`):
Regular expression to guide the decoding process.
generation_kwargs (`dict` or `None`, *optional*, defaults to `None`):
Additional generation parameters to pass to the vLLM `SamplingParams`. This can include parameters like
`seed`, `frequency_penalty`, etc. If it contains keys that conflict with the other parameters, they
will override them.
Returns:
`list[list[int]]`:
List of lists of token IDs representing the model-generated completions for each prompt.
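        Example:

        A minimal call sketch (the prompt text and sampling values are illustrative only):

        ```python
        >>> client = VLLMClient()
        >>> client.generate(
        ...     ["The capital of France is"],
        ...     n=2,
        ...     temperature=0.7,
        ...     max_tokens=32,
        ...     generation_kwargs={"seed": 42},
        ... )
        [[...], [...]]  # two lists of token IDs, one per completion
        ```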
"""
url = f"{self.base_url}/generate/"
def pil_to_base64(image):
buffer = BytesIO()
image.save(buffer, format="PNG")
img_bytes = buffer.getvalue()
return base64.b64encode(img_bytes).decode("utf-8")
# Convert PIL images to base64 strings
images = [pil_to_base64(img) for img in images] if images else None
response = self.session.post(
url,
json={
"prompts": prompts,
"images": images,
"n": n,
"repetition_penalty": repetition_penalty,
"temperature": temperature,
"top_p": top_p,
"top_k": top_k,
"min_p": min_p,
"max_tokens": max_tokens,
"guided_decoding_regex": guided_decoding_regex,
"generation_kwargs": generation_kwargs or {},
},
)
if response.status_code == 200:
return response.json()["completion_ids"]
else:
raise Exception(f"Request failed: {response.status_code}, {response.text}")
def init_communicator(self, device: Union[torch.device, str, int] = 0):
"""
Initializes the weight update group in a distributed setup for model synchronization.
Args:
device (`torch.device`, `str`, or `int`, *optional*, defaults to `0`):
                Device of the trainer's main process. It's the device that will be used for weight synchronization.
                Can be a `torch.device` object, a string like `'cuda:0'`, or an integer device index.
"""
# Get the world size from the server
url = f"{self.base_url}/get_world_size/"
response = requests.get(url)
if response.status_code == 200:
vllm_world_size = response.json()["world_size"]
else:
raise Exception(f"Request failed: {response.status_code}, {response.text}")
world_size = vllm_world_size + 1 # add the client to the world
self.rank = vllm_world_size # the client's rank is the last process
# Initialize weight update group
url = f"{self.base_url}/init_communicator/"
client_device_uuid = str(torch.cuda.get_device_properties(device).uuid)
        # On the server side, the host is set to 0.0.0.0
response = self.session.post(
url,
json={
"host": "0.0.0.0",
"port": self.group_port,
"world_size": world_size,
"client_device_uuid": client_device_uuid,
},
)
if response.status_code != 200:
raise Exception(f"Request failed: {response.status_code}, {response.text}")
# Brief delay to allow server initialization. While not strictly required (client socket will retry on
# connection failure), this prevents log warnings like:
# [W416 23:24:57.460001114 socket.cpp:204] [c10d] The hostname of the client socket cannot be retrieved. err=-3
time.sleep(0.1)
# Set up the communication group for weight broadcasting
pg = StatelessProcessGroup.create(host=self.host, port=self.group_port, rank=self.rank, world_size=world_size)
self.pynccl_comm = PyNcclCommunicator(pg, device=device)
        # Register cleanup so the weight update group is closed when the process exits
atexit.register(self.close_communicator)
def update_named_param(self, name: str, weights: torch.Tensor):
"""
Updates a specific named parameter in the model and broadcasts it to other processes.
Args:
name (`str`):
Name of the layer whose weights are being updated.
weights (`torch.Tensor`):
Tensor containing the updated weights.
"""
dtype, shape = str(weights.dtype), tuple(weights.shape)
url = f"{self.base_url}/update_named_param/"
response = self.session.post(url, json={"name": name, "dtype": dtype, "shape": shape})
if response.status_code != 200:
raise Exception(f"Request failed: {response.status_code}, {response.text}")
# Broadcast the weights to the other processes
self.pynccl_comm.broadcast(weights, src=self.rank)
self.pynccl_comm.group.barrier()
def update_model_params(self, model: nn.Module):
"""
Updates all parameters of the given model by calling `update_named_param` for each parameter in the model.
Args:
model (`nn.Module`):
Model whose parameters (weights/biases) are to be updated.
"""
for name, param in model.named_parameters():
# Update each parameter individually
self.update_named_param(name, param.data)
def reset_prefix_cache(self):
"""
Resets the prefix cache for the model.
"""
url = f"{self.base_url}/reset_prefix_cache/"
response = self.session.post(url)
if response.status_code != 200:
raise Exception(f"Request failed: {response.status_code}, {response.text}")
def close_communicator(self):
"""
Closes the weight update group and cleans up the communication group.
"""
url = f"{self.base_url}/close_communicator/"
try:
response = self.session.post(url)
except ConnectionError:
# The server might be already down, so we don't need to close the communicator
pass
else:
if response.status_code != 200:
raise Exception(f"Request failed: {response.status_code}, {response.text}")
# Example usage
if __name__ == "__main__":
client = VLLMClient()
client.init_communicator(device="cuda")
# Generate completions
    responses = client.generate(["Hello, AI!", "Tell me a joke"], n=4, max_tokens=32)
print("Responses:", responses) # noqa
# Update model weights
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-7B").to("cuda")
client.update_model_params(model)
| trl/trl/extras/vllm_client.py/0 | {
"file_path": "trl/trl/extras/vllm_client.py",
"repo_id": "trl",
"token_count": 6498
} | 545 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# "peft",
# ]
# ///
"""
# Full training
```bash
python trl/scripts/dpo.py \
--dataset_name trl-lib/ultrafeedback_binarized \
--dataset_streaming \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--learning_rate 5.0e-7 \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 8 \
--gradient_checkpointing \
--eval_strategy steps \
--eval_steps 50 \
--output_dir Qwen2-0.5B-DPO \
    --no_remove_unused_columns \
--report_to wandb
```
# LoRA:
```bash
python trl/scripts/dpo.py \
--dataset_name trl-lib/ultrafeedback_binarized \
--dataset_streaming \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--learning_rate 5.0e-6 \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 8 \
--gradient_checkpointing \
--eval_strategy steps \
--eval_steps 50 \
--output_dir Qwen2-0.5B-DPO \
--no_remove_unused_columns \
--use_peft \
--lora_r 32 \
    --lora_alpha 16 \
--report_to wandb
```
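
# Programmatic usage

If you prefer to call the trainer from Python rather than the CLI, a minimal sketch (the model, dataset, and
hyperparameters below are illustrative, not prescriptive) looks like this:

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")

training_args = DPOConfig(output_dir="Qwen2-0.5B-DPO", per_device_train_batch_size=2)
trainer = DPOTrainer(
    model,
    args=training_args,
    train_dataset=dataset,
    processing_class=tokenizer,
)
trainer.train()
```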
"""
import argparse
import torch
from accelerate import logging
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import (
DatasetMixtureConfig,
DPOConfig,
DPOTrainer,
ModelConfig,
ScriptArguments,
TrlParser,
get_dataset,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
)
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
logger = logging.get_logger(__name__)
def main(script_args, training_args, model_args, dataset_args):
################
# Model & Tokenizer
###################
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
revision=model_args.model_revision,
attn_implementation=model_args.attn_implementation,
torch_dtype=torch_dtype,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs
)
peft_config = get_peft_config(model_args)
if peft_config is None:
ref_model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs
)
else:
ref_model = None
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
if tokenizer.chat_template is None:
tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
if script_args.ignore_bias_buffers:
# torch distributed hack
model._ddp_params_and_buffers_to_ignore = [
name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
]
# Load the dataset
if dataset_args.datasets and script_args.dataset_name:
logger.warning(
"Both `datasets` and `dataset_name` are provided. The `datasets` argument will be used to load the "
"dataset and `dataset_name` will be ignored."
)
elif dataset_args.datasets and not script_args.dataset_name:
dataset = get_dataset(dataset_args)
elif not dataset_args.datasets and script_args.dataset_name:
dataset = load_dataset(
script_args.dataset_name, name=script_args.dataset_config, streaming=script_args.dataset_streaming
)
else:
raise ValueError("Either `datasets` or `dataset_name` must be provided.")
# Initialize the DPO trainer
trainer = DPOTrainer(
model,
ref_model,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
processing_class=tokenizer,
peft_config=peft_config,
)
# Train the model
trainer.train()
if training_args.eval_strategy != "no":
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Save and push to Hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
def make_parser(subparsers: argparse._SubParsersAction = None):
dataclass_types = (ScriptArguments, DPOConfig, ModelConfig, DatasetMixtureConfig)
if subparsers is not None:
parser = subparsers.add_parser("dpo", help="Run the DPO training script", dataclass_types=dataclass_types)
else:
parser = TrlParser(dataclass_types)
return parser
if __name__ == "__main__":
parser = make_parser()
    # When using the trl cli, this script may be run with additional arguments, corresponding to the accelerate arguments.
# To ensure that their parsing does not interfere with the script arguments, parse the arguments with
# `return_remaining_strings=True`, then ignore the remaining strings.
script_args, training_args, model_args, dataset_args, _ = parser.parse_args_and_config(
return_remaining_strings=True
)
main(script_args, training_args, model_args, dataset_args)
| trl/trl/scripts/dpo.py/0 | {
"file_path": "trl/trl/scripts/dpo.py",
"repo_id": "trl",
"token_count": 2485
} | 546 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from transformers import is_bitsandbytes_available
from ..core import flatten_dict
@dataclass
class DDPOConfig:
r"""
Configuration class for the [`DDPOTrainer`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
exp_name (`str`, *optional*, defaults to `os.path.basename(sys.argv[0])[: -len(".py")]`):
            Name of this experiment (defaults to the file name without the extension).
run_name (`str`, *optional*, defaults to `""`):
Name of this run.
seed (`int`, *optional*, defaults to `0`):
Random seed.
        log_with (`Literal["wandb", "tensorboard"]` or `None`, *optional*, defaults to `None`):
Log with either 'wandb' or 'tensorboard', check
https://huggingface.co/docs/accelerate/usage_guides/tracking for more details.
tracker_kwargs (`Dict`, *optional*, defaults to `{}`):
Keyword arguments for the tracker (e.g. wandb_project).
accelerator_kwargs (`Dict`, *optional*, defaults to `{}`):
Keyword arguments for the accelerator.
project_kwargs (`Dict`, *optional*, defaults to `{}`):
Keyword arguments for the accelerator project config (e.g. `logging_dir`).
tracker_project_name (`str`, *optional*, defaults to `"trl"`):
Name of project to use for tracking.
logdir (`str`, *optional*, defaults to `"logs"`):
Top-level logging directory for checkpoint saving.
num_epochs (`int`, *optional*, defaults to `100`):
Number of epochs to train.
save_freq (`int`, *optional*, defaults to `1`):
Number of epochs between saving model checkpoints.
num_checkpoint_limit (`int`, *optional*, defaults to `5`):
Number of checkpoints to keep before overwriting old ones.
mixed_precision (`str`, *optional*, defaults to `"fp16"`):
Mixed precision training.
allow_tf32 (`bool`, *optional*, defaults to `True`):
Allow `tf32` on Ampere GPUs.
resume_from (`str`, *optional*, defaults to `""`):
Resume training from a checkpoint.
sample_num_steps (`int`, *optional*, defaults to `50`):
Number of sampler inference steps.
sample_eta (`float`, *optional*, defaults to `1.0`):
Eta parameter for the DDIM sampler.
sample_guidance_scale (`float`, *optional*, defaults to `5.0`):
Classifier-free guidance weight.
sample_batch_size (`int`, *optional*, defaults to `1`):
Batch size (per GPU) to use for sampling.
sample_num_batches_per_epoch (`int`, *optional*, defaults to `2`):
Number of batches to sample per epoch.
train_batch_size (`int`, *optional*, defaults to `1`):
Batch size (per GPU) to use for training.
train_use_8bit_adam (`bool`, *optional*, defaults to `False`):
Use 8bit Adam optimizer from bitsandbytes.
train_learning_rate (`float`, *optional*, defaults to `3e-4`):
Learning rate.
train_adam_beta1 (`float`, *optional*, defaults to `0.9`):
Adam beta1.
train_adam_beta2 (`float`, *optional*, defaults to `0.999`):
Adam beta2.
train_adam_weight_decay (`float`, *optional*, defaults to `1e-4`):
Adam weight decay.
train_adam_epsilon (`float`, *optional*, defaults to `1e-8`):
Adam epsilon.
train_gradient_accumulation_steps (`int`, *optional*, defaults to `1`):
Number of gradient accumulation steps.
train_max_grad_norm (`float`, *optional*, defaults to `1.0`):
Maximum gradient norm for gradient clipping.
train_num_inner_epochs (`int`, *optional*, defaults to `1`):
Number of inner epochs per outer epoch.
train_cfg (`bool`, *optional*, defaults to `True`):
Whether to use classifier-free guidance during training.
train_adv_clip_max (`float`, *optional*, defaults to `5.0`):
            Clip advantages to the range `[-train_adv_clip_max, train_adv_clip_max]`.
train_clip_range (`float`, *optional*, defaults to `1e-4`):
PPO clip range.
train_timestep_fraction (`float`, *optional*, defaults to `1.0`):
Fraction of timesteps to train on.
per_prompt_stat_tracking (`bool`, *optional*, defaults to `False`):
Whether to track statistics for each prompt separately.
per_prompt_stat_tracking_buffer_size (`int`, *optional*, defaults to `16`):
Number of reward values to store in the buffer for each prompt.
per_prompt_stat_tracking_min_count (`int`, *optional*, defaults to `16`):
Minimum number of reward values to store in the buffer.
async_reward_computation (`bool`, *optional*, defaults to `False`):
Whether to compute rewards asynchronously.
max_workers (`int`, *optional*, defaults to `2`):
Maximum number of workers to use for async reward computation.
negative_prompts (`str`, *optional*, defaults to `""`):
Comma-separated list of prompts to use as negative examples.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether to push the final model checkpoint to the Hub.
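
    Example:

    A minimal construction sketch (the values below are illustrative, not recommended settings):

    ```python
    >>> from trl import DDPOConfig

    >>> config = DDPOConfig(
    ...     num_epochs=10,
    ...     sample_batch_size=2,
    ...     train_batch_size=1,
    ...     train_learning_rate=3e-4,
    ...     per_prompt_stat_tracking=True,
    ... )
    >>> config.num_epochs
    10
    ```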
"""
exp_name: str = field(
default=os.path.basename(sys.argv[0])[: -len(".py")],
metadata={"help": "Name of this experiment (by default is the file name without the extension name)."},
)
run_name: str = field(
default="",
metadata={"help": "Name of this run."},
)
seed: int = field(
default=0,
metadata={"help": "Random seed."},
)
log_with: Optional[str] = field(
default=None,
metadata={
"help": "Log with either 'wandb' or 'tensorboard'.",
"choices": ["wandb", "tensorboard"],
},
)
tracker_kwargs: dict = field(
default_factory=dict,
metadata={"help": "Keyword arguments for the tracker (e.g. wandb_project)."},
)
accelerator_kwargs: dict = field(
default_factory=dict,
metadata={"help": "Keyword arguments for the accelerator."},
)
project_kwargs: dict = field(
default_factory=dict,
metadata={"help": "Keyword arguments for the accelerator project config (e.g. `logging_dir`)."},
)
tracker_project_name: str = field(
default="trl",
metadata={"help": "Name of project to use for tracking."},
)
logdir: str = field(
default="logs",
metadata={"help": "Top-level logging directory for checkpoint saving."},
)
num_epochs: int = field(
default=100,
metadata={"help": "Number of epochs to train."},
)
save_freq: int = field(
default=1,
metadata={"help": "Number of epochs between saving model checkpoints."},
)
num_checkpoint_limit: int = field(
default=5,
metadata={"help": "Number of checkpoints to keep before overwriting old ones."},
)
mixed_precision: str = field(
default="fp16",
metadata={"help": "Mixed precision training."},
)
allow_tf32: bool = field(
default=True,
metadata={"help": "Allow `tf32` on Ampere GPUs."},
)
resume_from: str = field(
default="",
metadata={"help": "Resume training from a checkpoint."},
)
sample_num_steps: int = field(
default=50,
metadata={"help": "Number of sampler inference steps."},
)
sample_eta: float = field(
default=1.0,
metadata={"help": "Eta parameter for the DDIM sampler."},
)
sample_guidance_scale: float = field(
default=5.0,
metadata={"help": "Classifier-free guidance weight."},
)
sample_batch_size: int = field(
default=1,
metadata={"help": "Batch size (per GPU) to use for sampling."},
)
sample_num_batches_per_epoch: int = field(
default=2,
metadata={"help": "Number of batches to sample per epoch."},
)
train_batch_size: int = field(
default=1,
metadata={"help": "Batch size (per GPU) to use for training."},
)
train_use_8bit_adam: bool = field(
default=False,
metadata={"help": "Use 8bit Adam optimizer from bitsandbytes."},
)
train_learning_rate: float = field(
default=3e-4,
metadata={"help": "Learning rate."},
)
train_adam_beta1: float = field(
default=0.9,
metadata={"help": "Adam beta1."},
)
train_adam_beta2: float = field(
default=0.999,
metadata={"help": "Adam beta2."},
)
train_adam_weight_decay: float = field(
default=1e-4,
metadata={"help": "Adam weight decay."},
)
train_adam_epsilon: float = field(
default=1e-8,
metadata={"help": "Adam epsilon."},
)
train_gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of gradient accumulation steps."},
)
train_max_grad_norm: float = field(
default=1.0,
metadata={"help": "Maximum gradient norm for gradient clipping."},
)
train_num_inner_epochs: int = field(
default=1,
metadata={"help": "Number of inner epochs per outer epoch."},
)
train_cfg: bool = field(
default=True,
metadata={"help": "Whether to use classifier-free guidance during training."},
)
train_adv_clip_max: float = field(
default=5.0,
metadata={"help": "Clip advantages to the range."},
)
train_clip_range: float = field(
default=1e-4,
metadata={"help": "PPO clip range."},
)
train_timestep_fraction: float = field(
default=1.0,
metadata={"help": "Fraction of timesteps to train on."},
)
per_prompt_stat_tracking: bool = field(
default=False,
metadata={"help": "Whether to track statistics for each prompt separately."},
)
per_prompt_stat_tracking_buffer_size: int = field(
default=16,
metadata={"help": "Number of reward values to store in the buffer for each prompt."},
)
per_prompt_stat_tracking_min_count: int = field(
default=16,
metadata={"help": "Minimum number of reward values to store in the buffer."},
)
async_reward_computation: bool = field(
default=False,
metadata={"help": "Whether to compute rewards asynchronously."},
)
max_workers: int = field(
default=2,
metadata={"help": "Maximum number of workers to use for async reward computation."},
)
negative_prompts: str = field(
default="",
metadata={"help": "Comma-separated list of prompts to use as negative examples."},
)
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether to push the final model checkpoint to the Hub."},
)
def to_dict(self):
output_dict = {}
for key, value in self.__dict__.items():
output_dict[key] = value
return flatten_dict(output_dict)
def __post_init__(self):
if self.train_use_8bit_adam and not is_bitsandbytes_available():
raise ImportError(
"You need to install bitsandbytes to use 8bit Adam. "
"You can install it with `pip install bitsandbytes`."
)
| trl/trl/trainer/ddpo_config.py/0 | {
"file_path": "trl/trl/trainer/ddpo_config.py",
"repo_id": "trl",
"token_count": 4999
} | 547 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from transformers import TrainingArguments
@dataclass
class OnlineDPOConfig(TrainingArguments):
r"""
Configuration class for the [`OnlineDPOTrainer`].
This class includes only the parameters that are specific to Online DPO training. For a full list of training
arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this
class may differ from those in [`~transformers.TrainingArguments`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
reward_model_path (`str` or `None`, *optional*, defaults to `None`):
Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both.
judge (`str` or `None`, *optional*, defaults to `None`):
Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both.
max_new_tokens (`int`, *optional*, defaults to `64`):
Maximum number of tokens to generate per completion.
        max_length (`int`, *optional*, defaults to `512`):
Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If the
sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the completion as
possible.
temperature (`float`, *optional*, defaults to `0.9`):
Temperature for sampling. The higher the temperature, the more random the completions.
missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`):
            Penalty applied to the score when the model fails to generate an EOS token. This is useful to encourage
            the model to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be
            a positive value.
beta (`float` or `list[float]`, *optional*, defaults to `0.1`):
Parameter controlling the deviation from the reference model. Higher β means less deviation from the
reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided then the β is
selected for each new epoch and the last β is used for the rest of the epochs.
loss_type (`str`, *optional*, defaults to `"sigmoid"`):
Type of loss to use. Possible values are:
- `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
- `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
disable_dropout (`bool`, *optional*, defaults to `True`):
Whether to disable dropout in the model and reference model.
use_vllm (`bool`, *optional*, defaults to `False`):
Whether to use vLLM for generating completions. Requires vLLM to be installed (`pip install vllm`).
vllm_model_impl (`str`, *optional*, defaults to `"vllm"`):
Model implementation to use for vLLM. Must be one of `"transformers"` or `"vllm"`. `"transformers"`: Use
the `transformers` backend for model implementation. `"vllm"`: Use the `vllm` library for model
implementation.
gpu_memory_utilization (`float`, *optional*, defaults to `0.55`):
The vLLM memory utilization. The default value is 0.55.
ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation,
improving generation speed. However, disabling this option allows training models that exceed the VRAM
capacity of a single GPU, albeit at the cost of slower generation.
model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
string.
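
    Example:

    A minimal construction sketch (the output directory, reward model path, and values below are illustrative
    placeholders, not recommended settings):

    ```python
    >>> from trl import OnlineDPOConfig

    >>> training_args = OnlineDPOConfig(
    ...     output_dir="online-dpo-output",
    ...     reward_model_path="my-reward-model",  # hypothetical path; alternatively, set `judge`
    ...     max_new_tokens=64,
    ...     temperature=0.9,
    ... )
    >>> training_args.max_new_tokens
    64
    ```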
"""
# Parameters whose default values are overridden from TrainingArguments
learning_rate: float = field(
default=5e-7,
metadata={"help": "The initial learning rate for AdamW."},
)
logging_steps: float = field(
default=10,
metadata={
"help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
"will be interpreted as ratio of total training steps."
},
)
gradient_checkpointing: bool = field(
default=True,
metadata={
"help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
},
)
bf16: Optional[bool] = field(
default=None,
metadata={
"help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
"architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
"`fp16` is not set."
},
)
reward_model_path: Optional[str] = field(
default=None,
metadata={
"help": "Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both."
},
)
judge: Optional[str] = field(
default=None,
metadata={
"help": "Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both."
},
)
max_new_tokens: int = field(
default=64,
metadata={"help": "Maximum number of tokens to generate per completion."},
)
max_length: int = field(
default=512,
metadata={
"help": "Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If "
"the sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the "
"completion as possible."
},
)
temperature: float = field(
default=0.9,
metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."},
)
missing_eos_penalty: Optional[float] = field(
default=None,
metadata={
"help": "Penalty applied to the score when the model fails to generate an EOS token. This is useful to "
"encourage to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be "
"a positive value."
},
)
beta: list[float] = field(
default_factory=lambda: [0.1],
metadata={
"help": "Parameter controlling the deviation from the reference model. Higher β means less deviation from "
"the reference model. For the IPO loss (`loss_type='ipo'`), β is the regularization parameter denoted by "
"τ in the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided then the β "
"is selected for each new epoch and the last β is used for the rest of the epochs."
},
)
loss_type: str = field(
default="sigmoid",
metadata={
"help": "Type of loss to use.",
"choices": ["sigmoid", "ipo"],
},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of processes to use for processing the dataset."},
)
disable_dropout: bool = field(
default=True,
metadata={"help": "Whether to disable dropout in the model."},
)
use_vllm: bool = field(
default=False,
metadata={
"help": "Whether to use vLLM for generating completions. Requires vLLM to be installed "
"(`pip install vllm`)."
},
)
vllm_model_impl: str = field(
default="vllm",
metadata={
"help": "Model implementation to use for vLLM. Must be one of `transformers` or `vllm`. `transformers`: "
"Use the `transformers` backend for model implementation. `vllm`: Use the `vllm` library for "
"model implementation."
},
)
gpu_memory_utilization: Optional[float] = field(
default=0.55,
metadata={
"help": "The vLLM memory utilization. The default value is 0.55.",
},
)
ds3_gather_for_generation: bool = field(
default=True,
metadata={
"help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for "
"generation, improving generation speed. However, disabling this option allows training models that "
"exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation."
},
)
model_init_kwargs: Optional[dict[str, Any]] = field(
default=None,
metadata={
"help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model "
"from a string."
},
)
def __post_init__(self):
self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16
super().__post_init__()
if hasattr(self.beta, "__len__") and len(self.beta) == 1:
self.beta = self.beta[0]
| trl/trl/trainer/online_dpo_config.py/0 | {
"file_path": "trl/trl/trainer/online_dpo_config.py",
"repo_id": "trl",
"token_count": 3912
} | 548 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import textwrap
from typing import Any, Callable, Optional, Union
import jinja2
import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import Dataset, IterableDataset
from transformers import (
BaseImageProcessor,
FeatureExtractionMixin,
PreTrainedModel,
PreTrainedTokenizerBase,
ProcessorMixin,
TrainerCallback,
is_apex_available,
is_wandb_available,
)
from transformers.trainer_utils import EvalPrediction
from transformers.training_args import OptimizerNames
from transformers.utils import is_peft_available
from ..data_utils import is_conversational, maybe_apply_chat_template
from ..models.utils import unwrap_model_for_generation
from .judges import BasePairwiseJudge
from .online_dpo_trainer import OnlineDPOTrainer
from .utils import (
SIMPLE_CHAT_TEMPLATE,
empty_cache,
generate_model_card,
get_comet_experiment_url,
get_reward,
selective_log_softmax,
truncate_right,
)
from .xpo_config import XPOConfig
if is_apex_available():
from apex import amp
if is_wandb_available():
import wandb
if is_peft_available():
from peft import PeftModel
class XPOTrainer(OnlineDPOTrainer):
r"""
    Initialize XPOTrainer as a subclass of [`OnlineDPOTrainer`].
Args:
model (`transformers.PreTrainedModel`):
The model to train, preferably an `AutoModelForCausalLM`.
ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation
and loss. If no reference model is provided, the trainer will create a reference model with the same
architecture as the model to be optimized.
reward_model (`transformers.PreTrainedModel`):
The reward model to score completions with, preferably an `AutoModelForSequenceClassification`.
judge (`BasePairwiseJudge`):
The judge to use for pairwise comparison of model completions.
args (`XPOConfig`):
The XPO config arguments to use for training.
data_collator (`transformers.DataCollator`):
The data collator to use for training. If None is specified, the default data collator
(`DPODataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the
sequences in the batch, given a dataset of paired sequences.
train_dataset (`datasets.Dataset`):
The dataset to use for training.
eval_dataset (`datasets.Dataset`):
The dataset to use for evaluation.
processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*, defaults to `None`):
Processing class used to process the data. If provided, will be used to automatically process the inputs
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
reuse the fine-tuned model.
peft_config (`dict`):
The peft config to use for training.
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary mapping
            strings to metric values.
callbacks (`list[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
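
    Example:

    A minimal wiring sketch (the model, dataset, and judge choices are illustrative; any judge implementing
    [`BasePairwiseJudge`], such as [`PairRMJudge`], should work):

    ```python
    >>> from datasets import load_dataset
    >>> from transformers import AutoModelForCausalLM, AutoTokenizer
    >>> from trl import PairRMJudge, XPOConfig, XPOTrainer

    >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    >>> dataset = load_dataset("trl-lib/ultrafeedback-prompt", split="train")

    >>> training_args = XPOConfig(output_dir="Qwen2-0.5B-XPO")
    >>> trainer = XPOTrainer(
    ...     model=model,
    ...     judge=PairRMJudge(),
    ...     args=training_args,
    ...     processing_class=tokenizer,
    ...     train_dataset=dataset,
    ... )
    >>> trainer.train()
    ```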
"""
_tag_names = ["trl", "xpo"]
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
ref_model: Union[PreTrainedModel, nn.Module] = None,
reward_model: Optional[nn.Module] = None,
judge: Optional[BasePairwiseJudge] = None,
args: Optional[XPOConfig] = None,
data_collator: Optional[Callable] = None,
train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
processing_class: Optional[
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
] = None,
peft_config: Optional[dict] = None,
compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
callbacks: Optional[list[TrainerCallback]] = None,
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
) -> None:
super().__init__(
model=model,
ref_model=ref_model,
judge=judge,
reward_model=reward_model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=processing_class,
reward_processing_class=processing_class, # for now, XPOTrainer can't use any reward model
peft_config=peft_config,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
self._alpha = self.args.alpha
# Overwrite the stats dictionary to include XPO specific statistics
self.stats = {
# Remove "non_score_reward", "rlhf_reward", "scores"
# Add "loss/dpo", "loss/xpo"
"loss/dpo": [],
"loss/xpo": [],
"objective/kl": [],
"objective/entropy": [],
"rewards/chosen": [],
"rewards/rejected": [],
"rewards/accuracies": [],
"rewards/margins": [],
"logps/chosen": [],
"logps/rejected": [],
# Replace "contain_eos_token" by "model_contain_eos_token" and "ref_contain_eos_token"
"val/model_contain_eos_token": [],
"val/ref_contain_eos_token": [],
"alpha": [],
"beta": [],
}
if self.reward_model is not None:
# Replace "scores" by "model_scores" and "ref_scores"
self.stats["objective/model_scores"] = []
self.stats["objective/ref_scores"] = []
self.stats["objective/scores_margin"] = []
@property
def alpha(self):
if isinstance(self._alpha, list):
epoch = self.state.epoch
return self._alpha[epoch] if epoch < len(self._alpha) else self._alpha[-1]
else:
return self._alpha
def _generate_completions(self, prompts, model):
with unwrap_model_for_generation(model, self.accelerator) as unwrapped_policy_model_for_gen:
model_output = unwrapped_policy_model_for_gen.generate(
input_ids=prompts["input_ids"],
attention_mask=prompts["attention_mask"],
generation_config=self.generation_config,
)
actual_model_for_ref_generation: torch.nn.Module
if self.ref_model is None:
unwrapped_main_model_for_ref_logic = self.accelerator.unwrap_model(model)
if is_peft_available() and isinstance(unwrapped_main_model_for_ref_logic, PeftModel):
actual_model_for_ref_generation = unwrapped_main_model_for_ref_logic.get_base_model()
else:
actual_model_for_ref_generation = unwrapped_main_model_for_ref_logic
else:
actual_model_for_ref_generation = self.accelerator.unwrap_model(self.ref_model)
with unwrap_model_for_generation(actual_model_for_ref_generation, self.accelerator) as final_ref_model_for_gen:
ref_output = final_ref_model_for_gen.generate(
input_ids=prompts["input_ids"],
attention_mask=prompts["attention_mask"],
generation_config=self.generation_config,
)
return model_output, ref_output
def _process_completions(self, model_output, ref_output, prompts):
context_length = prompts["input_ids"].shape[1]
# Process model completions
model_completion_ids = model_output[:, context_length:]
model_completion_ids, model_completion_mask = truncate_right(
model_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id
)
model_data = {
"input_ids": torch.cat((prompts["input_ids"], model_completion_ids), dim=1),
"attention_mask": torch.cat((prompts["attention_mask"], model_completion_mask), dim=1),
"raw": prompts["raw"],
}
# Process reference model completions
ref_completion_ids = ref_output[:, context_length:]
ref_completion_ids, ref_completion_mask = truncate_right(
ref_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id
)
ref_data = {
"input_ids": torch.cat((prompts["input_ids"], ref_completion_ids), dim=1),
"attention_mask": torch.cat((prompts["attention_mask"], ref_completion_mask), dim=1),
"raw": prompts["raw"],
}
return model_data, ref_data
def _compute_rewards(self, model_data, ref_data, context_length):
with torch.no_grad():
_, model_scores, _ = get_reward(
self.reward_model, model_data["input_ids"], self.processing_class.pad_token_id, context_length
)
_, ref_scores, _ = get_reward(
self.reward_model, ref_data["input_ids"], self.processing_class.pad_token_id, context_length
)
# Apply EOS penalty if needed
if self.args.missing_eos_penalty is not None:
model_contain_eos = torch.any(model_data["input_ids"] == self.processing_class.eos_token_id, dim=-1)
ref_contain_eos = torch.any(ref_data["input_ids"] == self.processing_class.eos_token_id, dim=-1)
model_scores[~model_contain_eos] -= self.args.missing_eos_penalty
ref_scores[~ref_contain_eos] -= self.args.missing_eos_penalty
return model_scores, ref_scores
def _compute_judge(self, model_data, ref_data, context_length):
prompts = model_data["raw"]
model_data_completions = self.processing_class.batch_decode(
model_data["input_ids"][:, context_length:], skip_special_tokens=True
)
model_data_completions = [completion.strip() for completion in model_data_completions]
ref_data_completions = self.processing_class.batch_decode(
ref_data["input_ids"][:, context_length:], skip_special_tokens=True
)
ref_data_completions = [completion.strip() for completion in ref_data_completions]
if is_conversational({"prompt": prompts[0]}):
model_data_completions = [
[{"role": "assistant", "content": completion}] for completion in model_data_completions
]
environment = jinja2.Environment()
template = environment.from_string(SIMPLE_CHAT_TEMPLATE)
prompts = [template.render(messages=message) for message in prompts]
model_data_completions = [template.render(messages=completion) for completion in model_data_completions]
ref_data_completions = [
[{"role": "assistant", "content": completion}] for completion in ref_data_completions
]
ref_data_completions = [template.render(messages=completion) for completion in ref_data_completions]
ranks_of_first_completion = self.judge.judge(
prompts,
list(zip(model_data_completions, ref_data_completions)),
)
# convert ranks to a True/False mask:
# when rank == 0, it means the first completion is the best
# when rank == 1, it means the second completion is the best
return torch.tensor([rank == 0 for rank in ranks_of_first_completion], device=model_data["input_ids"].device)
def _compute_logprobs(self, model, model_data, ref_data, context_length):
def compute_logprobs_for_data(m, data):
output = m(data["input_ids"], attention_mask=data["attention_mask"])
logits = output.logits[:, context_length - 1 : -1]
token_logprobs = selective_log_softmax(logits, data["input_ids"][:, context_length:])
return token_logprobs
# Compute logprobs for model completions
model_logprobs_model_data = compute_logprobs_for_data(model, model_data)
# Compute logprobs for model on reference completions (for XPO loss)
model_logprobs_ref_data = compute_logprobs_for_data(model, ref_data)
# Compute logprobs for reference model completions
with torch.no_grad():
if self.ref_model is None:
with model.disable_adapter():
ref_logprobs_model_data = compute_logprobs_for_data(model, model_data)
ref_logprobs_ref_data = compute_logprobs_for_data(model, ref_data)
else:
ref_logprobs_model_data = compute_logprobs_for_data(self.ref_model, model_data)
ref_logprobs_ref_data = compute_logprobs_for_data(self.ref_model, ref_data)
# Mask padding tokens
model_padding_mask = model_data["attention_mask"][:, context_length:] == 0
ref_padding_mask = ref_data["attention_mask"][:, context_length:] == 0
model_logprobs_model_data = model_logprobs_model_data.masked_fill(model_padding_mask, 0.0)
model_logprobs_ref_data = model_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0)
ref_logprobs_ref_data = ref_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0)
ref_logprobs_model_data = ref_logprobs_model_data.masked_fill(model_padding_mask, 0.0)
return model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data
def _compute_losses(
self,
model_logprobs_model_data,
model_logprobs_ref_data,
ref_logprobs_ref_data,
ref_logprobs_model_data,
chosen_mask,
):
# Compute log probs
model_logprobs_model_data_sum = model_logprobs_model_data.sum(1)
model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1)
ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1)
ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1)
chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs
rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs
# Compute logits as the difference between chosen and rejected log ratios
logits = chosen_log_ratios - rejected_log_ratios
if self.args.loss_type == "sigmoid":
dpo_losses = -F.logsigmoid(self.beta * logits)
elif self.args.loss_type == "ipo":
dpo_losses = (logits - 1 / (2 * self.beta)) ** 2
else:
raise NotImplementedError(f"invalid loss type {self.args.loss_type}")
# Compute XPO specific loss
xpo_losses = self.alpha * model_logprobs_ref_data_sum
# Total loss
loss = (dpo_losses + xpo_losses).mean()
return loss, dpo_losses, xpo_losses
def _log_statistics(
self,
model_data,
ref_data,
model_logprobs_model_data,
model_logprobs_ref_data,
ref_logprobs_ref_data,
ref_logprobs_model_data,
chosen_mask,
dpo_losses,
xpo_losses,
context_length,
model_scores=None,
ref_scores=None,
):
# Helper function to gather and compute mean
def gather_mean(tensor):
return self.accelerator.gather_for_metrics(tensor).mean().item()
# Log losses
self.stats["loss/dpo"].append(gather_mean(dpo_losses))
self.stats["loss/xpo"].append(gather_mean(xpo_losses))
# Log scores
if self.reward_model is not None:
self.stats["objective/model_scores"].append(gather_mean(model_scores))
self.stats["objective/ref_scores"].append(gather_mean(ref_scores))
self.stats["objective/scores_margin"].append(gather_mean(model_scores - ref_scores))
# Log logprobs
model_logprobs_model_data_sum = model_logprobs_model_data.sum(1)
model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1)
ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1)
ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1)
chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs
rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs
self.stats["logps/chosen"].append(gather_mean(chosen_model_logprobs.mean() + chosen_ref_logprobs.mean()))
self.stats["logps/rejected"].append(gather_mean(rejected_model_logprobs.mean() + rejected_ref_logprobs.mean()))
# Log rewards
# Compute various statistics
chosen_rewards = chosen_log_ratios * self.beta
rejected_rewards = rejected_log_ratios * self.beta
self.stats["rewards/chosen"].append(gather_mean(chosen_rewards.mean()))
self.stats["rewards/rejected"].append(gather_mean(rejected_rewards.mean()))
# Calculate KL divergence for model and ref data
kl_model_data = model_logprobs_model_data - ref_logprobs_model_data
kl_ref_data = model_logprobs_ref_data - ref_logprobs_ref_data
mean_kl = (kl_model_data.sum(1) + kl_ref_data.sum(1)).mean() / 2
self.stats["objective/kl"].append(gather_mean(mean_kl))
# Calculate entropy for model and ref data
entropy_model_data = -model_logprobs_model_data.sum(1)
entropy_ref_data = -model_logprobs_ref_data.sum(1)
mean_entropy = (entropy_model_data.mean() + entropy_ref_data.mean()) / 2
self.stats["objective/entropy"].append(gather_mean(mean_entropy))
# Calculate margins
margin = chosen_rewards - rejected_rewards
self.stats["rewards/margins"].append(gather_mean(margin.mean()))
# Calculate accuracy
accuracy = (margin > 0).float()
self.stats["rewards/accuracies"].append(gather_mean(accuracy.mean()))
# Log EOS token statistics
model_eos = (model_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1)
ref_eos = (ref_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1)
self.stats["val/model_contain_eos_token"].append(gather_mean(model_eos.float()))
self.stats["val/ref_contain_eos_token"].append(gather_mean(ref_eos.float()))
# Log alpha and beta
self.stats["alpha"].append(self.alpha)
self.stats["beta"].append(self.beta)
def training_step(
self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
) -> torch.Tensor:
model.train()
# Apply chat template and tokenize the input
batch_size = len(next(iter(inputs.values())))
prompts = inputs["prompt"]
inputs = [{k: v[i] for k, v in inputs.items()} for i in range(batch_size)]
inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs]
inputs = [self.tokenize_row(x, self.model.config.is_encoder_decoder, self.processing_class) for x in inputs]
inputs = self.data_collator(inputs)
# need the prompt_ only
inputs = self._prepare_inputs(inputs)
context_length = inputs["prompt_input_ids"].shape[1]
prompts = {
"input_ids": inputs["prompt_input_ids"],
"attention_mask": inputs["prompt_attention_mask"],
"raw": prompts,
}
del inputs
# Sample completions from both the model and the reference model
model_output, ref_output = self._generate_completions(prompts, model)
# Process model completions
model_data, ref_data = self._process_completions(model_output, ref_output, prompts)
# Compute rewards
if self.reward_model is not None:
model_scores, ref_scores = self._compute_rewards(model_data, ref_data, context_length)
chosen_mask = model_scores >= ref_scores
else:
model_scores, ref_scores = None, None
chosen_mask = self._compute_judge(model_data, ref_data, context_length)
# Compute logprobs
model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data = (
self._compute_logprobs(model, model_data, ref_data, context_length)
)
# Compute loss
loss, dpo_losses, xpo_losses = self._compute_losses(
model_logprobs_model_data,
model_logprobs_ref_data,
ref_logprobs_ref_data,
ref_logprobs_model_data,
chosen_mask,
)
# Log everything
self._log_statistics(
model_data,
ref_data,
model_logprobs_model_data.detach(),
model_logprobs_ref_data.detach(),
ref_logprobs_ref_data,
ref_logprobs_model_data,
chosen_mask,
dpo_losses.detach(),
xpo_losses.detach(),
context_length,
model_scores,
ref_scores,
)
if (
self.args.torch_empty_cache_steps is not None
and self.state.global_step % self.args.torch_empty_cache_steps == 0
):
empty_cache()
kwargs = {}
# For LOMO optimizers you need to explicitly use the learning rate
if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
kwargs["learning_rate"] = self._get_learning_rate()
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
self.accelerator.backward(loss, **kwargs)
return loss.detach() / self.args.gradient_accumulation_steps
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
# normalize `tags` to a mutable set
if tags is None:
tags = set()
elif isinstance(tags, str):
tags = {tags}
else:
tags = set(tags)
if hasattr(self.model.config, "unsloth_version"):
tags.add("unsloth")
tags.update(self._tag_names)
# docstyle-ignore
citation = textwrap.dedent("""\
        @article{xie2024exploratory,
title = {{Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF}},
author = {Tengyang Xie and Dylan J. Foster and Akshay Krishnamurthy and Corby Rosset and Ahmed Awadallah and Alexander Rakhlin},
year = 2024,
eprint = {arXiv:2405.21046}
}""")
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=tags,
wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="XPO",
trainer_citation=citation,
paper_title="Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF",
paper_id="2405.21046",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/xpo_trainer.py/0 | {
"file_path": "trl/trl/trainer/xpo_trainer.py",
"repo_id": "trl",
"token_count": 11733
} | 549 |
# Let's Fine-Tune Your Model for Function-Calling
We're now ready to fine-tune our first model for function-calling 🔥.
## How do we train our model for function-calling?
> Answer: We need **data**
A model training process can be divided into 3 steps:
1. **The model is pre-trained on a large quantity of data**. The output of that step is a **pre-trained model**. For instance, [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b). It's a base model and only knows how **to predict the next token without strong instruction following capabilities**.
2. To be useful in a chat context, the model then needs to be **fine-tuned** to follow instructions. In this step, it can be trained by model creators, the open-source community, you, or anyone. For instance, [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) is an instruction-tuned model by the Google Team behind the Gemma project.
3. The model can then be **aligned** to the creator's preferences. For instance, a customer service chat model that must never be impolite to customers.
Usually a complete product like Gemini or Mistral **will go through all 3 steps**, whereas the models you can find on Hugging Face have completed one or more steps of this training.
In this tutorial, we will build a function-calling model based on [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it). We choose the fine-tuned model [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) instead of the base model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b) because the fine-tuned model has been improved for our use-case.
Starting from the pre-trained model **would require more training in order to learn instruction following, chat AND function-calling**.
By starting from the instruction-tuned model, **we minimize the amount of information that our model needs to learn**.
## LoRA (Low-Rank Adaptation of Large Language Models)
LoRA is a popular and lightweight training technique that significantly **reduces the number of trainable parameters**.
It works by **inserting a smaller number of new weights as an adapter into the model to train**. This makes training with LoRA much faster and more memory-efficient, and it produces smaller model weights (a few hundred MBs), which are easier to store and share.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/blog_multi-lora-serving_LoRA.gif" alt="LoRA inference" width="50%"/>
LoRA works by adding pairs of rank decomposition matrices to Transformer layers, typically focusing on linear layers. During training, we will "freeze" the rest of the model and will only update the weights of those newly added adapters.
By doing so, the number of **parameters** that we need to train drops considerably as we only need to update the adapter's weights.
During inference, the input is passed into the adapter and the base model, or these adapter weights can be merged with the base model, resulting in no additional latency overhead.
LoRA is particularly useful for adapting **large** language models to specific tasks or domains while keeping resource requirements manageable. This helps reduce the memory **required** to train a model.
If you want to learn more about how LoRA works, you should check out this [tutorial](https://huggingface.co/learn/nlp-course/chapter11/4?fw=pt).
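To make this concrete, here is a minimal, illustrative sketch of how LoRA adapters are typically attached with the `peft` library. The rank, alpha, and `target_modules` values below are assumptions for illustration, not the exact settings used in the course notebook.
```python
# Illustrative sketch only: attaching LoRA adapters to gemma-2-2b-it with peft.
# r, lora_alpha, and target_modules are assumed values, not the course's exact configuration.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it")

lora_config = LoraConfig(
    r=16,                                  # rank of the low-rank decomposition matrices
    lora_alpha=32,                         # scaling factor applied to the adapter updates
    target_modules=["q_proj", "v_proj"],   # which linear layers receive adapters (assumed)
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)

model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()  # only the small adapter matrices are trainable
```
The wrapped `model` can then be passed to a regular training loop or trainer; the base weights stay frozen and only the adapter weights are updated.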
## Fine-Tuning a Model for Function-Calling
You can access the tutorial notebook 👉 [here](https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb).
Then, click on [Open in Colab](https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb) to run it in a Colab Notebook.
| agents-course/units/en/bonus-unit1/fine-tuning.mdx/0 | {
"file_path": "agents-course/units/en/bonus-unit1/fine-tuning.mdx",
"repo_id": "agents-course",
"token_count": 1068
} | 0 |
# Onboarding: Your First Steps ⛵
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Time to Onboard" width="100%"/>
Now that you have all the details, let's get started! We're going to do four things:
1. **Create your Hugging Face Account**, if you haven't already done so
2. **Sign up to Discord and introduce yourself** (don't be shy 🤗)
3. **Follow the Hugging Face Agents Course** on the Hub
4. **Spread the word** about the course
### Step 1: Create Your Hugging Face Account
(If you haven't already) create a Hugging Face account <a href='https://huggingface.co/join' target='_blank'>here</a>.
### Step 2: Join Our Discord Community
👉🏻 Join our Discord server <a href="https://discord.gg/UrrTSsSyjb" target="_blank">here.</a>
When you join, remember to introduce yourself in `#introduce-yourself`.
We have multiple AI Agent-related channels:
- `agents-course-announcements`: for the **latest course information**.
- `🎓-agents-course-general`: for **general discussions and chitchat**.
- `agents-course-questions`: to **ask questions and help your classmates**.
- `agents-course-showcase`: to **show your best agents**.
In addition, you can check:
- `smolagents`: for **discussion and support with the library**.
If this is your first time using Discord, we've written a Discord 101 guide covering best practices. Check out [the next section](discord101).
### Step 3: Follow the Hugging Face Agent Course Organization
Stay up to date with the latest course materials, updates, and announcements **by following the Hugging Face Agents Course Organization**.
👉 Go <a href="https://huggingface.co/agents-course" target="_blank">here</a> and click on **follow**.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/hf_course_follow.gif" alt="Follow" width="100%"/>
### Step 4: Spread the word about the course
Help us make this course more visible! There are two ways you can help us:
1. Show your support by giving a ⭐ to <a href="https://github.com/huggingface/agents-course" target="_blank">the course's repository</a>.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/please_star.gif" alt="Repo star"/>
2. Share Your Learning Journey: Let others **know you're taking this course**! We've prepared an illustration you can use in your social media posts
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png">
You can download the image by clicking 👉 [here](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png?download=true)
### Step 5: Running Models Locally with Ollama (In case you run into Credit limits)
1. **Install Ollama**
Follow the official instructions <a href="https://ollama.com/download" target="_blank"> here.</a>
2. **Pull a model locally**
```bash
ollama pull qwen2:7b
```
Here, we pull the <a href="https://ollama.com/library/qwen2:7b" target="_blank"> qwen2:7b model</a>. Check out <a href="https://ollama.com/search" target="_blank">the Ollama website</a> for more models.
3. **Start Ollama in the background (In one terminal)**
``` bash
ollama serve
```
If you run into the error "listen tcp 127.0.0.1:11434: bind: address already in use", you can run `sudo lsof -i :11434` to identify the process ID (PID) currently using this port. If that process is `ollama`, the installation script above has most likely already started the Ollama service, so you can skip this step.
4. **Use `LiteLLMModel` Instead of `InferenceClientModel`**
To use the `LiteLLMModel` class from `smolagents`, install the `litellm` extra with `pip`:
``` bash
pip install 'smolagents[litellm]'
```
``` python
from smolagents import LiteLLMModel
model = LiteLLMModel(
model_id="ollama_chat/qwen2:7b", # Or try other Ollama-supported models
api_base="http://127.0.0.1:11434", # Default Ollama local server
num_ctx=8192,
)
```
5. **Why does this work?**
- Ollama serves models locally using an OpenAI-compatible API at `http://localhost:11434`.
- `LiteLLMModel` is built to communicate with any model that supports the OpenAI chat/completion API format.
- This means you can simply swap out `InferenceClientModel` for `LiteLLMModel` with no other code changes required. It's a seamless, plug-and-play solution.
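As an illustration, here is a minimal sketch of what that swap could look like once you reach the agent-building units (assuming `smolagents` is installed and `ollama serve` is running; the query is arbitrary):
```python
from smolagents import CodeAgent, LiteLLMModel

model = LiteLLMModel(
    model_id="ollama_chat/qwen2:7b",     # any Ollama-served model works here
    api_base="http://127.0.0.1:11434",   # local Ollama server
    num_ctx=8192,
)

# The agent code itself is unchanged compared to using InferenceClientModel
agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run("What is the capital of France?")
```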
Congratulations! 🎉 **You've completed the onboarding process**! You're now ready to start learning about AI Agents. Have fun!
Keep Learning, stay awesome 🤗
| agents-course/units/en/unit0/onboarding.mdx/0 | {
"file_path": "agents-course/units/en/unit0/onboarding.mdx",
"repo_id": "agents-course",
"token_count": 1477
} | 1 |
# What are LLMs?
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-1.jpg" alt="Unit 1 planning"/>
In the previous section we learned that each Agent needs **an AI Model at its core**, and that LLMs are the most common type of AI models for this purpose.
Now we will learn what LLMs are and how they power Agents.
This section offers a concise technical explanation of the use of LLMs. If you want to dive deeper, you can check our <a href="https://huggingface.co/learn/nlp-course/chapter1/1" target="_blank">free Natural Language Processing Course</a>.
## What is a Large Language Model?
An LLM is a type of AI model that excels at **understanding and generating human language**. They are trained on vast amounts of text data, allowing them to learn patterns, structure, and even nuance in language. These models typically consist of many millions of parameters.
Most LLMs nowadays are **built on the Transformer architecture**, a deep learning architecture based on the "Attention" mechanism, which has gained significant interest since the release of BERT from Google in 2018.
<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/transformer.jpg" alt="Transformer"/>
<figcaption>The original Transformer architecture looked like this, with an encoder on the left and a decoder on the right.
</figcaption>
</figure>
There are 3 types of transformers:
1. **Encoders**
An encoder-based Transformer takes text (or other data) as input and outputs a dense representation (or embedding) of that text.
- **Example**: BERT from Google
- **Use Cases**: Text classification, semantic search, Named Entity Recognition
- **Typical Size**: Millions of parameters
2. **Decoders**
A decoder-based Transformer focuses **on generating new tokens to complete a sequence, one token at a time**.
- **Example**: Llama from Meta
- **Use Cases**: Text generation, chatbots, code generation
- **Typical Size**: Billions (in the US sense, i.e., 10^9) of parameters
3. **Seq2Seq (Encoder–Decoder)**
A sequence-to-sequence Transformer _combines_ an encoder and a decoder. The encoder first processes the input sequence into a context representation, then the decoder generates an output sequence.
- **Example**: T5, BART
- **Use Cases**: Translation, Summarization, Paraphrasing
- **Typical Size**: Millions of parameters
Although Large Language Models come in various forms, LLMs are typically decoder-based models with billions of parameters. Here are some of the most well-known LLMs:
| **Model** | **Provider** |
|-----------------------------------|-------------------------------------------|
| **Deepseek-R1** | DeepSeek |
| **GPT4** | OpenAI |
| **Llama 3** | Meta (Facebook AI Research) |
| **SmolLM2** | Hugging Face |
| **Gemma** | Google |
| **Mistral** | Mistral |
The underlying principle of an LLM is simple yet highly effective: **its objective is to predict the next token, given a sequence of previous tokens**. A "token" is the unit of information an LLM works with. You can think of a "token" as if it was a "word", but for efficiency reasons LLMs don't use whole words.
For example, while English has an estimated 600,000 words, an LLM might have a vocabulary of around 32,000 tokens (as is the case with Llama 2). Tokenization often works on sub-word units that can be combined.
For instance, consider how the tokens "interest" and "ing" can be combined to form "interesting", or "ed" can be appended to form "interested."
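To make this concrete, here is a small illustrative sketch using the `transformers` tokenizer API (the checkpoint is only an example; any tokenizer on the Hub behaves similarly):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")

print(tokenizer.tokenize("Interesting tokenization!"))  # sub-word pieces, not whole words
print(tokenizer.encode("Interesting tokenization!"))    # the corresponding token ids
print(len(tokenizer))                                   # vocabulary size of this model
```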
You can experiment with different tokenizers in the interactive playground below:
<iframe
src="https://agents-course-the-tokenizer-playground.static.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
Each LLM has some **special tokens** specific to the model. The LLM uses these tokens to open and close the structured components of its generation. For example, to indicate the start or end of a sequence, message, or response. Moreover, the input prompts that we pass to the model are also structured with special tokens. The most important of those is the **End of sequence token** (EOS).
The forms of special tokens are highly diverse across model providers.
The table below illustrates the diversity of special tokens.
<table>
<thead>
<tr>
<th><strong>Model</strong></th>
<th><strong>Provider</strong></th>
<th><strong>EOS Token</strong></th>
<th><strong>Functionality</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>GPT4</strong></td>
<td>OpenAI</td>
<td><code><|endoftext|></code></td>
<td>End of message text</td>
</tr>
<tr>
<td><strong>Llama 3</strong></td>
<td>Meta (Facebook AI Research)</td>
<td><code><|eot_id|></code></td>
<td>End of sequence</td>
</tr>
<tr>
<td><strong>Deepseek-R1</strong></td>
<td>DeepSeek</td>
<td><code><|end_of_sentence|></code></td>
<td>End of message text</td>
</tr>
<tr>
<td><strong>SmolLM2</strong></td>
<td>Hugging Face</td>
<td><code><|im_end|></code></td>
<td>End of instruction or message</td>
</tr>
<tr>
<td><strong>Gemma</strong></td>
<td>Google</td>
<td><code><end_of_turn></code></td>
<td>End of conversation turn</td>
</tr>
</tbody>
</table>
<Tip>
We do not expect you to memorize these special tokens, but it is important to appreciate their diversity and the role they play in the text generation of LLMs. If you want to know more about special tokens, you can check out the configuration of the model in its Hub repository. For example, you can find the special tokens of the SmolLM2 model in its <a href="https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct/blob/main/tokenizer_config.json">tokenizer_config.json</a>.
</Tip>
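As a quick illustration, you can also read these special tokens programmatically instead of opening the config file (a minimal sketch using the SmolLM2 model mentioned above):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")

print(tokenizer.eos_token)           # expected to be "<|im_end|>" for this model
print(tokenizer.special_tokens_map)  # full mapping of the tokenizer's special tokens
```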
## Understanding next token prediction
LLMs are said to be **autoregressive**, meaning that **the output from one pass becomes the input for the next one**. This loop continues until the model predicts the next token to be the EOS token, at which point the model can stop.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AutoregressionSchema.gif" alt="Visual Gif of autoregressive decoding" width="60%">
In other words, an LLM will decode text until it reaches the EOS. But what happens during a single decoding loop?
While the full process can be quite technical for the purpose of learning agents, here's a brief overview:
- Once the input text is **tokenized**, the model computes a representation of the sequence that captures information about the meaning and the position of each token in the input sequence.
- This representation goes into the model, which outputs scores that rank the likelihood of each token in its vocabulary as being the next one in the sequence.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/DecodingFinal.gif" alt="Visual Gif of decoding" width="60%">
Based on these scores, we have multiple strategies to select the tokens to complete the sentence.
- The easiest decoding strategy would be to always take the token with the maximum score.
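As a minimal sketch (the checkpoint and prompt are only examples), greedy decoding with `transformers` looks like this:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceTB/SmolLM2-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer("The capital of France is", return_tensors="pt")
# do_sample=False makes generate() pick the highest-scoring token at every step
outputs = model.generate(**inputs, max_new_tokens=10, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```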
You can interact with the decoding process yourself with SmolLM2 in this Space (remember, it decodes until reaching an **EOS** token which is **<|im_end|>** for this model):
<iframe
src="https://agents-course-decoding-visualizer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
- But there are more advanced decoding strategies. For example, *beam search* explores multiple candidate sequences to find the one with the maximum total score, even if some individual tokens have lower scores.
<iframe
src="https://agents-course-beam-search-visualizer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
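Continuing the greedy-decoding sketch from earlier (reusing the same `model`, `tokenizer`, and `inputs`), beam search can be requested through `generate()` as well; the exact hyperparameters here are arbitrary:
```python
# num_beams keeps several candidate sequences alive and returns the best-scoring one overall
outputs = model.generate(
    **inputs,
    max_new_tokens=10,
    num_beams=5,
    early_stopping=True,
    do_sample=False,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```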
If you want to know more about decoding, you can take a look at the [NLP course](https://huggingface.co/learn/nlp-course).
## Attention is all you need
A key aspect of the Transformer architecture is **Attention**. When predicting the next word,
not every word in a sentence is equally important; words like "France" and "capital" in the sentence *"The capital of France is ..."* carry the most meaning.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AttentionSceneFinal.gif" alt="Visual Gif of Attention" width="60%">
This process of identifying the most relevant words to predict the next token has proven to be incredibly effective.
Although the basic principle of LLMs—predicting the next token—has remained consistent since GPT-2, there have been significant advancements in scaling neural networks and making the attention mechanism work for longer and longer sequences.
If you've interacted with LLMs, you're probably familiar with the term *context length*, which refers to the maximum number of tokens the LLM can process, and the maximum _attention span_ it has.
## Prompting the LLM is important
Considering that the only job of an LLM is to predict the next token by looking at every input token, and to choose which tokens are "important", the wording of your input sequence is very important.
The input sequence you provide an LLM is called _a prompt_. Careful design of the prompt makes it easier **to guide the generation of the LLM toward the desired output**.
## How are LLMs trained?
LLMs are trained on large datasets of text, where they learn to predict the next word in a sequence through a self-supervised or masked language modeling objective.
From this unsupervised learning, the model learns the structure of the language and **underlying patterns in text, allowing the model to generalize to unseen data**.
After this initial _pre-training_, LLMs can be fine-tuned on a supervised learning objective to perform specific tasks. For example, some models are trained for conversational structures or tool usage, while others focus on classification or code generation.
## How can I use LLMs?
You have two main options:
1. **Run Locally** (if you have sufficient hardware).
2. **Use a Cloud/API** (e.g., via the Hugging Face Serverless Inference API).
Throughout this course, we will primarily use models via APIs on the Hugging Face Hub. Later on, we will explore how to run these models locally on your hardware.
## How are LLMs used in AI Agents?
LLMs are a key component of AI Agents, **providing the foundation for understanding and generating human language**.
They can interpret user instructions, maintain context in conversations, define a plan and decide which tools to use.
We will explore these steps in more detail in this Unit, but for now, what you need to understand is that the LLM is **the brain of the Agent**.
---
That was a lot of information! We've covered the basics of what LLMs are, how they function, and their role in powering AI agents.
If you'd like to dive even deeper into the fascinating world of language models and natural language processing, don't hesitate to check out our <a href="https://huggingface.co/learn/nlp-course/chapter1/1" target="_blank">free NLP course</a>.
Now that we understand how LLMs work, it's time to see **how LLMs structure their generations in a conversational context**.
To run <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit1/dummy_agent_library.ipynb" target="_blank">this notebook</a>, **you need a Hugging Face token** that you can get from <a href="https://hf.co/settings/tokens" target="_blank">https://hf.co/settings/tokens</a>.
For more information on how to run Jupyter Notebooks, checkout <a href="https://huggingface.co/docs/hub/notebooks">Jupyter Notebooks on the Hugging Face Hub</a>.
You also need to request access to <a href="https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct" target="_blank">the Meta Llama models</a>.
| agents-course/units/en/unit1/what-are-llms.mdx/0 | {
"file_path": "agents-course/units/en/unit1/what-are-llms.mdx",
"repo_id": "agents-course",
"token_count": 3788
} | 2 |
# Quick Self-Check (ungraded) [[quiz2]]
What?! Another Quiz? We know, we know, ... 😅 But this short, ungraded quiz is here to **help you reinforce key concepts you've just learned**.
This quiz covers agent workflows and interactions - essential components for building effective AI agents.
### Q1: What is the purpose of AgentWorkflow in LlamaIndex?
<Question
choices={[
{
text: "To run one or more agents with tools",
explain: "Yes, the AgentWorkflow is the main way to quickly create a system with one or more agents.",
correct: true
},
{
text: "To create a single agent that can query your data without memory",
explain: "No, the AgentWorkflow is more capable than that, the QueryEngine is for simple queries over your data.",
},
{
text: "To automatically build tools for agents",
explain: "The AgentWorkflow does not build tools, that is the job of the developer.",
},
{
text: "To manage agent memory and state",
explain: "Managing memory and state is not the primary purpose of AgentWorkflow.",
}
]}
/>
---
### Q2: What object is used for keeping track of the state of the workflow?
<Question
choices={[
{
text: "State",
explain: "State is not the correct object for workflow state management.",
},
{
text: "Context",
explain: "Context is the correct object used for keeping track of workflow state.",
correct: true
},
{
text: "WorkflowState",
explain: "WorkflowState is not the correct object.",
},
{
text: "Management",
explain: "Management is not a valid object for workflow state.",
}
]}
/>
---
### Q3: Which method should be used if you want an agent to remember previous interactions?
<Question
choices={[
{
text: "run(query_str)",
explain: ".run(query_str) does not maintain conversation history.",
},
{
text: "chat(query_str, ctx=ctx)",
explain: "chat() is not a valid method on workflows.",
},
{
text: "interact(query_str)",
explain: "interact() is not a valid method for agent interactions.",
},
{
text: "run(query_str, ctx=ctx)",
explain: "By passing in and maintaining the context, we can maintain state!",
correct: true
}
]}
/>
---
### Q4: What is a key feature of Agentic RAG?
<Question
choices={[
{
text: "It can only use document-based tools, to answer questions in a RAG workflow",
explain: "Agentic RAG can use different tools, including document-based tools.",
},
{
text: "It automatically answers questions without tools, like a chatbot",
explain: "Agentic RAG does use tools to answer questions.",
},
{
text: "It can decide to use any tool to answer questions, including RAG tools",
explain: "Agentic RAG has the flexibility to use different tools to answer questions.",
correct: true
},
{
text: "It only works with Function Calling Agents",
explain: "Agentic RAG is not limited to Function Calling Agents.",
}
]}
/>
---
Got it? Great! Now let's **do a brief recap of the unit!**
| agents-course/units/en/unit2/llama-index/quiz2.mdx/0 | {
"file_path": "agents-course/units/en/unit2/llama-index/quiz2.mdx",
"repo_id": "agents-course",
"token_count": 814
} | 3 |
# What is Function Calling?
Function calling is a **way for an LLM to take actions on its environment**. It was [first introduced in GPT-4](https://openai.com/index/function-calling-and-other-api-updates/) and was later reproduced in other models.
Just like an Agent's tools, function calling gives the model the capacity to **take an action on its environment**. However, the function-calling capacity **is learned by the model**, and relies **less on prompting than other agent techniques**.
During Unit 1, the Agent **didn't learn to use the Tools**; we simply provided the list, and we relied on the fact that the model **was able to generalize and define a plan using these Tools**.
Here, by contrast, **with function calling, the Agent is fine-tuned (trained) to use Tools**.
## How does the model "learn" to take an action?
In Unit 1, we explored an agent's general workflow. Once the user has given some tools to the agent and prompted it with a query, the model will cycle through:
1. *Think*: What action(s) do I need to take to fulfill the objective?
2. *Act*: Format the action with the correct parameters and stop the generation.
3. *Observe*: Get the result of the execution.
In a "typical" conversation with a model through an API, the conversation alternates between user and assistant messages like this:
```python
conversation = [
    {"role": "user", "content": "I need help with my order"},
    {"role": "assistant", "content": "I'd be happy to help. Could you provide your order number?"},
    {"role": "user", "content": "It's ORDER-123"},
]
```
Function calling brings **new roles to the conversation**!
1. A new role for an **Action**
2. A new role for an **Observation**
If we take the [Mistral API](https://docs.mistral.ai/capabilities/function_calling/) as an example, it would look like this:
```python
conversation = [
    {
        "role": "user",
        "content": "What's the status of my transaction T1001?"
    },
    {
        "role": "assistant",
        "content": "",
        "function_call": {
            "name": "retrieve_payment_status",
            "arguments": "{\"transaction_id\": \"T1001\"}"
        }
    },
    {
        "role": "tool",
        "name": "retrieve_payment_status",
        "content": "{\"status\": \"Paid\"}"
    },
    {
        "role": "assistant",
        "content": "Your transaction T1001 has been successfully paid."
    }
]
```
> ... But you said there's a new role for function calls?
**Yes and no**: in this case, and in many other APIs, the model formats the action to take as an "assistant" message. The chat template will then render this as **special tokens** for function calling.
- `[AVAILABLE_TOOLS]` – Start of the list of available tools
- `[/AVAILABLE_TOOLS]` – End of the list of available tools
- `[TOOL_CALLS]` – Make a call to a tool (i.e., take an "Action")
- `[TOOL_RESULTS]` – "Observe" the result of the action
- `[/TOOL_RESULTS]` – End of the observation (i.e., the model can decode again)
We'll talk about function calling again in this course, but if you want to dive deeper you can check [this excellent documentation section](https://docs.mistral.ai/capabilities/function_calling/).
---
Now that we've learned what function calling is and how it works, let's **add some function-calling capabilities to a model that doesn't have them yet**, [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it), by adding some new special tokens to the model.
To do that, **we first need to understand fine-tuning and LoRA**.
| agents-course/units/es/bonus-unit1/what-is-function-calling.mdx/0 | {
"file_path": "agents-course/units/es/bonus-unit1/what-is-function-calling.mdx",
"repo_id": "agents-course",
"token_count": 1573
} | 4 |
# Actions: Enabling the Agent to Interact with Its Environment
<Tip>
In this section, we explore the concrete steps an AI agent takes to interact with its environment.
We'll cover how actions are represented (using JSON or code), the importance of the stop and parse approach, and introduce different types of agents.
</Tip>
Actions are the concrete steps an **AI agent takes to interact with its environment**.
Whether it's browsing the web for information or controlling a physical device, each action is a deliberate operation executed by the agent.
For example, an agent assisting with customer service might retrieve customer data, offer support articles, or escalate issues to a human representative.
## Types of Agent Actions
There are multiple types of Agents that take actions differently:
| Agent Type | Description |
|------------------------|--------------------------------------------------------------------------------------------------|
| JSON Agent | The Action to take is specified in JSON format. |
| Code Agent | The Agent writes a code block that is interpreted externally. |
| Function-calling Agent | A subcategory of the JSON Agent that has been fine-tuned to generate a new message for each action.|
Actions themselves can serve many purposes:
| Action Type | Description |
|--------------------------|------------------------------------------------------------------------------------------|
| Information Gathering | Performing web searches, querying databases, or retrieving documents. |
| Tool Usage | Making API calls, running calculations, and executing code. |
| Environment Interaction | Manipulating digital interfaces or controlling physical devices. |
| Communication | Engaging with users via chat or collaborating with other agents. |
A crucial part of an agent is the **ability to STOP generating new tokens when an action is complete**, and that is true for all Agent formats: JSON, code, or function calling. This prevents unintended output and ensures that the agent's response is clear and precise.
The LLM only handles text and uses it to describe the action it wants to take and the parameters to supply to the tool.
## The Stop and Parse Approach
One key method for implementing actions is the **stop and parse approach**. This method ensures that the agent's output is structured and predictable:
1. **Generation in a Structured Format**:
The agent outputs its intended action in a clear, predetermined format (JSON or code).
2. **Halting Further Generation**:
Once the action is complete, **the agent stops generating additional tokens**. This prevents extra or erroneous outputs.
3. **Parsing the Output**:
An external parser reads the formatted action, determines which Tool to call, and extracts the required parameters.
For example, an agent needing to check the weather might output:
```json
Thought: I need to check the current weather for New York.
Action:
{
  "action": "get_weather",
  "action_input": {"location": "New York"}
}
```
The framework can then easily parse the name of the function to call and the arguments to apply.
This clear, machine-readable format minimizes errors and enables external tools to accurately process the agent's command.
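To make the stop and parse idea more tangible, here is an illustrative, self-contained sketch of what a parser on the framework side might do; the tool registry and raw model output below are hypothetical examples, not part of any specific framework:
```python
import json

# Hypothetical tool registry: maps action names to callables
TOOLS = {"get_weather": lambda location: f"Weather in {location}: partly cloudy, 15°C"}

raw_output = """Thought: I need to check the current weather for New York.
Action:
{
  "action": "get_weather",
  "action_input": {"location": "New York"}
}"""

# Stop-and-parse: take everything from the first "{" onward and read it as JSON
action = json.loads(raw_output[raw_output.index("{"):])

# Dispatch to the right tool with the extracted arguments
observation = TOOLS[action["action"]](**action["action_input"])
print(observation)  # -> Weather in New York: partly cloudy, 15°C
```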
Note: Function-calling agents operate similarly by structuring each action so that a designated function is invoked with the correct arguments.
We'll dive deeper into those types of Agents in a future Unit.
## Code Agents
An alternative approach is using *Code Agents*.
The idea is: **instead of outputting a simple JSON object**, a Code Agent generates an **executable code block, typically in a high-level language like Python**.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/code-vs-json-actions.png" alt="Code Agents" />
This approach offers several advantages:
- **Expressiveness:** Code can naturally represent complex logic, including loops, conditionals, and nested functions, providing greater flexibility than JSON.
- **Modularity and Reusability:** Generated code can include functions and modules that are reusable across different actions or tasks.
- **Enhanced Debuggability:** With a well-defined programming syntax, code errors are often easier to detect and correct.
- **Direct Integration:** Code Agents can integrate directly with external libraries and APIs, enabling more complex operations such as data processing or real-time decision-making.
For example, a Code Agent tasked with fetching the weather might generate the following Python snippet:
```python
# Code Agent Example: Retrieve Weather Information
def get_weather(city):
    import requests
    api_url = f"https://api.weather.com/v1/location/{city}?apiKey=YOUR_API_KEY"
    response = requests.get(api_url)
    if response.status_code == 200:
        data = response.json()
        return data.get("weather", "No weather information available")
    else:
        return "Error: Could not fetch weather data."

# Execute the function and prepare the final answer
result = get_weather("New York")
final_answer = f"The current weather in New York is: {result}"
print(final_answer)
```
In this example, the Code Agent:
- Retrieves weather data **via an API call**,
- Processes the response,
- And uses the print() function to output a final answer.
This method **also follows the stop and parse approach** by clearly delimiting the code block and signaling when execution is complete (here, by printing the final_answer).
---
We learned that Actions bridge an agent's internal reasoning and its real-world interactions by executing clear, structured tasks, whether through JSON, code, or function calls.
This deliberate execution ensures that each action is precise and ready for external processing via the stop and parse approach. In the next section, we will explore Observations to see how agents capture and integrate feedback from their environment.
After this, we will **finally be ready to build our first Agent**!
| agents-course/units/es/unit1/actions.mdx/0 | {
"file_path": "agents-course/units/es/unit1/actions.mdx",
"repo_id": "agents-course",
"token_count": 2708
} | 5 |
# Building Blocks of LangGraph
To build applications with LangGraph, you need to understand its core components. Let's explore the fundamental building blocks that make up a LangGraph application.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/Building_blocks.png" alt="Building Blocks" width="70%"/>
An application in LangGraph starts from an **entrypoint**, and depending on the execution, the flow may go to one function or another until it reaches the END.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/application.png" alt="Application"/>
## 1. State
**State** is the central concept in LangGraph. It represents all the information that flows through your application.
```python
from typing_extensions import TypedDict
class State(TypedDict):
graph_state: str
```
The state is **user defined**, which is why the fields must be crafted carefully to contain all the data needed for the decision-making process!
> 💡 **Tip:** Think carefully about what information your application needs to track between steps.
## 2. Nodes
**Nodes** are Python functions. Each node:
- Takes the state as input
- Performs some operation
- Returns updates to the state
```python
def node_1(state):
    print("---Node 1---")
    return {"graph_state": state['graph_state'] + " I am"}

def node_2(state):
    print("---Node 2---")
    return {"graph_state": state['graph_state'] + " happy!"}

def node_3(state):
    print("---Node 3---")
    return {"graph_state": state['graph_state'] + " sad!"}
```
For example, nodes can contain:
- **LLM calls**: Generate text or make decisions
- **Tool calls**: Interact with external systems
- **Conditional logic**: Determine the next steps
- **Human intervention**: Get input from the user
> 💡 **Info:** Some nodes needed for the overall flow, such as START and END, are provided directly by LangGraph.
## 3. Edges
**Edges** connect nodes and define the possible paths through your graph:
```python
import random
from typing import Literal

def decide_mood(state) -> Literal["node_2", "node_3"]:
    # Often, we will use the state to decide on the next node to visit
    user_input = state['graph_state']

    # Here, let's just do a 50/50 split between nodes 2 and 3
    if random.random() < 0.5:
        # 50% of the time, we return Node 2
        return "node_2"

    # 50% of the time, we return Node 3
    return "node_3"
```
Edges can be:
- **Direct**: Always go from node A to node B
- **Conditional**: Choose the next node based on the current state
## 4. StateGraph
The **StateGraph** is the container that holds your entire agent workflow:
```python
from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END

# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)

# Logic
builder.add_edge(START, "node_1")
builder.add_conditional_edges("node_1", decide_mood)
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)

# Compile
graph = builder.compile()
```
Which can then be visualized!
```python
# View
display(Image(graph.get_graph().draw_mermaid_png()))
```
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/basic_graph.jpeg" alt="Graph Visualization"/>
But more importantly, invoked:
```python
graph.invoke({"graph_state" : "Hi, this is Lance."})
```
output:
```
---Node 1---
---Node 3---
{'graph_state': 'Hi, this is Lance. I am sad!'}
```
## What's Next?
In the next section, we'll put these concepts into practice by building our first graph. This graph lets Alfred take in your emails, classify them, and draft a preliminary answer if they are genuine.
| agents-course/units/es/unit2/langgraph/building_blocks.mdx/0 | {
"file_path": "agents-course/units/es/unit2/langgraph/building_blocks.mdx",
"repo_id": "agents-course",
"token_count": 1564
} | 6 |
# Creating Agentic Workflows in LlamaIndex
A workflow in LlamaIndex provides a structured way to organize your code into sequential and manageable steps.
Such a workflow is created by defining `Steps` which are triggered by `Events`, and which themselves emit `Events` to trigger further steps.
Let's take a look at Alfred showing an example of a LlamaIndex workflow for a RAG task.

**Workflows offer several key benefits:**
- Clear organization of code into discrete steps
- Event-driven architecture for flexible control flow
- Type-safe communication between steps
- Built-in state management
- Support for both simple and complex agents
As you might have guessed, **workflows strike a great balance between the autonomy of agents and keeping control over the overall workflow.**
So, let's learn how to create a workflow ourselves!
## Creating Workflows
<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/llama-index/workflows.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>
### Basic Workflow Creation
<details>
<summary>Install the Workflow package</summary>
As introduced in the [section on LlamaHub](llama-hub), we can install the Workflow package with the following command:
```bash
pip install llama-index-utils-workflow
```
</details>
We can create a single-step workflow by defining a class that inherits from `Workflow` and decorating your functions with `@step`.
We will also need to add `StartEvent` and `StopEvent`, which are special events used to indicate the start and end of the workflow.
```python
from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step

class MyWorkflow(Workflow):
    @step
    async def my_step(self, ev: StartEvent) -> StopEvent:
        # do something here
        return StopEvent(result="Hello, world!")

w = MyWorkflow(timeout=10, verbose=False)
result = await w.run()
```
As you can see, we can now run the workflow by calling `w.run()`.
### Connecting Multiple Steps
To connect multiple steps, we **create custom events that carry data between steps.**
To do so, we need to add an `Event` that is passed between the steps and transfers the output of the first step to the second step.
```python
from llama_index.core.workflow import Event

class ProcessingEvent(Event):
    intermediate_result: str

class MultiStepWorkflow(Workflow):
    @step
    async def step_one(self, ev: StartEvent) -> ProcessingEvent:
        # Process initial data
        return ProcessingEvent(intermediate_result="Step 1 complete")

    @step
    async def step_two(self, ev: ProcessingEvent) -> StopEvent:
        # Use the intermediate result
        final_result = f"Finished processing: {ev.intermediate_result}"
        return StopEvent(result=final_result)

w = MultiStepWorkflow(timeout=10, verbose=False)
result = await w.run()
result
```
Type hinting is important here, as it ensures that the workflow executes correctly. Let's complicate things a bit more!
### Loops and Branches
Type hinting is the most powerful part of workflows, because it allows us to create branches, loops, and joins to facilitate more complex workflows.
Let's show an example of **creating a loop** by using the union operator `|`.
In the example below, we see that the `LoopEvent` is taken as input for the step and can also be returned as output.
```python
from llama_index.core.workflow import Event
import random

class ProcessingEvent(Event):
    intermediate_result: str

class LoopEvent(Event):
    loop_output: str

class MultiStepWorkflow(Workflow):
    @step
    async def step_one(self, ev: StartEvent | LoopEvent) -> ProcessingEvent | LoopEvent:
        if random.randint(0, 1) == 0:
            print("Bad thing happened")
            return LoopEvent(loop_output="Back to step one.")
        else:
            print("Good thing happened")
            return ProcessingEvent(intermediate_result="First step complete.")

    @step
    async def step_two(self, ev: ProcessingEvent) -> StopEvent:
        # Use the intermediate result
        final_result = f"Finished processing: {ev.intermediate_result}"
        return StopEvent(result=final_result)

w = MultiStepWorkflow(verbose=False)
result = await w.run()
result
```
### Drawing Workflows
We can also draw workflows. Let's use the `draw_all_possible_flows` function to draw the workflow. This stores the workflow in an HTML file.
```python
from llama_index.utils.workflow import draw_all_possible_flows

w = ... # as defined in the previous section
draw_all_possible_flows(w, "flow.html")
```

There is one last neat trick that we will cover in the course, which is the ability to add state to the workflow.
### State Management
State management is useful when you want to keep track of the state of the workflow, so that every step has access to the same state.
We can do this by using the `Context` type hint on top of a parameter in the step function.
```python
from llama_index.core.workflow import Context, StartEvent, StopEvent

@step
async def query(self, ctx: Context, ev: StartEvent) -> StopEvent:
    # store in context
    await ctx.store.set("query", "What is the capital of France?")

    # do something with context and event
    val = ...

    # retrieve from context
    query = await ctx.store.get("query")

    return StopEvent(result=result)
```
Great! Now you know how to create basic workflows in LlamaIndex!
<Tip>There are some more complex nuances to workflows, which you can learn about in <a href="https://docs.llamaindex.ai/en/stable/understanding/workflows/">the LlamaIndex documentation</a>.</Tip>
However, there is another way to create workflows, which relies on the `AgentWorkflow` class. Let's take a look at how we can use this to create a multi-agent workflow.
## Automating workflows with Multi-Agent Workflows
Instead of manual workflow creation, we can use the **`AgentWorkflow` class to create a multi-agent workflow**.
The `AgentWorkflow` uses Workflow Agents to allow you to create a system of one or more agents that can collaborate and hand off tasks to each other based on their specialized capabilities.
This enables building complex agent systems where different agents handle different aspects of a task.
Instead of importing classes from `llama_index.core.agent`, we will import the agent classes from `llama_index.core.agent.workflow`.
One agent must be designated as the root agent in the `AgentWorkflow` constructor.
When a user message comes in, it is first routed to the root agent.
Each agent can then:
- Handle the request directly using its tools
- Hand off to another agent better suited for the task
- Return a response to the user
Let's see how to create a multi-agent workflow.
```python
from llama_index.core.agent.workflow import AgentWorkflow, ReActAgent
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

# Define some tools
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b

llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

# we can pass functions directly without FunctionTool -- the fn/docstring are parsed for the name/description
multiply_agent = ReActAgent(
    name="multiply_agent",
    description="Is able to multiply two integers",
    system_prompt="A helpful assistant that can use a tool to multiply numbers.",
    tools=[multiply],
    llm=llm,
)

addition_agent = ReActAgent(
    name="add_agent",
    description="Is able to add two integers",
    system_prompt="A helpful assistant that can use a tool to add numbers.",
    tools=[add],
    llm=llm,
)

# Create the workflow
workflow = AgentWorkflow(
    agents=[multiply_agent, addition_agent],
    root_agent="multiply_agent",
)

# Run the system
response = await workflow.run(user_msg="Can you add 5 and 3?")
```
Agent tools can also modify the workflow state we mentioned earlier. Before starting the workflow, we can provide an initial state dict that will be available to all agents.
The state is stored in the state key of the workflow context. It will be injected into the state_prompt which augments each new user message.
Let's inject a counter to count function calls by modifying the previous example:
```python
from llama_index.core.workflow import Context

# Define some tools
async def add(ctx: Context, a: int, b: int) -> int:
    """Add two numbers."""
    # update our count
    cur_state = await ctx.store.get("state")
    cur_state["num_fn_calls"] += 1
    await ctx.store.set("state", cur_state)

    return a + b

async def multiply(ctx: Context, a: int, b: int) -> int:
    """Multiply two numbers."""
    # update our count
    cur_state = await ctx.store.get("state")
    cur_state["num_fn_calls"] += 1
    await ctx.store.set("state", cur_state)

    return a * b

...

workflow = AgentWorkflow(
    agents=[multiply_agent, addition_agent],
    root_agent="multiply_agent",
    initial_state={"num_fn_calls": 0},
    state_prompt="Current state: {state}. User message: {msg}",
)

# run the workflow with context
ctx = Context(workflow)
response = await workflow.run(user_msg="Can you add 5 and 3?", ctx=ctx)

# pull out and inspect the state
state = await ctx.store.get("state")
print(state["num_fn_calls"])
```
Congratulations! You have now mastered the basics of Agents in LlamaIndex! 🎉
Let's continue with one final quiz to consolidate your knowledge! 🚀
| agents-course/units/es/unit2/llama-index/workflows.mdx/0 | {
"file_path": "agents-course/units/es/unit2/llama-index/workflows.mdx",
"repo_id": "agents-course",
"token_count": 3888
} | 7 |
# Conclusion
In this unit, we've learned how to create an agentic RAG system to help Alfred, our friendly neighborhood agent, prepare for and manage an extravagant gala.
The combination of RAG with agentic capabilities demonstrates how powerful AI assistants can become when they have:
- Access to structured knowledge (guest information)
- The ability to retrieve information in real time (web search)
- Domain-specific tools (weather information, Hub stats)
- Memory of past interactions
With these capabilities, Alfred is well equipped to be the perfect host, able to answer questions about the guests, provide up-to-date information, and make sure the gala runs smoothly, even managing the perfect timing for the fireworks!
<Tip>
Now that you've built a complete agent, you might want to explore:
- Building specialized tools for your own use cases
- Implementing more sophisticated RAG systems with embeddings
- Building multi-agent systems where agents can collaborate
- Deploying your agent as a service that others can interact with
</Tip>
| agents-course/units/es/unit3/agentic-rag/conclusion.mdx/0 | {
"file_path": "agents-course/units/es/unit3/agentic-rag/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 432
} | 8 |
# Introduction

Welcome to **Bonus Unit 2**! In this chapter, you'll explore advanced strategies for observing, evaluating, and ultimately improving the performance of your agents.
---
## 📚 When Should I Do This Bonus Unit?
This bonus unit is a great fit if you:
- **Develop and deploy agents:** You want to make sure your agents perform reliably in production.
- **Need detailed insights:** You're looking to diagnose issues, optimize performance, or understand your agent's inner workings.
- **Aim to reduce operational costs:** By monitoring agent costs, latency, and execution details, you can manage resources efficiently.
- **Seek continuous improvement:** You're interested in integrating real-time user feedback and automated evaluation into your applications.
In short, it's for everyone who wants to put their agents in front of users!
---
## 🤓 What You'll Learn
In this unit, you'll see how to:
- **Instrument your agent:** Learn how to integrate observability tooling via *OpenTelemetry* with the smolagents *framework*.
- **Monitor metrics:** Track performance indicators such as token usage (costs), latency, and error traces.
- **Evaluate in real time:** Understand techniques for live evaluation, including gathering user feedback and using an *LLM-as-a-judge*.
- **Analyze offline:** Use benchmark datasets (e.g., GSM8K) to test and compare agent performance.
---
## 🚀 Ready to Get Started?
In the next section, you'll learn the basics of agent observability and evaluation. After that, it will be time to see them in action! | agents-course/units/fr/bonus-unit2/introduction.mdx/0 | {
"file_path": "agents-course/units/fr/bonus-unit2/introduction.mdx",
"repo_id": "agents-course",
"token_count": 694
} | 9 |
# Understanding AI Agents through the Thought-Action-Observation Cycle
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-3.jpg" alt="Unit 1 planning"/>
In the previous sections, we learned:
- **How tools are made available to the agent in the system prompt**.
- **How agents are systems that can "reason", plan, and interact with their environment**.
In this section, **we'll explore the complete agent workflow**, a cycle we defined as Thought-Action-Observation.
Then, we'll dive deeper into each of these steps.
## The Core Components
Agents work in a continuous cycle of: **thinking (Thought) → acting (Act) and observing (Observe)**.
Let's break these actions down together:
1. **Thought**: The LLM part of the agent decides what the next step should be.
2. **Action**: The agent takes an action by calling the tools with the associated arguments.
3. **Observation**: The model reflects on the response returned by the tool.
## The Thought-Action-Observation Cycle
The three components work together in a continuous loop. To use an analogy from programming, the agent uses a **while loop**: the loop continues until the agent's objective has been fulfilled.
Visually, it looks like this:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AgentCycle.gif" alt="Thought, Action, Observation cycle"/>
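To make the while-loop analogy concrete, here is a toy, self-contained sketch of such a loop. It is not a real framework: the stand-in LLM and the `get_weather` tool are hypothetical placeholders.
```python
import json

def get_weather(location: str) -> str:
    # Hypothetical tool: a real agent would call an actual weather API here
    return f"Current weather in {location}: partly cloudy, 15°C, 60% humidity."

TOOLS = {"get_weather": get_weather}

def fake_llm(memory: list) -> str:
    # Stand-in for the LLM: it emits an Action first, then a Final Answer once it has an Observation
    if not any(entry.startswith("Observation:") for entry in memory):
        return 'Action: {"action": "get_weather", "action_input": {"location": "New York"}}'
    return "Final Answer: The current weather in New York is partly cloudy, 15°C, 60% humidity."

def run_agent(user_query: str) -> str:
    memory = [f"User: {user_query}"]
    while True:                                   # the loop continues until the objective is fulfilled
        output = fake_llm(memory)                 # Thought + Action happen inside the model
        if output.startswith("Final Answer:"):
            return output
        action = json.loads(output.removeprefix("Action: "))
        observation = TOOLS[action["action"]](**action["action_input"])   # Act: call the chosen tool
        memory += [output, f"Observation: {observation}"]                 # Observe: feed the result back

print(run_agent("What's the weather in New York today?"))
```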
In many agent frameworks, **the rules and guidelines are embedded directly into the system prompt**, ensuring that every cycle adheres to a defined logic.
In a simplified version, our system prompt might look like this:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/system_prompt_cycle.png" alt="Thought, Action, Observation cycle"/>
We see here that in the system message we defined:
- The *agent's behavior*.
- The *tools it has access to*, as we described in the previous section.
- The *Thought-Action-Observation cycle*, which we bake into the LLM's instructions.
Let's take a small example to understand the process before diving deeper into each step.
## Alfred, the weather Agent
We created Alfred, the weather Agent.
A user asks Alfred: "What's the weather like in New York today?"
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent.jpg" alt="Alfred Agent"/>
Alfred's job is to answer this question using a weather API tool.
Here is how the cycle unfolds:
### Thought
**Internal reasoning:**
Upon receiving the query, Alfred's internal dialogue might be:
*"The user needs current weather information for New York. I have access to a tool that fetches weather data. First, I need to call the weather API to get up-to-date details."*
This step shows the agent breaking the problem down into steps: first, gathering the necessary data.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-1.jpg" alt="Alfred Agent"/>
### Action
**Tool usage:**
Based on its reasoning and the fact that Alfred knows about the `get_weather` tool, Alfred prepares a JSON-formatted command that calls the weather API tool. For example, its first action could be:
Thought: I need to check the current weather for New York.
```
{
"action": "get_weather",
"action_input": {
"location": "New York"
}
}
```
Here, the action clearly specifies which tool to call (e.g., `get_weather`) and which parameter to pass (the "location": "New York").
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-2.jpg" alt="Alfred Agent"/>
### Observation
**Feedback from the environment:**
After the tool call, Alfred receives an observation. This might be the raw weather data returned by the API, for example:
*"Current weather in New York: partly cloudy, 15°C, 60% humidity."*
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-3.jpg" alt="Alfred Agent"/>
This observation is then added to the prompt as additional context. It acts as real-world feedback, confirming whether the action succeeded and providing the needed details.
### Updated thought
**Reflecting:**
With the observation in hand, Alfred updates its internal reasoning:
*"Now that I have the weather data for New York, I can compile an answer for the user."*
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-4.jpg" alt="Alfred Agent"/>
### Final Action
Alfred then generates a final response formatted as we told it to:
Thought: I have the weather data now. The current weather in New York is partly cloudy with a temperature of 15°C and 60% humidity.
Final answer: The current weather in New York is partly cloudy with a temperature of 15°C and 60% humidity.
This final action sends the answer back to the user, closing the loop.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-5.jpg" alt="Alfred Agent"/>
What we see in this example:
- **Agents iterate through a loop until the objective is fulfilled:**
Alfred's process is cyclical. It starts with a thought, then acts by calling a tool, and finally observes the outcome. If the observation had indicated an error or incomplete data, Alfred could have re-entered the cycle to correct its approach.
- **Tool integration:**
The ability to call a tool (like a weather API) enables Alfred to go **beyond static knowledge and retrieve real-time data**, an essential aspect of many agents.
- **Dynamic adaptation:**
Each cycle lets the agent incorporate fresh information (observations) into its reasoning (thoughts), ensuring that the final answer is well informed and accurate.
This example illustrates the core concept behind the *ReAct cycle* (a concept we will develop in the next section): **the interplay of Thought, Action, and Observation empowers agents to solve complex tasks iteratively**.
By understanding and applying these principles, you can design agents that not only reason about their tasks but also use external tools effectively to complete them, while continuously refining their output based on feedback from the environment.
---
Let's now dive deeper into Thought, Action, and Observation as the individual steps of the process.
| agents-course/units/fr/unit1/agent-steps-and-structure.mdx/0 | {
"file_path": "agents-course/units/fr/unit1/agent-steps-and-structure.mdx",
"repo_id": "agents-course",
"token_count": 2617
} | 10 |
# Conclusion
Congratulations on finishing the `LangGraph` module of this second unit! 🥳
You now **master the fundamentals** of building structured workflows with LangGraph that you can ship to production.
This module is only the beginning of your LangGraph journey. For more advanced topics, we recommend:
- Exploring the [official LangGraph documentation](https://github.com/langchain-ai/langgraph)
- Taking the full [*Introduction to LangGraph*](https://academy.langchain.com/courses/intro-to-langgraph) course from LangChain Academy
- Building something yourself!
In the next unit, you will explore real-world use cases. It's time to leave theory behind and get practical!
Finally, we would love **to hear what you think of the course and how we can improve it**. If you have any feedback, please [fill out this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog).
### Keep learning, stay awesome 🤗
| agents-course/units/fr/unit2/langgraph/conclusion.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/langgraph/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 409
} | 11 |
<CourseFloatingBanner
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/smolagents/code_agents.ipynb"},
]}
askForHelpUrl="http://hf.co/join/discord"
/>
# Construire des agents qui utilisent du code
Les agents à code sont le type d'agent par défaut dans `smolagents`. Ils génèrent des appels d'outils en Python pour effectuer leurs actions, ce qui donne des représentations d'actions efficaces, expressives et précises.
Leur approche simplifiée réduit le nombre d'actions requises, simplifie les opérations complexes et permet la réutilisation de fonctions de code existantes. `smolagents` fournit un *framework* léger pour construire de tels agents en environ 1 000 lignes de code.

Graphique issu du papier [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030)
<Tip>
Si vous voulez en savoir plus sur pourquoi les agents à code sont efficaces, consultez <a href="https://huggingface.co/docs/smolagents/en/conceptual_guides/intro_agents#code-agents" target="_blank">ce guide</a> de la documentation smolagents.
</Tip>
## Pourquoi les agents à code ?
Dans un processus d'agent multi-étapes, le LLM écrit et exécute des actions, impliquant généralement des appels d'outils externes. Les approches traditionnelles utilisent un format JSON pour spécifier les noms d'outils et les arguments sous forme de chaînes, **que le système doit analyser pour déterminer quel outil exécuter**.
Cependant, la recherche montre que **les LLM d'appel d'outils fonctionnent plus efficacement avec du code directement**. C'est un principe fondamental de `smolagents`, comme le montre le diagramme ci-dessus issu de [*Executable Code Actions Elicit Better LLM Agents*](https://huggingface.co/papers/2402.01030).
Écrire des actions en code plutôt qu'en JSON offre plusieurs avantages clés :
* **Composabilité** : Combiner et réutiliser facilement des actions
* **Gestion d'objets** : Travailler directement avec des structures complexes comme des images
* **Généralité** : Exprimer toute tâche computationnellement possible
* **Naturel pour les LLM** : Du code de haute qualité est déjà présent dans les données d'entraînement des LLM
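Pour illustrer cette différence, voici une comparaison schématique entre une action décrite en JSON (qui doit être analysée puis traduite en appel d'outil) et la même action exprimée directement en code. L'outil `web_search` cité dans la chaîne de code est hypothétique et sert uniquement d'illustration :
```python
import json

# Action exprimée en JSON : le système doit d'abord analyser la chaîne,
# retrouver l'outil correspondant puis convertir les arguments.
action_json = '{"tool": "web_search", "arguments": {"query": "musique fête Batman"}}'
appel = json.loads(action_json)
print(appel["tool"], appel["arguments"])  # dispatch manuel vers l'outil (non montré)

# La même action exprimée en code : le LLM écrit directement du Python exécutable,
# ce qui permet de composer plusieurs appels et de manipuler les résultats intermédiaires.
action_code = '''
results = web_search(query="musique fête Batman")
top_trois = results[:3]
print(top_trois)
'''
```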
## Comment fonctionne un agent à code ?

Le diagramme ci-dessus illustre comment `CodeAgent.run()` fonctionne, suivant le *framework* ReAct que nous avons mentionné dans l'Unité 1. L'abstraction principale pour les agents dans `smolagents` est un `MultiStepAgent` qui sert de bloc de construction principal. Comme nous le verrons dans un exemple ci-dessous, `CodeAgent` est un type spécial de `MultiStepAgent`.
Un `CodeAgent` effectue des actions à travers un cycle d'étapes (avec les variables existantes et les connaissances étant incorporées dans le contexte de l'agent) qui est conservé dans un journal d'exécution :
1. Le *prompt* système est stocké dans un `SystemPromptStep` et la requête utilisateur est enregistrée dans un `TaskStep`.
2. Ensuite, la boucle *while* suivante est exécutée :
2.1 La méthode `agent.write_memory_to_messages()` écrit les *logs* de l'agent dans une liste de [messages de chat](https://huggingface.co/docs/transformers/main/en/chat_templating) lisibles par le LLM.
2.2 Ces messages sont envoyés à un `Model` qui génère une complétion.
2.3 La complétion est analysée pour extraire l'action, qui, dans notre cas, devrait être un extrait de code puisque nous travaillons avec un `CodeAgent`.
2.4 L'action est exécutée.
2.5 Les résultats sont enregistrés en mémoire dans un `ActionStep`.
À la fin de chaque étape, si des fonctions de rappel (*callbacks*) sont définies dans `agent.step_callback`, elles sont exécutées.
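Pour se représenter cette boucle plus concrètement, en voici une esquisse volontairement simplifiée qui reprend les étapes 2.1 à 2.5 ci-dessus. Toutes les fonctions sont des substituts illustratifs et ne correspondent pas au code réel de `smolagents` :
```python
# Esquisse illustrative de la boucle d'un CodeAgent ; les fonctions ci-dessous sont
# des substituts simplifiés, pas l'implémentation réelle de smolagents.

def extract_code(completion: str) -> str:
    """Substitut : extrait l'extrait de code de la complétion du LLM."""
    return completion

def execute_code(code: str) -> str:
    """Substitut : exécuterait le code dans un interpréteur sécurisé."""
    return f"résultat de l'exécution de : {code!r}"

def is_final_answer(result: str) -> bool:
    """Substitut : détecte si le résultat constitue la réponse finale."""
    return True

def run_code_agent(model, task: str, max_steps: int = 10):
    memory = [("task", task)]                      # requête utilisateur (TaskStep)
    for _ in range(max_steps):
        messages = [str(step) for step in memory]  # 2.1 : logs -> messages de chat
        completion = model(messages)               # 2.2 : complétion du LLM
        code = extract_code(completion)            # 2.3 : extraction du code
        result = execute_code(code)                # 2.4 : exécution de l'action
        memory.append(("action", code, result))    # 2.5 : enregistrement (ActionStep)
        if is_final_answer(result):
            return result
    return None

# Exemple d'utilisation avec un "modèle" factice
print(run_code_agent(lambda msgs: "final_answer('Fête prête !')", "Prépare la fête"))
```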
## Voyons quelques exemples
<Tip>
Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/smolagents/code_agents.ipynb" target="_blank">ce <i>notebook</i></a> que vous pouvez exécuter avec Google Colab.
</Tip>
Alfred planifie une fête au manoir de la famille Wayne et a besoin de votre aide pour s'assurer que tout se passe bien. Pour l'aider, nous appliquerons ce que nous avons appris sur le fonctionnement d'un `CodeAgent` multi-étapes.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-party.jpg" alt="Alfred Party"/>
Si vous n'avez pas encore installé `smolagents`, vous pouvez le faire en exécutant la commande suivante :
```bash
pip install smolagents -U
```
Connectons-nous également au Hub d'Hugging Face pour avoir accès à l'API d'inférence *Serverless*.
```python
from huggingface_hub import login
login()
```
### Sélectionner une *playlist* pour la fête en utilisant `smolagents`
La musique est un élément essentiel d'une fête réussie ! Alfred a besoin d'aide pour sélectionner la *playlist*. Heureusement, `smolagents` est là pour nous aider ! Nous pouvons construire un agent capable de rechercher sur le web en utilisant DuckDuckGo. Pour donner à l'agent l'accès à cet outil, nous l'incluons dans la liste des outils lors de la création de l'agent.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-playlist.jpg" alt="Alfred Playlist"/>
Pour le modèle, nous nous appuierons sur `InferenceClientModel`, qui fournit l'accès à l'[API d'inférence Serverless](https://huggingface.co/docs/api-inference/index) d'Hugging Face. Le modèle par défaut est `"Qwen/Qwen2.5-Coder-32B-Instruct"`, qui est performant et disponible pour une inférence rapide, mais vous pouvez sélectionner depuis le Hub n'importe quel modèle compatible.
Exécuter un agent est assez simple :
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=InferenceClientModel())
agent.run("Recherche les meilleures recommandations musicales pour une fête au manoir des Wayne.")
```
Lorsque vous exécutez cet exemple, la sortie **affichera une trace des étapes du *workflow* en cours d'exécution**. Elle affichera également le code Python correspondant avec le message :
```python
─ Executing parsed code: ────────────────────────────────────────────────────────────────────────────────────────
results = web_search(query="best music for a Batman party")
print(results)
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────
```
Après quelques étapes, vous verrez la *playlist* générée qu'Alfred peut utiliser pour la fête ! 🎵
### Utiliser un outil personnalisé pour préparer le menu
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-menu.jpg" alt="Alfred Menu"/>
Maintenant que nous avons sélectionné une *playlist*, nous devons organiser le menu pour les invités. Encore une fois, Alfred peut tirer parti de `smolagents` pour le faire. Ici, nous utilisons le décorateur `@tool` pour définir une fonction personnalisée qui agit comme un outil. Nous couvrirons la création d'outils plus en détail plus tard, donc pour l'instant, nous pouvons simplement exécuter le code.
Comme vous pouvez le voir dans l'exemple ci-dessous, nous allons créer un outil en utilisant le décorateur `@tool` et l'inclure dans la liste `tools`.
```python
from smolagents import CodeAgent, tool, InferenceClientModel
# Outil pour suggérer un menu basé sur l'occasion
@tool
def suggest_menu(occasion: str) -> str:
"""
Suggère un menu basé sur l'occasion.
Args:
occasion (str): Le type d'occasion pour la fête. Les valeurs autorisées sont :
- "casual": Menu pour une fête décontractée.
- "formal": Menu pour une fête formelle.
- "superhero": Menu pour une fête de super-héros.
- "custom": Menu personnalisé.
"""
if occasion == "casual":
return "Pizza, collations et boissons."
elif occasion == "formal":
return "Dîner 3 services avec vin et dessert."
elif occasion == "superhero":
return "Buffet avec nourriture énergétique et saine."
else:
return "Menu personnalisé pour le majordome."
# Alfred, le majordome, préparant le menu pour la fête
agent = CodeAgent(tools=[suggest_menu], model=InferenceClientModel())
# Préparer le menu pour la fête
agent.run("Prépare un menu formel pour la fête.")
```
L'agent s'exécutera pendant quelques étapes jusqu'à trouver la réponse. Préciser les valeurs autorisées dans la docstring aide à diriger l'agent vers les valeurs d'argument `occasion` qui existent et limite les hallucinations.
Le menu est prêt ! 🥗
### Utiliser des imports Python à l'intérieur de l'agent
Nous avons la *playlist* et le menu prêts, mais nous devons vérifier un dernier détail crucial : le temps de préparation !
Alfred doit calculer quand tout serait prêt s'il commençait à préparer maintenant, au cas où ils auraient besoin de l'aide d'autres super-héros.
`smolagents` est spécialisé dans les agents qui écrivent et exécutent des extraits de code Python, offrant une exécution sécurisée.
En effet, **l'exécution du code a des mesures de sécurité strictes** : les imports en dehors d'une liste sûre prédéfinie sont bloqués par défaut. Cependant, vous pouvez autoriser des imports supplémentaires en les passant sous forme de chaînes dans `additional_authorized_imports`.
Pour plus de détails sur l'exécution sécurisée du code, consultez le [guide](https://huggingface.co/docs/smolagents/tutorials/secure_code_execution) officiel.
Lors de la création de l'agent, nous utiliserons `additional_authorized_imports` pour permettre l'importation du module `datetime`.
```python
from smolagents import CodeAgent, InferenceClientModel
import numpy as np
import time
import datetime
agent = CodeAgent(tools=[], model=InferenceClientModel(), additional_authorized_imports=['datetime'])
agent.run(
"""
Alfred doit se préparer pour la fête. Voici les tâches :
1. Préparer les boissons - 30 minutes
2. Décorer le manoir - 60 minutes
3. Mettre en place le menu - 45 minutes
4. Préparer la musique et la playlist - 45 minutes
Si nous commençons maintenant, à quelle heure la fête sera-t-elle prête ?
"""
)
```
Ces exemples ne sont que le début de ce que vous pouvez faire avec les agents à code, et nous commençons déjà à voir leur utilité pour préparer la fête.
Vous pouvez en apprendre davantage sur la façon de construire de tels agents dans la [documentation de `smolagents`](https://huggingface.co/docs/smolagents).
En résumé, `smolagents` se spécialise dans les agents qui écrivent et exécutent des extraits de code Python, offrant une exécution sécurisée. Il supporte à la fois les modèles de langage locaux et basés sur API, le rendant adaptable à divers environnements de développement.
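À titre d'illustration de cette flexibilité, voici comment on pourrait passer d'un modèle servi via une API à un modèle exécuté localement. Le nom de classe `TransformersModel` et l'identifiant de modèle ci-dessous sont donnés en exemple, à vérifier selon votre version de `smolagents` et les ressources de votre machine :
```python
from smolagents import CodeAgent, InferenceClientModel, TransformersModel

# Modèle distant, servi via l'API d'inférence d'Hugging Face (comme plus haut)
remote_model = InferenceClientModel()

# Modèle local, chargé avec transformers ; l'identifiant ci-dessous n'est qu'un
# exemple de petit modèle pouvant tourner sur une machine modeste.
local_model = TransformersModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct")

# Le même agent peut être construit avec l'un ou l'autre des deux modèles
agent = CodeAgent(tools=[], model=local_model)
```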
### Partager notre agent préparateur de fête personnalisé sur le Hub
Ne serait-il pas **incroyable de partager notre propre agent Alfred avec le reste du monde** ? En faisant cela, n'importe qui peut facilement télécharger et utiliser l'agent directement depuis le Hub, apportant l'ultime planificateur de fête de Gotham à portée de main ! Faisons-le ! 🎉
La bibliothèque `smolagents` rend cela possible en vous permettant de partager un agent complet avec la communauté et de télécharger ceux des autres pour une utilisation immédiate. C'est aussi simple que ce qui suit :
```python
# Changez pour votre nom d'utilisateur et nom de dépôt
agent.push_to_hub('sergiopaniego/AlfredAgent')
```
Pour télécharger à nouveau l'agent, utilisez le code ci-dessous :
```python
# Changez pour votre nom d'utilisateur et nom de dépôt
alfred_agent = agent.from_hub('sergiopaniego/AlfredAgent', trust_remote_code=True)
alfred_agent.run("Donne-moi la meilleure playlist pour une fête au manoir des Wayne. L'idée de la fête est un thème 'mascarade de méchants'")
```
Ce qui est également excitant, c'est que les agents partagés sont directement disponibles en tant que *Spaces*, vous permettant d'interagir avec eux en temps réel. Vous pouvez explorer d'autres agents [ici](https://huggingface.co/spaces/davidberenstein1957/smolagents-and-tools).
Par exemple, l'_AlfredAgent_ est disponible [ici](https://huggingface.co/spaces/sergiopaniego/AlfredAgent). Vous pouvez l'essayer directement ci-dessous :
<iframe
src="https://sergiopaniego-alfredagent.hf.space/"
frameborder="0"
width="850"
height="450"
></iframe>
Vous vous demandez peut-être comment Alfred a construit un tel agent en utilisant `smolagents` ? En intégrant plusieurs outils, il peut générer un agent comme suit. Ne vous inquiétez pas des outils pour l'instant, car nous aurons une section dédiée plus tard dans cette unité pour explorer cela en détail :
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, Tool, tool, VisitWebpageTool
@tool
def suggest_menu(occasion: str) -> str:
"""
Suggère un menu basé sur l'occasion.
Args:
occasion: Le type d'occasion pour la fête.
"""
if occasion == "casual":
return "Pizza, collations et boissons."
elif occasion == "formal":
return "Dîner 3 services avec vin et dessert."
elif occasion == "superhero":
return "Buffet avec nourriture énergétique et saine."
else:
return "Menu personnalisé pour le majordome."
@tool
def catering_service_tool(query: str) -> str:
"""
Cet outil renvoie le service de restauration le mieux noté à Gotham City.
Args:
query: Un terme de recherche pour trouver des services de restauration.
"""
# Exemple de liste de services de restauration et leurs notes
services = {
"Gotham Catering Co.": 4.9,
"Wayne Manor Catering": 4.8,
"Gotham City Events": 4.7,
}
# Trouver le service de restauration le mieux noté (simuler le filtrage de requête de recherche)
best_service = max(services, key=services.get)
return best_service
class SuperheroPartyThemeTool(Tool):
name = "superhero_party_theme_generator"
description = """
Cet outil suggère des idées créatives de fête sur le thème des super-héros basées sur une catégorie.
Il renvoie une idée de thème de fête unique."""
inputs = {
"category": {
"type": "string",
"description": "Le type de fête de super-héros (par ex., 'héros classiques', 'mascarade de méchants', 'Gotham futuriste').",
}
}
output_type = "string"
def forward(self, category: str):
themes = {
"classic heroes": "Gala de la Justice League : Les invités viennent habillés comme leurs héros DC préférés avec des cocktails thématiques comme 'Le Punch Kryptonite'.",
"villain masquerade": "Bal des Voyous de Gotham : Une mascarade mystérieuse où les invités s'habillent en méchants classiques de Batman.",
"futuristic Gotham": "Nuit Neo-Gotham : Une fête de style cyberpunk inspirée de Batman Beyond, avec des décorations néon et des gadgets futuristes."
}
return themes.get(category.lower(), "Idée de fête thématique non trouvée. Essayez 'héros classiques', 'mascarade de méchants', ou 'Gotham futuriste'.")
# Alfred, le majordome, préparant le menu pour la fête
agent = CodeAgent(
tools=[
DuckDuckGoSearchTool(),
VisitWebpageTool(),
suggest_menu,
catering_service_tool,
SuperheroPartyThemeTool(),
FinalAnswerTool()
],
model=InferenceClientModel(),
max_steps=10,
verbosity_level=2
)
agent.run("Donne-moi la meilleure playlist pour une fête au manoir des Wayne. L'idée de la fête est un thème 'mascarade de méchants'")
```
Comme vous pouvez le voir, nous avons créé un `CodeAgent` avec plusieurs outils qui améliorent la fonctionnalité de l'agent, le transformant en l'ultime planificateur de fête prêt à partager avec la communauté ! 🎉
Maintenant, c'est à votre tour : construisez votre propre agent et partagez-le avec la communauté en utilisant les connaissances que nous venons d'apprendre ! 🕵️♂️💡
<Tip>
Si vous souhaitez partager votre projet d'agent, créez un <i>Space</i> et taguez <a href="https://huggingface.co/agents-course">agents-course</a> sur le Hub. Nous serions ravis de voir ce que vous avez créé !
</Tip>
### Inspecter notre agent préparateur de fête avec OpenTelemetry et Langfuse 📡
Alors qu'Alfred peaufine l'agent, il se lasse de déboguer ses exécutions. Les agents, par nature, sont imprévisibles et difficiles à inspecter. Mais comme il vise à construire l'ultime agent préparateur de fête et à le déployer en production, il a besoin d'une traçabilité robuste pour la surveillance et l'analyse futures.
Encore une fois, `smolagents` vient à la rescousse ! Il adopte la norme [OpenTelemetry](https://opentelemetry.io/) pour instrumenter les exécutions d'agents, permettant une inspection et une journalisation transparentes. Avec l'aide de [Langfuse](https://langfuse.com/) et du `SmolagentsInstrumentor`, Alfred peut facilement suivre et analyser le comportement de son agent.
La configuration est simple !
D'abord, nous devons installer les dépendances nécessaires :
```bash
pip install opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents langfuse
```
Ensuite, Alfred a déjà créé un compte sur Langfuse et a ses clés API prêtes. Si vous ne l'avez pas encore fait, vous pouvez vous inscrire à Langfuse Cloud [ici](https://cloud.langfuse.com/) ou explorer des [alternatives](https://huggingface.co/docs/smolagents/tutorials/inspect_runs).
Une fois que vous avez vos clés API, elles doivent être correctement configurées comme suit :
```python
import os
# Obtenez les clés pour votre projet depuis la page des paramètres du projet : https://cloud.langfuse.com
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 Région UE
# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 Région US
```
Avec les variables d'environnement définies, nous pouvons maintenant initialiser le client Langfuse. `get_client()` initialise le client Langfuse en utilisant les identifiants fournis dans les variables d'environnement.
```python
from langfuse import get_client
langfuse = get_client()
# Vérifier la connexion
if langfuse.auth_check():
print("Le client Langfuse est authentifié et prêt !")
else:
print("L'authentification a échoué. Veuillez vérifier vos identifiants et l'hôte.")
```
Enfin, Alfred est prêt à initialiser le `SmolagentsInstrumentor` et à commencer à suivre les performances de son agent.
```python
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
SmolagentsInstrumentor().instrument()
```
Alfred est maintenant connecté 🔌 ! Les exécutions de `smolagents` sont enregistrées dans Langfuse, lui donnant une visibilité complète sur le comportement de l'agent. Avec cette configuration, il est prêt à revisiter les exécutions précédentes et à affiner encore plus son agent préparateur de fête.
<Tip>Pour en savoir plus sur le traçage de vos agents et l'utilisation des données collectées pour évaluer leurs performances, consultez <a href="https://huggingface.co/learn/agents-course/fr/bonus-unit2/introduction">l'Unité Bonus 2</a>.</Tip>
```python
from smolagents import CodeAgent, InferenceClientModel
agent = CodeAgent(tools=[], model=InferenceClientModel())
alfred_agent = agent.from_hub('sergiopaniego/AlfredAgent', trust_remote_code=True)
alfred_agent.run("Donne-moi la meilleure playlist pour une fête au manoir des Wayne. L'idée de la fête est un thème 'mascarade de méchants'")
```
Alfred peut maintenant accéder aux logs [ici](https://cloud.langfuse.com/project/cm7bq0abj025rad078ak3luwi/traces/995fc019255528e4f48cf6770b0ce27b?timestamp=2025-02-19T10%3A28%3A36.929Z) pour les relire et les analyser.
<Tip>
En fait, une erreur mineure s'est produite lors de l'exécution. Pouvez-vous la repérer dans les logs ? Essayez de suivre comment l'agent la gère et renvoie quand même une réponse valide. <a href="https://cloud.langfuse.com/project/cm7bq0abj025rad078ak3luwi/traces/995fc019255528e4f48cf6770b0ce27b?timestamp=2025-02-19T10%3A28%3A36.929Z&observation=80ca57ace4f69b52">Voici</a> le lien direct vers l'erreur si vous voulez vérifier votre réponse. Bien sûr, l'erreur a été corrigée entre-temps, plus de détails peuvent être trouvés dans cette <a href="https://github.com/huggingface/smolagents/issues/838"><i>issue</i></a>.
</Tip>
Pendant ce temps, la [*playlist* suggérée](https://open.spotify.com/playlist/0gZMMHjuxMrrybQ7wTMTpw) crée l'ambiance parfaite pour les préparatifs de la fête. Cool, non ? 🎶
---
Maintenant que nous avons créé notre premier *Code Agent*, **apprenons comment nous pouvons créer des *Tool Calling Agents***, le deuxième type d'agent disponible dans `smolagents`.
## Ressources
- [Blog smolagents](https://huggingface.co/blog/smolagents) - Introduction à smolagents et aux interactions de code
- [smolagents : Construire de bons agents](https://huggingface.co/docs/smolagents/tutorials/building_good_agents) - Meilleures pratiques pour des agents fiables
- [Construire des agents efficaces - Anthropic](https://www.anthropic.com/research/building-effective-agents) - Principes de conception d'agents
- [Partager des exécutions avec OpenTelemetry](https://huggingface.co/docs/smolagents/tutorials/inspect_runs) - Détails sur la façon de configurer OpenTelemetry pour suivre vos agents.
| agents-course/units/fr/unit2/smolagents/code_agents.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/smolagents/code_agents.mdx",
"repo_id": "agents-course",
"token_count": 8278
} | 12 |