# NOTE(review): the 14 lines below are dataset-viewer schema residue (column names and
# length statistics from an export tool), not Python source. They are preserved here as
# comments so the file can parse; they can be deleted outright once confirmed unneeded.
# text
# stringlengths
# 5
# 631k
# id
# stringlengths
# 14
# 178
# metadata
# dict
# __index_level_0__
# int64
# 0
# 647
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLM model. """ import itertools import math from dataclasses import dataclass from typing import Callable, Optional, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu, get_activation from ...cache_utils import DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from .configuration_xlm import XLMConfig logger = logging.get_logger(__name__) def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out.requires_grad = False out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() def get_masks(slen, lengths, causal, padding_mask=None): """ Generate hidden states mask, and optionally an attention mask. 
""" alen = torch.arange(slen, dtype=torch.long, device=lengths.device) if padding_mask is not None: mask = padding_mask else: assert lengths.max().item() <= slen mask = alen < lengths[:, None] # attention mask is the same as mask, or triangular inferior attention (causal) bs = lengths.size(0) if causal: attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None] else: attn_mask = mask # sanity check assert mask.size() == (bs, slen) assert causal is False or attn_mask.size() == (bs, slen, slen) return mask, attn_mask @dataclass @auto_docstring( custom_intro=""" Base class for outputs of question answering models using a [`~modeling_utils.XLMSQuADHead`]. """ ) class XLMSquadHeadOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). 
end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None class XLMPoolerStartLogits(nn.Module): """ Compute SQuAD start logits from sequence hidden states. Args: config ([`XLMConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model. """ def __init__(self, config: XLMConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. Returns: `torch.FloatTensor`: The start logits for SQuAD. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if p_mask.dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class XLMPoolerEndLogits(nn.Module): """ Compute SQuAD end logits from sequence hidden states. 
Args: config ([`XLMConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` to use. """ def __init__(self, config: XLMConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): The hidden states of the first tokens for the labeled span. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the first token for the labeled span. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. <Tip> One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides `start_states`. </Tip> Returns: `torch.FloatTensor`: The end logits for SQuAD. 
""" assert start_states is not None or start_positions is not None, ( "One of start_states, start_positions should be not None" ) if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if p_mask.dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class XLMPoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. Args: config ([`XLMConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model. """ def __init__(self, config: XLMConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): The hidden states of the first tokens for the labeled span. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the first token for the labeled span. cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Position of the CLS token for each sentence in the batch. If `None`, takes the last token. 
<Tip> One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides `start_states`. </Tip> Returns: `torch.FloatTensor`: The SQuAD 2.0 answer class. """ # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. hsz = hidden_states.shape[-1] assert start_states is not None or start_positions is not None, ( "One of start_states, start_positions should be not None" ) if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x class XLMSQuADHead(nn.Module): r""" A SQuAD head inspired by XLNet. Args: config ([`XLMConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` to use. 
""" def __init__(self, config: XLMConfig): super().__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = XLMPoolerStartLogits(config) self.end_logits = XLMPoolerEndLogits(config) self.answer_class = XLMPoolerAnswerClass(config) @auto_docstring def forward( self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, is_impossible: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, return_dict: bool = False, ) -> Union[XLMSquadHeadOutput, tuple[torch.FloatTensor]]: r""" hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): Final hidden states of the model on the sequence tokens. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Positions of the first token for the labeled span. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Positions of the last token for the labeled span. cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Position of the CLS token for each sentence in the batch. If `None`, takes the last token. is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Whether the question has a possible answer in the paragraph or not. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. 
""" start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 return XLMSquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else 
None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) if not return_dict: return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) else: return XLMSquadHeadOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, ) class XLMSequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`XLMConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). 
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: XLMConfig): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = nn.Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() self.first_dropout = nn.Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = nn.Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. 
cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. """ if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output class MultiHeadAttention(nn.Module): NEW_ID = itertools.count() def __init__(self, n_heads, dim, config): super().__init__() self.layer_id = next(MultiHeadAttention.NEW_ID) self.dim = dim self.n_heads = n_heads self.head_dim = dim // n_heads self.dropout = config.attention_dropout assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) self.out_lin = nn.Linear(dim, dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads) # Prune linear layers self.q_lin = prune_linear_layer(self.q_lin, index) 
self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False, cache_position=None, ): """ Self-attention (if kv is None) or attention over source sentence (provided by kv). """ # Input is (bs, qlen, dim) # Mask is (bs, klen) (non-causal) or (bs, klen, klen) bs, qlen, dim = input.size() is_cross_attention = kv is not None mask_reshape = (bs, 1, qlen, -1) if mask.dim() == 3 else (bs, 1, 1, -1) q = self.q_lin(input).view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2) if cache is not None: if isinstance(cache, EncoderDecoderCache): is_updated = cache.is_updated.get(self.layer_id) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = cache.cross_attention_cache else: curr_past_key_value = cache.self_attention_cache else: curr_past_key_value = cache current_states = kv if is_cross_attention else input if is_cross_attention and cache is not None and is_updated: # reuse k,v, cross_attentions k = curr_past_key_value.key_cache[self.layer_id] v = curr_past_key_value.value_cache[self.layer_id] else: k = self.k_lin(current_states) v = self.v_lin(current_states) k = k.view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2) v = v.view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2) if cache is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None k, v = curr_past_key_value.update(k, v, self.layer_id, {"cache_position": cache_position}) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent 
calls if is_cross_attention: cache.is_updated[self.layer_id] = True q = q / math.sqrt(self.head_dim) # (bs, n_heads, qlen, head_dim) scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen) mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen) scores.masked_fill_(mask, torch.finfo(scores.dtype).min) # (bs, n_heads, qlen, klen) weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen) weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, qlen, head_dim) context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.head_dim) outputs = (self.out_lin(context),) if output_attentions: outputs = outputs + (weights,) return outputs class TransformerFFN(nn.Module): def __init__(self, in_dim, dim_hidden, out_dim, config): super().__init__() self.dropout = config.dropout self.lin1 = nn.Linear(in_dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, out_dim) self.act = gelu if config.gelu_activation else nn.functional.relu self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward(self, input): return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input) def ff_chunk(self, input): x = self.lin1(input) x = self.act(x) x = self.lin2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) return x @auto_docstring class XLMPreTrainedModel(PreTrainedModel): config: XLMConfig load_tf_weights = None base_model_prefix = "transformer" def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @property def dummy_inputs(self): inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]) attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) if 
self.config.use_lang_emb and self.config.n_langs > 1: langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) else: langs_list = None return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list} def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Embedding): if self.config is not None and self.config.embed_init_std is not None: nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, nn.Linear): if self.config is not None and self.config.init_std is not None: nn.init.normal_(module.weight, mean=0, std=self.config.init_std) if module.bias is not None: nn.init.constant_(module.bias, 0.0) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, XLMModel) and self.config.sinusoidal_embeddings: create_sinusoidal_embeddings( self.config.max_position_embeddings, self.config.emb_dim, out=module.position_embeddings.weight ) @dataclass @auto_docstring( custom_intro=""" Base class for outputs of question answering models using a `XLMSQuADHead`. """ ) class XLMForQuestionAnsweringOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). 
end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring class XLMModel(XLMPreTrainedModel): def __init__(self, config): super().__init__(config) # encoder / decoder, output layer self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError("Currently XLM can only be used as an encoder") # self.with_output = with_output self.causal = config.causal # dictionary / languages self.n_langs = config.n_langs self.use_lang_emb = config.use_lang_emb self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index # self.dico = dico # self.id2lang = config.id2lang # self.lang2id = config.lang2id # assert len(self.dico) == self.n_words # assert len(self.id2lang) == len(self.lang2id) == self.n_langs # model parameters self.dim = config.emb_dim # 512 by 
default self.hidden_dim = self.dim * 4 # 2048 by default self.n_heads = config.n_heads # 8 by default self.n_layers = config.n_layers self.dropout = config.dropout self.attention_dropout = config.attention_dropout assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads" # embeddings self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim) if config.n_langs > 1 and config.use_lang_emb: self.lang_embeddings = nn.Embedding(self.n_langs, self.dim) self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index) self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps) # transformer layers self.attentions = nn.ModuleList() self.layer_norm1 = nn.ModuleList() self.ffns = nn.ModuleList() self.layer_norm2 = nn.ModuleList() # if self.is_decoder: # self.layer_norm15 = nn.ModuleList() # self.encoder_attn = nn.ModuleList() for _ in range(self.n_layers): self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config)) self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) # if self.is_decoder: # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout)) self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config)) self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) if hasattr(config, "pruned_heads"): pruned_heads = config.pruned_heads.copy().items() config.pruned_heads = {} for layer, heads in pruned_heads: if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) # Initialize weights and apply final processing self.post_init() self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, 
new_embeddings):
        self.embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.attentions[layer].prune_heads(heads)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs,  # Dummy kwargs for now
    ) -> Union[tuple, BaseModelOutput]:
        r"""
        langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
            languages ids which can be obtained from the language names by using two conversion mappings provided in
            the configuration of the model (only provided for multilingual models). More precisely, the *language name
            to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
            *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).

            See usage examples detailed in the [multilingual documentation](../multilingual).
        lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Length of each sentence that can be used to avoid performing attention on padding token indices. You can
            also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
            `[0, ..., input_ids.size(-1)]`.
        cache (`dict[str, torch.FloatTensor]`, *optional*):
            Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
            decoding.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None:
            bs, slen = input_ids.size()
        else:
            bs, slen = inputs_embeds.size()[:-1]

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Normalize the cache: default to an empty EncoderDecoderCache, and convert
        # legacy tuple-style caches to the new cache object.
        if cache is None:
            cache = EncoderDecoderCache(DynamicCache(), DynamicCache())
        if isinstance(cache, tuple):
            cache = EncoderDecoderCache.from_legacy_cache(cache)

        if lengths is None:
            if input_ids is not None:
                # Infer each sentence length from the number of non-padding tokens.
                lengths = (input_ids != self.pad_index).sum(dim=1).long()
            else:
                lengths = torch.tensor([slen] * bs, device=device)
        # check inputs
        assert lengths.size(0) == bs
        assert lengths.max().item() <= slen

        # generate masks
        mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)

        # position_ids
        if position_ids is None:
            position_ids = self.position_ids[:, :slen]
        else:
            assert position_ids.size() == (bs, slen)  # (bs, slen)

        # langs
        if langs is not None:
            assert langs.size() == (bs, slen)  # (bs, slen)

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.n_layers)

        # do not recompute cached elements: only feed the trailing tokens that
        # are not already covered by the KV cache
        if cache is not None and input_ids is not None:
            _slen = slen - cache.get_seq_length()
            input_ids = input_ids[:, -_slen:]
            position_ids = position_ids[:, -_slen:]
            if langs is not None:
                langs = langs[:, -_slen:]
            mask = mask[:, -_slen:]
            attn_mask = attn_mask[:, -_slen:]

        # embeddings
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
        if langs is not None and self.use_lang_emb and self.n_langs > 1:
            tensor = tensor + self.lang_embeddings(langs)
        if token_type_ids is not None:
            tensor = tensor + self.embeddings(token_type_ids)
        tensor = self.layer_norm_emb(tensor)
        tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
        # zero out padding positions
        tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # transformer layers
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None
        for i in range(self.n_layers):
            if output_hidden_states:
                hidden_states = hidden_states + (tensor,)

            # self attention
            attn_outputs = self.attentions[i](
                tensor,
                attn_mask,
                cache=cache,
                head_mask=head_mask[i],
                output_attentions=output_attentions,
                cache_position=cache_position,
            )
            attn = attn_outputs[0]
            if output_attentions:
                attentions = attentions + (attn_outputs[1],)
            attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
            # residual connection + post-attention layer norm (post-LN architecture)
            tensor = tensor + attn
            tensor = self.layer_norm1[i](tensor)

            # FFN
            tensor = tensor + self.ffns[i](tensor)
            tensor = self.layer_norm2[i](tensor)
            tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # Add last hidden state
        if output_hidden_states:
            hidden_states = hidden_states + (tensor,)

        if not return_dict:
            return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
        return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)


class XLMPredLayer(nn.Module):
    """
    Prediction layer (cross_entropy or adaptive_softmax).
""" def __init__(self, config): super().__init__() self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index dim = config.emb_dim if config.asm is False: self.proj = nn.Linear(dim, config.n_words, bias=True) else: self.proj = nn.AdaptiveLogSoftmaxWithLoss( in_features=dim, n_classes=config.n_words, cutoffs=config.asm_cutoffs, div_value=config.asm_div_value, head_bias=True, # default is False ) def forward(self, x, y=None): """Compute the loss, and optionally the scores.""" outputs = () if self.asm is False: scores = self.proj(x) outputs = (scores,) + outputs if y is not None: loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean") outputs = (loss,) + outputs else: scores = self.proj.log_prob(x) outputs = (scores,) + outputs if y is not None: _, loss = self.proj(x, y) outputs = (loss,) + outputs return outputs @auto_docstring( custom_intro=""" The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""" ) class XLMWithLMHeadModel(XLMPreTrainedModel, GenerationMixin): _tied_weights_keys = ["pred_layer.proj.weight"] def __init__(self, config): super().__init__(config) self.transformer = XLMModel(config) self.pred_layer = XLMPredLayer(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.pred_layer.proj def set_output_embeddings(self, new_embeddings): self.pred_layer.proj = new_embeddings def prepare_inputs_for_generation(self, input_ids, **kwargs): # Overwritten -- this model uses config options to prepare inputs mask_token_id = self.config.mask_token_id lang_id = self.config.lang_id effective_batch_size = input_ids.shape[0] mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, mask_token], dim=1) if lang_id is not None: langs = torch.full_like(input_ids, lang_id) else: langs = None return {"input_ids": input_ids, "langs": langs} @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, langs: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, lengths: Optional[torch.Tensor] = None, cache: Optional[dict[str, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple, MaskedLMOutput]: r""" langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): A parallel sequence of tokens to be used to indicate the language of each token in the input. 
Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the *language name to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string). See usage examples detailed in the [multilingual documentation](../multilingual). lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in `[0, ..., input_ids.size(-1)]`. cache (`dict[str, torch.FloatTensor]`, *optional*): Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential decoding. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) output = transformer_outputs[0] outputs = self.pred_layer(output, labels) # (loss, logits) or (logits,) depending on if labels are provided. 
        if not return_dict:
            return outputs + transformer_outputs[1:]

        return MaskedLMOutput(
            loss=outputs[0] if labels is not None else None,
            logits=outputs[0] if labels is None else outputs[1],
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
    for GLUE tasks.
    """
)
class XLMForSequenceClassification(XLMPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.transformer = XLMModel(config)
        self.sequence_summary = XLMSequenceSummary(config)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SequenceClassifierOutput]:
        r"""
        langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
            languages ids which can be obtained from the language names by using two conversion mappings provided in
            the configuration of the model (only provided for multilingual models). More precisely, the *language name
            to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
            *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).

            See usage examples detailed in the [multilingual documentation](../multilingual).
        lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Length of each sentence that can be used to avoid performing attention on padding token indices. You can
            also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
            `[0, ..., input_ids.size(-1)]`.
        cache (`dict[str, torch.FloatTensor]`, *optional*):
            Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
            decoding.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        output = transformer_outputs[0]
        logits = self.sequence_summary(output)

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the labels' dtype,
            # then cache it on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""" ) class XLMForQuestionAnsweringSimple(XLMPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = XLMModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, langs: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, lengths: Optional[torch.Tensor] = None, cache: Optional[dict[str, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the *language name to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string). See usage examples detailed in the [multilingual documentation](../multilingual). lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in `[0, ..., input_ids.size(-1)]`. 
cache (`dict[str, torch.FloatTensor]`, *optional*): Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential decoding. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + transformer_outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class XLMForQuestionAnswering(XLMPreTrainedModel): def __init__(self, config): 
        super().__init__(config)

        self.transformer = XLMModel(config)
        # SQuAD-style beam-search QA head (start/end top-k logits + answerability).
        self.qa_outputs = XLMSQuADHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        is_impossible: Optional[torch.Tensor] = None,
        cls_index: Optional[torch.Tensor] = None,
        p_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, XLMForQuestionAnsweringOutput]:
        r"""
        langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
            languages ids which can be obtained from the language names by using two conversion mappings provided in
            the configuration of the model (only provided for multilingual models). More precisely, the *language name
            to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
            *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).

            See usage examples detailed in the [multilingual documentation](../multilingual).
        lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Length of each sentence that can be used to avoid performing attention on padding token indices. You can
            also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
            `[0, ..., input_ids.size(-1)]`.
        cache (`dict[str, torch.FloatTensor]`, *optional*):
            Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
            decoding.
        is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the classification token to use as input for computing plausibility of the
            answer.
        p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
            masked. 0.0 mean token is not masked.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, XLMForQuestionAnswering
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
        >>> model = XLMForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")

        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
        ...     0
        ... )  # Batch size 1
        >>> start_positions = torch.tensor([1])
        >>> end_positions = torch.tensor([3])

        >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss = outputs.loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        output = transformer_outputs[0]

        # The SQuAD head computes the loss itself when label tensors are provided,
        # otherwise it returns top-k start/end log-probs for beam-search decoding.
        outputs = self.qa_outputs(
            output,
            start_positions=start_positions,
            end_positions=end_positions,
            cls_index=cls_index,
            is_impossible=is_impossible,
            p_mask=p_mask,
            return_dict=return_dict,
        )

        if not return_dict:
            return outputs + transformer_outputs[1:]

        return XLMForQuestionAnsweringOutput(
            loss=outputs.loss,
            start_top_log_probs=outputs.start_top_log_probs,
            start_top_index=outputs.start_top_index,
            end_top_log_probs=outputs.end_top_log_probs,
            end_top_index=outputs.end_top_index,
            cls_logits=outputs.cls_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class XLMForTokenClassification(XLMPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLMModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, TokenClassifierOutput]:
        r"""
        langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
            languages ids which can be obtained from the language names by using two conversion mappings provided in
            the configuration of the model (only provided for multilingual models). More precisely, the *language name
            to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
            *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).

            See usage examples detailed in the [multilingual documentation](../multilingual).
        lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Length of each sentence that can be used to avoid performing attention on padding token indices. You can
            also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
            `[0, ..., input_ids.size(-1)]`.
        cache (`dict[str, torch.FloatTensor]`, *optional*):
            Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
            decoding.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Dropout is applied to the sequence output before the per-token classifier.
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring
class XLMForMultipleChoice(XLMPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.transformer = XLMModel(config)
        self.sequence_summary = XLMSequenceSummary(config)
        # Maps the summary vector to a single per-choice score.
        self.logits_proj = nn.Linear(config.num_labels, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, MultipleChoiceModelOutput]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        langs (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
            languages ids which can be obtained from the language names by using two conversion mappings provided in
            the configuration of the model (only provided for multilingual models). More precisely, the *language name
            to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
            *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).

            See usage examples detailed in the [multilingual documentation](../multilingual).
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Length of each sentence that can be used to avoid performing attention on padding token indices. You can
            also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
            `[0, ..., input_ids.size(-1)]`.
        cache (`dict[str, torch.FloatTensor]`, *optional*):
            Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
            decoding.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten the (batch, choices) dimensions so the transformer sees a plain batch.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        langs = langs.view(-1, langs.size(-1)) if langs is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        if lengths is not None:
            logger.warning(
                "The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
                "attention mask instead."
            )
            lengths = None

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        output = transformer_outputs[0]
        logits = self.sequence_summary(output)
        logits = self.logits_proj(logits)
        # Un-flatten back to one score per choice.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


__all__ = [
    "XLMForMultipleChoice",
    "XLMForQuestionAnswering",
    "XLMForQuestionAnsweringSimple",
    "XLMForSequenceClassification",
    "XLMForTokenClassification",
    "XLMModel",
    "XLMPreTrainedModel",
    "XLMWithLMHeadModel",
]
transformers/src/transformers/models/xlm/modeling_xlm.py/0
{ "file_path": "transformers/src/transformers/models/xlm/modeling_xlm.py", "repo_id": "transformers", "token_count": 32472 }
565
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XLNet model. """ from __future__ import annotations import warnings from dataclasses import dataclass import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "xlnet/xlnet-base-cased" _CONFIG_FOR_DOC = "XLNetConfig" class TFXLNetRelativeAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.d_model % config.n_head != 0: raise ValueError( f"The hidden size ({config.d_model}) is not a multiple of the number of attention " f"heads ({config.n_head}" ) self.n_head = config.n_head self.d_head = config.d_head 
self.d_model = config.d_model self.scale = 1 / (config.d_head**0.5) self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = keras.layers.Dropout(config.dropout) self.config = config def build(self, input_shape=None): initializer = get_initializer(self.initializer_range) self.q = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="q" ) self.k = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="k" ) self.v = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="v" ) self.o = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="o" ) self.r = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="r" ) self.r_r_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias" ) self.r_s_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_s_bias" ) self.r_w_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias" ) self.seg_embed = self.add_weight( shape=(2, self.n_head, self.d_head), initializer=initializer, trainable=True, name="seg_embed" ) if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) def prune_heads(self, heads): raise NotImplementedError def rel_shift(self, x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = shape_list(x) x = tf.reshape(x, (x_size[1], x_size[0], x_size[2], x_size[3])) x = x[1:, 
...] x = tf.reshape(x, (x_size[0], x_size[1] - 1, x_size[2], x_size[3])) x = x[:, 0:klen, :, :] # x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask, head_mask, output_attentions, training=False ): """Core relative positional attention operations.""" # content based attention score ac = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_w_bias, k_head_h) # position based attention score bd = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift(bd, klen=shape_list(ac)[1]) # segment based attention score if seg_mat is None: ef = 0 else: ef = tf.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = tf.einsum("ijbs,ibns->ijbn", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == tf.float16 or attn_mask.dtype == tf.bfloat16: attn_score = attn_score - 65500 * attn_mask else: attn_score = attn_score - 1e30 * attn_mask # attention probability attn_prob = stable_softmax(attn_score, axis=1) attn_prob = self.dropout(attn_prob, training=training) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * head_mask # attention output attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, attn_prob return attn_vec def post_attention(self, h, attn_vec, residual=True, training=False): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = tf.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out, training=training) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def call( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems: np.ndarray | tf.Tensor | None = None, target_mapping: np.ndarray | tf.Tensor | None 
= None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = False, training: bool = False, ): if g is not None: # Two-stream attention with relative positional encoding. # content based attention score if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h # content-based key head k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h, training=training) # g-stream # query-stream query head q_head_g = tf.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = tf.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = tf.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g, training=training) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h # content heads q_head_h = 
tf.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec, training=training) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class TFXLNetFeedForward(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.layer_1 = keras.layers.Dense( config.d_inner, kernel_initializer=get_initializer(config.initializer_range), name="layer_1" ) self.layer_2 = keras.layers.Dense( config.d_model, kernel_initializer=get_initializer(config.initializer_range), name="layer_2" ) self.dropout = keras.layers.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = get_tf_activation(config.ff_activation) else: self.activation_function = config.ff_activation self.config = config def call(self, inp, training=False): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output, training=training) output = self.layer_2(output) output = self.dropout(output, training=training) output = self.layer_norm(output + inp) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "layer_1", None) is not None: with tf.name_scope(self.layer_1.name): self.layer_1.build([None, None, self.config.d_model]) 
if getattr(self, "layer_2", None) is not None:
            with tf.name_scope(self.layer_2.name):
                # layer_2 maps from the inner feed-forward width back to d_model,
                # so its input feature size is d_inner.
                self.layer_2.build([None, None, self.config.d_inner])


class TFXLNetLayer(keras.layers.Layer):
    """One XLNet transformer block: a relative-attention sub-layer followed by a
    position-wise feed-forward sub-layer, applied to the content stream (``output_h``)
    and, when present, the query stream (``output_g``)."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.rel_attn = TFXLNetRelativeAttention(config, name="rel_attn")
        self.ff = TFXLNetFeedForward(config, name="ff")
        # NOTE(review): this dropout layer is instantiated but never applied in
        # `call` below — confirm whether that is intentional.
        self.dropout = keras.layers.Dropout(config.dropout)

    def call(
        self,
        output_h,
        output_g,
        non_tgt_mask,
        attn_mask,
        pos_emb,
        seg_mat,
        mems: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        output_attentions: bool | None = False,
        training: bool = False,
    ):
        # Relative attention returns (output_h, output_g, [attn_probs]).
        outputs = self.rel_attn(
            output_h,
            output_g,
            non_tgt_mask,
            attn_mask,
            pos_emb,
            seg_mat,
            mems,
            target_mapping,
            head_mask,
            output_attentions,
            training=training,
        )
        output_h, output_g = outputs[:2]

        # The query stream only exists during pretraining/partial prediction;
        # apply the feed-forward sub-layer to it only when it is present.
        if output_g is not None:
            output_g = self.ff(output_g, training=training)
        output_h = self.ff(output_h, training=training)

        outputs = (output_h, output_g) + outputs[2:]  # Add again attentions if there are there
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "rel_attn", None) is not None:
            with tf.name_scope(self.rel_attn.name):
                self.rel_attn.build(None)
        if getattr(self, "ff", None) is not None:
            with tf.name_scope(self.ff.name):
                self.ff.build(None)


class TFXLNetLMHead(keras.layers.Layer):
    """Language-modeling head whose projection weights are tied to the input
    embeddings; only a per-token output bias is trained here."""

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
self.input_embeddings = input_embeddings

    def build(self, input_shape):
        # Per-token output bias; the projection matrix itself is the (tied)
        # input embedding table.
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        # Keep the shared embedding's bookkeeping in sync with the new weights.
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        self.bias = value["bias"]
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        # Project hidden states onto the tied embedding matrix ("linear" mode),
        # then add the output-only bias.
        hidden_states = self.input_embeddings(hidden_states, mode="linear")
        hidden_states = hidden_states + self.bias
        return hidden_states


@keras_serializable
class TFXLNetMainLayer(keras.layers.Layer):
    """Core XLNet encoder: shared word embeddings, a stack of `TFXLNetLayer`
    blocks, relative positional encodings, and optional memory (`mems`) caching."""

    config_class = XLNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.return_dict = config.return_dict

        # Memory / attention configuration copied from the config for direct access.
        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer
        self.use_bfloat16 = config.use_bfloat16
        self.initializer_range = config.initializer_range

        self.word_embedding = TFSharedEmbeddings(
            config.vocab_size, config.d_model, initializer_range=config.initializer_range, name="word_embedding"
        )
        self.layer = [TFXLNetLayer(config, name=f"layer_._{i}") for i in range(config.n_layer)]
        self.dropout = keras.layers.Dropout(config.dropout)

        # Whether memories are used differs between training and evaluation.
        self.use_mems_eval = config.use_mems_eval
        self.use_mems_train = config.use_mems_train

    def get_input_embeddings(self):
        return self.word_embedding

    def set_input_embeddings(self, value):
        self.word_embedding.weight = value
        self.word_embedding.vocab_size = shape_list(value)[0]

    def build(self, input_shape=None):
        initializer = get_initializer(self.initializer_range)
        # Learned embedding used in place of masked/predicted tokens in the
        # query stream (see `call`, where it is tiled over target positions).
        # NOTE(review): this add_weight runs before the `self.built` early-return
        # guard below — confirm that re-entrant build calls are not a concern.
        self.mask_emb = self.add_weight(
            shape=(1, 1, self.d_model), initializer=initializer, trainable=True, name="mask_emb"
        )
        if self.built:
            return
        self.built = True
        if getattr(self, "word_embedding", None) is not None:
            with tf.name_scope(self.word_embedding.name):
                self.word_embedding.build(None)
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: length of the current query segment (the square causal part of the mask).
            mlen: length of the cached memory prepended to the keys; memory positions are
                fully attendable (zero-padded columns) unless `same_length` re-masks them.

        ```
              same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen >
           ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
             [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
        qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
             [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
           v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
        ```
        """
        attn_mask = tf.ones([qlen, qlen])
        # Strictly-upper-triangular part = future positions (masked).
        mask_u = tf.linalg.band_part(attn_mask, 0, -1)
        mask_dia = tf.linalg.band_part(attn_mask, 0, 0)
        attn_mask_pad = tf.zeros([qlen, mlen])
        ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
        if self.same_length:
            # Additionally mask the lower-triangular (older) positions so every
            # token attends to a window of the same total length.
            mask_l = tf.linalg.band_part(attn_mask, -1, 0)
            ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
        return ret

    def cache_mem(self, curr_out, prev_mem):
        # cache hidden states into memory.
        if self.reuse_len is not None and self.reuse_len > 0:
            curr_out = curr_out[: self.reuse_len]

        if self.mem_len is None or self.mem_len == 0:
            # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time
            # and returns all of the past and current hidden states.
            cutoff = 0
        else:
            # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden
            # states. This is the preferred setting for training and long-form generation.
cutoff = -self.mem_len if prev_mem is None: # if `use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = tf.concat([prev_mem, curr_out], 0)[cutoff:] return tf.stop_gradient(new_mem) @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = tf.einsum("i,d->id", pos_seq, inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], axis=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = tf.tile(pos_emb, [1, bsz, 1]) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): """create relative positional encoding.""" freq_seq = tf.range(0, self.d_model, 2.0) inv_freq = 1 / (10000 ** (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError(f"Unknown `attn_type` {self.attn_type}.") if self.bi_data: fwd_pos_seq = tf.range(beg, end, -1.0) bwd_pos_seq = tf.range(-beg, -end, 1.0) if self.clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len) bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len, self.clamp_len) if bsz is not None: if bsz % 2 != 0: raise ValueError(f"With bi_data, the batch size {bsz} should be divisible by 2") fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1) else: fwd_pos_seq = tf.range(beg, end, -1.0) if self.clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray 
| tf.Tensor | None = None, mems: np.ndarray | tf.Tensor | None = None, perm_mask: np.ndarray | tf.Tensor | None = None, target_mapping: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, input_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_mems: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ): if training and use_mems is None: use_mems = self.use_mems_train else: use_mems = self.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = tf.transpose(input_ids, perm=(1, 0)) qlen, bsz = shape_list(input_ids)[:2] elif inputs_embeds is not None: inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2)) qlen, bsz = shape_list(inputs_embeds)[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") token_type_ids = tf.transpose(token_type_ids, perm=(1, 0)) if token_type_ids is not None else None input_mask = tf.transpose(input_mask, perm=(1, 0)) if input_mask is not None else None attention_mask = tf.transpose(attention_mask, perm=(1, 0)) if attention_mask is not None else None perm_mask = tf.transpose(perm_mask, perm=(1, 2, 0)) if perm_mask is not None else None target_mapping = tf.transpose(target_mapping, perm=(1, 2, 0)) if target_mapping is not None else None mlen = shape_list(mems[0])[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen # Attention mask # causal attention mask if self.attn_type == 
"uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError(f"Unsupported attention type: {self.attn_type}") # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, ( "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." ) if input_mask is None and attention_mask is not None: one_cst = tf.constant(1.0) input_mask = 1.0 - tf.cast(attention_mask, dtype=one_cst.dtype) if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = tf.zeros([shape_list(data_mask)[0], mlen, bsz]) data_mask = tf.concat([mems_mask, data_mask], axis=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = tf.cast(attn_mask > 0, dtype=attn_mask.dtype) if attn_mask is not None: non_tgt_mask = -tf.eye(qlen) if mlen > 0: non_tgt_mask = tf.concat([tf.zeros([qlen, mlen]), non_tgt_mask], axis=-1) non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=non_tgt_mask.dtype) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: check_embeddings_within_bounds(input_ids, self.word_embedding.vocab_size) word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k, training=training) if target_mapping is not None: word_emb_q = tf.tile(self.mask_emb, [shape_list(target_mapping)[0], bsz, 1]) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, 
None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q, training=training) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = tf.zeros([mlen, bsz], dtype=token_type_ids.dtype) cat_ids = tf.concat([mem_pad, token_type_ids], 0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = tf.cast( tf.logical_not(tf.equal(token_type_ids[:, None], cat_ids[None, :])), dtype=token_type_ids.dtype, ) seg_mat = tf.one_hot(seg_mat, 2) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = self.dropout(pos_emb, training=training) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): # cache new mems if use_mems: new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems[i], target_mapping, head_mask[i], output_attentions, training=training, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = 
self.dropout(output_g if output_g is not None else output_h, training=training) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = tf.transpose(output, perm=(1, 0, 2)) if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(tf.transpose(h, perm=(1, 0, 2)) for hs in hidden_states for h in hs) else: hidden_states = tuple(tf.transpose(hs, perm=(1, 0, 2)) for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(tf.transpose(attn_stream, perm=(2, 3, 0, 1)) for attn_stream in t) for t in attentions ) else: attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return TFXLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) class TFXLNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig base_model_prefix = "transformer" @dataclass class TFXLNetModelOutput(ModelOutput): """ Output type of [`TFXLNetModel`]. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`list[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. 
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor | None = None mems: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None @dataclass class TFXLNetLMHeadModelOutput(ModelOutput): """ Output type of [`TFXLNetLMHeadModel`]. Args: loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided) Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`list[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. 
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None mems: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None @dataclass class TFXLNetForSequenceClassificationOutput(ModelOutput): """ Output type of [`TFXLNetForSequenceClassification`]. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (`list[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None mems: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None @dataclass class TFXLNetForTokenClassificationOutput(ModelOutput): """ Output type of [`TFXLNetForTokenClassificationOutput`]. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (`list[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None mems: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None @dataclass class TFXLNetForMultipleChoiceOutput(ModelOutput): """ Output type of [`TFXLNetForMultipleChoice`]. Args: loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`tf.Tensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). mems (`list[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None mems: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] 
| None = None @dataclass class TFXLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of [`TFXLNetForQuestionAnsweringSimple`]. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`tf.Tensor` of shape `(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (`tf.Tensor` of shape `(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (`list[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None start_logits: tf.Tensor | None = None end_logits: tf.Tensor | None = None mems: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None XLNET_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! 
</Tip> Parameters: config ([`XLNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. 
If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) input_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
"""


@add_start_docstrings(
    "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
    XLNET_START_DOCSTRING,
)
class TFXLNetModel(TFXLNetPreTrainedModel):
    # Thin task-less wrapper: exposes the raw hidden states of TFXLNetMainLayer.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFXLNetMainLayer(config, name="transformer")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFXLNetModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        mems: np.ndarray | tf.Tensor | None = None,
        perm_mask: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        input_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_mems: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        training: bool = False,
    ) -> TFXLNetModelOutput | tuple[tf.Tensor]:
        # All arguments are forwarded unchanged to the main layer.
        outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build the sub-layer under its own name scope so weight names match pretrained checkpoints.
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)


@add_start_docstrings(
    """
    XLNet Model with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    XLNET_START_DOCSTRING,
)
class TFXLNetLMHeadModel(TFXLNetPreTrainedModel, TFCausalLanguageModelingLoss):
    # Causal/permutation LM head; the projection weights are tied to the input word embeddings.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFXLNetMainLayer(config, name="transformer")
        self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name="lm_loss")
        # generate fails to convert to a graph with XLNet
        self.supports_xla_generation = False

    def get_lm_head(self):
        return self.lm_loss

    def get_prefix_bias_name(self):
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return self.name + "/" + self.lm_loss.name

    def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_mems=None, **kwargs):
        # Add dummy token at the end (no attention on this one)
        effective_batch_size = inputs.shape[0]
        dummy_token = tf.zeros((effective_batch_size, 1), dtype=inputs.dtype)

        # At every pass, the attention values for the new token and the two last generated tokens
        # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have
        # offset = 1; offset = 2 seems to have slightly better computation.
        offset = 2

        if past_key_values:
            input_ids = tf.concat([inputs[:, -offset:], dummy_token], axis=1)
        else:
            input_ids = tf.concat([inputs, dummy_token], axis=1)

        # Build permutation mask so that previous tokens don't see last token
        sequence_length = input_ids.shape[1]
        perm_mask = tf.zeros((effective_batch_size, sequence_length, sequence_length - 1))
        perm_mask_seq_end = tf.ones((effective_batch_size, sequence_length, 1))
        perm_mask = tf.concat([perm_mask, perm_mask_seq_end], axis=-1)

        # We'll only predict the last token
        target_mapping = tf.zeros((effective_batch_size, 1, sequence_length - 1))
        target_mapping_seq_end = tf.ones((effective_batch_size, 1, 1))
        target_mapping = tf.concat([target_mapping, target_mapping_seq_end], axis=-1)

        inputs = {
            "input_ids": input_ids,
            "perm_mask": perm_mask,
            "target_mapping": target_mapping,
            "use_mems": use_mems,
        }

        # if past is defined in model kwargs then use it for faster decoding
        if past_key_values:
            # Drop the last `offset` positions of each cached layer; they get recomputed this pass.
            inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values)

        return inputs

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFXLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        mems: np.ndarray | tf.Tensor | None = None,
        perm_mask: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        input_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_mems: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: bool = False,
    ) -> TFXLNetLMHeadModelOutput | tuple[tf.Tensor]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
            config.vocab_size - 1]`.

        Return:

        Examples:

        ```python
        >>> import tensorflow as tf
        >>> import numpy as np
        >>> from transformers import AutoTokenizer, TFXLNetLMHeadModel

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
        >>> model = TFXLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")

        >>> # We show how to setup inputs to predict a next token using a bi-directional context.
        >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[
        ...     None, :
        ... ]  # We will predict the masked token

        >>> perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1]))
        >>> perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token

        >>> target_mapping = np.zeros(
        ...     (1, 1, input_ids.shape[1])
        ... )  # Shape [1, 1, seq_length] => let's predict one token

        >>> target_mapping[
        ...     0, 0, -1
        ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

        >>> outputs = model(
        ...     input_ids,
        ...     perm_mask=tf.constant(perm_mask, dtype=tf.float32),
        ...     target_mapping=tf.constant(target_mapping, dtype=tf.float32),
        ... )

        >>> next_token_logits = outputs[
        ...     0
        ... ]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
        ```"""
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_state = transformer_outputs[0]
        # Project hidden states to vocabulary logits (weights tied to the word embedding).
        logits = self.lm_loss(hidden_state, training=training)

        loss = None
        if labels is not None:
            loss = self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFXLNetLMHeadModelOutput(
            loss=loss,
            logits=logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build sub-layers under their own name scopes so weight names match pretrained checkpoints.
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "lm_loss", None) is not None:
            with tf.name_scope(self.lm_loss.name):
                self.lm_loss.build(None)


@add_start_docstrings(
    """
    XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
    e.g. for GLUE tasks.
    """,
    XLNET_START_DOCSTRING,
)
class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel, TFSequenceClassificationLoss):
    # Sequence-level head: pooled summary of the hidden states -> Dense(num_labels).
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.transformer = TFXLNetMainLayer(config, name="transformer")
        self.sequence_summary = TFSequenceSummary(
            config, initializer_range=config.initializer_range, name="sequence_summary"
        )
        self.logits_proj = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFXLNetForSequenceClassificationOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        mems: np.ndarray | tf.Tensor | None = None,
        perm_mask: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        input_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_mems: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: bool = False,
    ) -> TFXLNetForSequenceClassificationOutput | tuple[tf.Tensor]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        output = transformer_outputs[0]

        # Pool the sequence into a single vector, then project to label logits.
        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFXLNetForSequenceClassificationOutput(
            loss=loss,
            logits=logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build sub-layers under their own name scopes so weight names match pretrained checkpoints.
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "sequence_summary", None) is not None:
            with tf.name_scope(self.sequence_summary.name):
                self.sequence_summary.build(None)
        if getattr(self, "logits_proj", None) is not None:
            with tf.name_scope(self.logits_proj.name):
                self.logits_proj.build([None, None, self.config.d_model])


@add_start_docstrings(
    """
    XLNET Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    XLNET_START_DOCSTRING,
)
class TFXLNetForMultipleChoice(TFXLNetPreTrainedModel, TFMultipleChoiceLoss):
    # Multiple-choice head: choices are flattened into the batch dimension, scored with a
    # 1-unit Dense layer, then reshaped back to (batch_size, num_choices).
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.transformer = TFXLNetMainLayer(config, name="transformer")
        self.sequence_summary = TFSequenceSummary(
            config, initializer_range=config.initializer_range, name="sequence_summary"
        )
        self.logits_proj = keras.layers.Dense(
            1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFXLNetForMultipleChoiceOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        input_mask: np.ndarray | tf.Tensor | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        mems: np.ndarray | tf.Tensor | None = None,
        perm_mask: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_mems: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: bool = False,
    ) -> TFXLNetForMultipleChoiceOutput | tuple[tf.Tensor]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
        """

        # Inputs arrive as (batch_size, num_choices, seq_length); derive both sizes from whichever is given.
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]

        # Collapse (batch, choices) into one batch axis so a single transformer pass scores every choice.
        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
        flat_input_mask = tf.reshape(input_mask, (-1, seq_length)) if input_mask is not None else None
        flat_inputs_embeds = (
            tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
            if inputs_embeds is not None
            else None
        )
        transformer_outputs = self.transformer(
            flat_input_ids,
            flat_attention_mask,
            mems,
            perm_mask,
            target_mapping,
            flat_token_type_ids,
            flat_input_mask,
            head_mask,
            flat_inputs_embeds,
            use_mems,
            output_attentions,
            output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        output = transformer_outputs[0]
        logits = self.sequence_summary(output)
        logits = self.logits_proj(logits)
        # One scalar score per choice, re-grouped per example.
        reshaped_logits = tf.reshape(logits, (-1, num_choices))
        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)

        if not return_dict:
            output = (reshaped_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFXLNetForMultipleChoiceOutput(
            loss=loss,
            logits=reshaped_logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build sub-layers under their own name scopes so weight names match pretrained checkpoints.
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "sequence_summary", None) is not None:
            with tf.name_scope(self.sequence_summary.name):
                self.sequence_summary.build(None)
        if getattr(self, "logits_proj", None) is not None:
            with tf.name_scope(self.logits_proj.name):
                self.logits_proj.build([None, None, self.config.d_model])


@add_start_docstrings(
    """
    XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    XLNET_START_DOCSTRING,
)
class TFXLNetForTokenClassification(TFXLNetPreTrainedModel, TFTokenClassificationLoss):
    # Token-level classification head: a Dense layer applied to every hidden state.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.transformer = TFXLNetMainLayer(config, name="transformer")
        self.classifier = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFXLNetForTokenClassificationOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        mems: np.ndarray | tf.Tensor | None = None,
        perm_mask: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        input_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_mems: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: bool = False,
    ) -> TFXLNetForTokenClassificationOutput | tuple[tf.Tensor]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        output = transformer_outputs[0]
        # Per-token label logits.
        logits = self.classifier(output)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFXLNetForTokenClassificationOutput(
            loss=loss,
            logits=logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build sub-layers under their own name scopes so weight names match pretrained checkpoints.
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                # `hidden_size` resolves to `d_model` via XLNetConfig's attribute map.
                self.classifier.build([None, None, self.config.hidden_size])


@add_start_docstrings(
    """
    XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    XLNET_START_DOCSTRING,
)
class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel, TFQuestionAnsweringLoss):
    # Extractive QA head: one Dense layer producing start/end span logits per token.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFXLNetMainLayer(config, name="transformer")
        self.qa_outputs = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFXLNetForQuestionAnsweringSimpleOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        mems: np.ndarray | tf.Tensor | None = None,
        perm_mask: np.ndarray | tf.Tensor | None = None,
        target_mapping: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        input_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_mems: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        start_positions: np.ndarray | tf.Tensor | None = None,
        end_positions: np.ndarray | tf.Tensor | None = None,
        training: bool = False,
    ) -> TFXLNetForQuestionAnsweringSimpleOutput | tuple[tf.Tensor]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = transformer_outputs[0]

        # One Dense projection yields both logit sets; split them along the last axis.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFXLNetForQuestionAnsweringSimpleOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build sub-layers under their own name scopes so weight names match pretrained checkpoints.
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "qa_outputs", None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.hidden_size])


__all__ = [
    "TFXLNetForMultipleChoice",
    "TFXLNetForQuestionAnsweringSimple",
    "TFXLNetForSequenceClassification",
    "TFXLNetForTokenClassification",
    "TFXLNetLMHeadModel",
    "TFXLNetMainLayer",
    "TFXLNetModel",
    "TFXLNetPreTrainedModel",
]
transformers/src/transformers/models/xlnet/modeling_tf_xlnet.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/modeling_tf_xlnet.py", "repo_id": "transformers", "token_count": 34875 }
566
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ZoeDepth checkpoints from the original repository. URL: https://github.com/isl-org/ZoeDepth.

Original logits where obtained by running the following code:
!git clone -b understanding_zoedepth https://github.com/NielsRogge/ZoeDepth
!python inference.py
"""

import argparse
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, ZoeDepthConfig, ZoeDepthForDepthEstimation, ZoeDepthImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_zoedepth_config(model_name):
    """Build the ZoeDepthConfig (and image size) for one of the ZoeD_N / ZoeD_K / ZoeD_NK checkpoints."""
    image_size = 384
    # BEiT-large backbone; the four output stages feed the DPT-style neck.
    backbone_config = BeitConfig(
        image_size=image_size,
        num_hidden_layers=24,
        hidden_size=1024,
        intermediate_size=4096,
        num_attention_heads=16,
        use_relative_position_bias=True,
        reshape_hidden_states=False,
        out_features=["stage6", "stage12", "stage18", "stage24"],  # beit-large-512 uses [5, 11, 17, 23],
    )

    neck_hidden_sizes = [256, 512, 1024, 1024]
    bin_centers_type = "softplus" if model_name in ["ZoeD_N", "ZoeD_NK"] else "normed"
    # NOTE(review): an unknown `model_name` leaves `bin_configurations` unbound and raises a
    # NameError below — consider an explicit `raise ValueError` for unsupported names.
    if model_name == "ZoeD_NK":
        bin_configurations = [
            {"name": "nyu", "n_bins": 64, "min_depth": 1e-3, "max_depth": 10.0},
            {"name": "kitti", "n_bins": 64, "min_depth": 1e-3, "max_depth": 80.0},
        ]
    elif model_name in ["ZoeD_N", "ZoeD_K"]:
        bin_configurations = [
            {"name": "nyu", "n_bins": 64, "min_depth": 1e-3, "max_depth": 10.0},
        ]
    config = ZoeDepthConfig(
        backbone_config=backbone_config,
        neck_hidden_sizes=neck_hidden_sizes,
        bin_centers_type=bin_centers_type,
        bin_configurations=bin_configurations,
        # The patch transformer only exists in the two-headed NK model.
        num_patch_transformer_layers=4 if model_name == "ZoeD_NK" else None,
        patch_transformer_hidden_size=128 if model_name == "ZoeD_NK" else None,
        patch_transformer_intermediate_size=1024 if model_name == "ZoeD_NK" else None,
        patch_transformer_num_attention_heads=4 if model_name == "ZoeD_NK" else None,
    )

    return config, image_size


def rename_key(name):
    """Map one original ZoeDepth state-dict key to its Transformers equivalent (order of the rules matters)."""
    # Transformer backbone
    if "core.core.pretrained.model.blocks" in name:
        name = name.replace("core.core.pretrained.model.blocks", "backbone.encoder.layer")
    if "core.core.pretrained.model.patch_embed.proj" in name:
        name = name.replace(
            "core.core.pretrained.model.patch_embed.proj", "backbone.embeddings.patch_embeddings.projection"
        )
    if "core.core.pretrained.model.cls_token" in name:
        name = name.replace("core.core.pretrained.model.cls_token", "backbone.embeddings.cls_token")
    if "norm1" in name and "patch_transformer" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "patch_transformer" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "gamma_1" in name:
        name = name.replace("gamma_1", "lambda_1")
    if "gamma_2" in name:
        name = name.replace("gamma_2", "lambda_2")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn.relative_position_bias_table" in name:
        name = name.replace(
            "attn.relative_position_bias_table",
            "attention.attention.relative_position_bias.relative_position_bias_table",
        )
    if "attn.relative_position_index" in name:
        name = name.replace(
            "attn.relative_position_index", "attention.attention.relative_position_bias.relative_position_index"
        )

    # activation postprocessing (readout projections + resize blocks)
    if "core.core.pretrained.act_postprocess1.0.project" in name:
        name = name.replace(
            "core.core.pretrained.act_postprocess1.0.project", "neck.reassemble_stage.readout_projects.0"
        )
    if "core.core.pretrained.act_postprocess2.0.project" in name:
        name = name.replace(
            "core.core.pretrained.act_postprocess2.0.project", "neck.reassemble_stage.readout_projects.1"
        )
    if "core.core.pretrained.act_postprocess3.0.project" in name:
        name = name.replace(
            "core.core.pretrained.act_postprocess3.0.project", "neck.reassemble_stage.readout_projects.2"
        )
    if "core.core.pretrained.act_postprocess4.0.project" in name:
        name = name.replace(
            "core.core.pretrained.act_postprocess4.0.project", "neck.reassemble_stage.readout_projects.3"
        )

    if "core.core.pretrained.act_postprocess1.3" in name:
        name = name.replace("core.core.pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "core.core.pretrained.act_postprocess2.3" in name:
        name = name.replace("core.core.pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "core.core.pretrained.act_postprocess3.3" in name:
        name = name.replace("core.core.pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "core.core.pretrained.act_postprocess4.3" in name:
        name = name.replace("core.core.pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")

    # note: stage 3 has no resize block, hence no `act_postprocess3.4` rule
    if "core.core.pretrained.act_postprocess1.4" in name:
        name = name.replace("core.core.pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "core.core.pretrained.act_postprocess2.4" in name:
        name = name.replace("core.core.pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "core.core.pretrained.act_postprocess4.4" in name:
        name = name.replace("core.core.pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")

    # scratch convolutions
    if "core.core.scratch.layer1_rn.weight" in name:
        name = name.replace("core.core.scratch.layer1_rn.weight", "neck.convs.0.weight")
    if "core.core.scratch.layer2_rn.weight" in name:
        name = name.replace("core.core.scratch.layer2_rn.weight", "neck.convs.1.weight")
    if "core.core.scratch.layer3_rn.weight" in name:
        name = name.replace("core.core.scratch.layer3_rn.weight", "neck.convs.2.weight")
    if "core.core.scratch.layer4_rn.weight" in name:
        name = name.replace("core.core.scratch.layer4_rn.weight", "neck.convs.3.weight")

    # fusion layers
    # tricky here: mapping = {1:3, 2:2, 3:1, 4:0}
    if "core.core.scratch.refinenet1" in name:
        name = name.replace("core.core.scratch.refinenet1", "neck.fusion_stage.layers.3")
    if "core.core.scratch.refinenet2" in name:
        name = name.replace("core.core.scratch.refinenet2", "neck.fusion_stage.layers.2")
    if "core.core.scratch.refinenet3" in name:
        name = name.replace("core.core.scratch.refinenet3", "neck.fusion_stage.layers.1")
    if "core.core.scratch.refinenet4" in name:
        name = name.replace("core.core.scratch.refinenet4", "neck.fusion_stage.layers.0")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name and "residual_layer" in name:
        name = name.replace("conv2", "convolution2")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")

    # relative depth estimation head
    if "core.core.scratch.output_conv.0" in name:
        name = name.replace("core.core.scratch.output_conv.0", "relative_head.conv1")
    if "core.core.scratch.output_conv.2" in name:
        name = name.replace("core.core.scratch.output_conv.2", "relative_head.conv2")
    if "core.core.scratch.output_conv.4" in name:
        name = name.replace("core.core.scratch.output_conv.4", "relative_head.conv3")

    # patch transformer
    if "patch_transformer" in name:
        name = name.replace("patch_transformer", "metric_head.patch_transformer")

    if "mlp_classifier.0" in name:
        name = name.replace("mlp_classifier.0", "metric_head.mlp_classifier.linear1")
    if "mlp_classifier.2" in name:
        name = name.replace("mlp_classifier.2", "metric_head.mlp_classifier.linear2")

    if "projectors" in name:
        name = name.replace("projectors", "metric_head.projectors")

    if "seed_bin_regressors" in name:
        name = name.replace("seed_bin_regressors", "metric_head.seed_bin_regressors")
    if "seed_bin_regressor" in name and "seed_bin_regressors" not in name:
        name = name.replace("seed_bin_regressor", "metric_head.seed_bin_regressor")
    if "seed_projector" in name:
        name = name.replace("seed_projector", "metric_head.seed_projector")
    if "_net.0" in name:
        name = name.replace("_net.0", "conv1")
    if "_net.2" in name:
        name = name.replace("_net.2", "conv2")

    if "attractors" in name:
        name = name.replace("attractors", "metric_head.attractors")

    if "conditional_log_binomial" in name:
        name = name.replace("conditional_log_binomial", "metric_head.conditional_log_binomial")

    # metric depth estimation head
    if "conv2" in name and "metric_head" not in name and "attractors" not in name and "relative_head" not in name:
        name = name.replace("conv2", "metric_head.conv2")

    if "transformer_encoder.layers" in name:
        name = name.replace("transformer_encoder.layers", "transformer_encoder")

    return name


def read_in_q_k_v_metric_head(state_dict):
    # Split each fused in_proj matrix/bias of the patch-transformer self-attention
    # into separate query/key/value tensors expected by the HF implementation.
    hidden_size = 128
    for i in range(4):
        # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"patch_transformer.transformer_encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"patch_transformer.transformer_encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"patch_transformer.transformer_encoder.{i}.self_attn.query.weight"] = in_proj_weight[
            :hidden_size, :
        ]
        state_dict[f"patch_transformer.transformer_encoder.{i}.self_attn.query.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"patch_transformer.transformer_encoder.{i}.self_attn.key.weight"] = in_proj_weight[
hidden_size : hidden_size * 2, : ] state_dict[f"patch_transformer.transformer_encoder.{i}.self_attn.key.bias"] = in_proj_bias[ hidden_size : hidden_size * 2 ] state_dict[f"patch_transformer.transformer_encoder.{i}.self_attn.value.weight"] = in_proj_weight[ -hidden_size:, : ] state_dict[f"patch_transformer.transformer_encoder.{i}.self_attn.value.bias"] = in_proj_bias[-hidden_size:] def convert_state_dict(orig_state_dict): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) # rename key new_name = rename_key(key) orig_state_dict[new_name] = val return orig_state_dict def remove_ignore_keys(state_dict): for key in state_dict.copy(): if ( "fc_norm" in key or "relative_position_index" in key or "k_idx" in key or "K_minus_1" in key or "core.core.pretrained.model.head" in key ): state_dict.pop(key, None) # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): hidden_size = config.backbone_config.hidden_size for i in range(config.backbone_config.num_hidden_layers): # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"core.core.pretrained.model.blocks.{i}.attn.qkv.weight") q_bias = state_dict.pop(f"core.core.pretrained.model.blocks.{i}.attn.q_bias") v_bias = state_dict.pop(f"core.core.pretrained.model.blocks.{i}.attn.v_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :] state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = q_bias state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ hidden_size : hidden_size * 2, : ] state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :] state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = v_bias # We will verify 
# our results on an image
def prepare_img():
    """Download and return the reference RGB test image from the ZoeDepth space."""
    filepath = hf_hub_download(repo_id="shariqfarooq/ZoeDepth", filename="examples/person_1.jpeg", repo_type="space")
    image = Image.open(filepath).convert("RGB")
    return image


@torch.no_grad()
def convert_zoedepth_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our ZoeDepth structure.

    Loads the original checkpoint via torch.hub, converts its state dict, verifies the
    converted model against stored reference pixel values / depth slices, and optionally
    saves and/or pushes the result. Raises AssertionError if verification fails.
    """

    # define ZoeDepth configuration based on URL
    config, _ = get_zoedepth_config(model_name)

    # load original model
    original_model = torch.hub.load(
        "NielsRogge/ZoeDepth:understanding_zoedepth", model_name, pretrained=True, force_reload=True
    )
    original_model.eval()
    state_dict = original_model.state_dict()
    print("Original state dict:")
    for name, param in state_dict.items():
        print(name, param.shape)

    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    if model_name == "ZoeD_NK":
        read_in_q_k_v_metric_head(state_dict)

    # rename keys
    state_dict = convert_state_dict(state_dict)
    # remove certain keys
    remove_ignore_keys(state_dict)

    # load HuggingFace model
    model = ZoeDepthForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify image processor
    image = prepare_img()

    image_processor = ZoeDepthImageProcessor()
    pixel_values = image_processor(image, return_tensors="pt").pixel_values

    filepath = hf_hub_download(
        repo_id="nielsr/test-image",
        filename="zoedepth_pixel_values.pt",
        repo_type="dataset",
    )
    original_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True)
    assert torch.allclose(pixel_values, original_pixel_values)

    # verify logits
    # this was done on a resized version of the cats image (384x384)
    filepath = hf_hub_download(
        repo_id="nielsr/test-image",
        filename="zoedepth_pixel_values.pt",
        repo_type="dataset",
        revision="1865dbb81984f01c89e83eec10f8d07efd10743d",
    )
    cats_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True)
    depth = model(cats_pixel_values).predicted_depth

    # Verify logits
    # These were obtained by inserting the pixel_values at the patch embeddings of BEiT
    if model_name == "ZoeD_N":
        expected_shape = torch.Size([1, 384, 384])
        expected_slice = torch.tensor([[1.0328, 1.0604, 1.0747], [1.0816, 1.1293, 1.1456], [1.1117, 1.1629, 1.1766]])
    elif model_name == "ZoeD_K":
        expected_shape = torch.Size([1, 384, 384])
        expected_slice = torch.tensor([[1.6567, 1.6852, 1.7065], [1.6707, 1.6764, 1.6713], [1.7195, 1.7166, 1.7118]])
    elif model_name == "ZoeD_NK":
        expected_shape = torch.Size([1, 384, 384])
        expected_slice = torch.tensor([[1.1228, 1.1079, 1.1382], [1.1807, 1.1658, 1.1891], [1.2344, 1.2094, 1.2317]])
    print("Shape of depth:", depth.shape)
    print("First 3x3 slice of depth:", depth[0, :3, :3])
    assert depth.shape == torch.Size(expected_shape)
    assert torch.allclose(depth[0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_name_to_repo_id = {
            "ZoeD_N": "zoedepth-nyu",
            "ZoeD_K": "zoedepth-kitti",
            "ZoeD_NK": "zoedepth-nyu-kitti",
        }
        print("Pushing model and processor to the hub...")
        repo_id = model_name_to_repo_id[model_name]
        model.push_to_hub(f"Intel/{repo_id}")
        image_processor = ZoeDepthImageProcessor()
        image_processor.push_to_hub(f"Intel/{repo_id}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ZoeD_N",
        choices=["ZoeD_N", "ZoeD_K", "ZoeD_NK"],
        type=str,
        help="Name of the original ZoeDepth checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )

    args = parser.parse_args()
    convert_zoedepth_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/zoedepth/convert_zoedepth_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/zoedepth/convert_zoedepth_to_hf.py", "repo_id": "transformers", "token_count": 7653 }
567
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import csv import importlib import json import os import pickle import sys import traceback import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Optional, Union from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..generation import GenerationConfig from ..image_processing_utils import BaseImageProcessor from ..modelcard import ModelCard from ..models.auto import AutoConfig, AutoTokenizer from ..processing_utils import ProcessorMixin from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( ModelOutput, PushToHubMixin, add_end_docstrings, copy_func, infer_framework, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xpu_available, logging, ) GenericTensor = Union[list["GenericTensor"], "torch.Tensor", "tf.Tensor"] if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TFAutoModel if is_torch_available(): import torch from torch.utils.data import DataLoader, Dataset from ..modeling_utils import PreTrainedModel from 
..models.auto.modeling_auto import AutoModel

    # Re-export for backward compatibility
    from .pt_utils import KeyDataset
else:
    Dataset = None
    KeyDataset = None

if TYPE_CHECKING:
    from ..modeling_tf_utils import TFPreTrainedModel
    from ..modeling_utils import PreTrainedModel

logger = logging.get_logger(__name__)


def no_collate_fn(items):
    """Identity collate function: only valid when the DataLoader batch size is 1."""
    if len(items) != 1:
        raise ValueError("This collate_fn is meant to be used with batch_size=1")
    return items[0]


def _pad(items, key, padding_value, padding_side):
    """Pad and stack `items[i][key]` tensors along dim 0; non-tensor values are returned as a list.

    Padding happens along dim 1 (assumed to be the sequence dimension for dim 2/3/4
    tensors — TODO confirm for all pipeline inputs) using `padding_value`, on the
    `padding_side` ("left" or "right").
    """
    batch_size = len(items)
    if isinstance(items[0][key], torch.Tensor):
        # Others include `attention_mask` etc...
        shape = items[0][key].shape
        dim = len(shape)
        if dim == 1:
            # We have a list of 1-dim torch tensors, which can be stacked without padding
            return torch.cat([item[key] for item in items], dim=0)
        if key in ["pixel_values", "image"]:
            # This is probable image so padding shouldn't be necessary
            # B, C, H, W
            return torch.cat([item[key] for item in items], dim=0)
        elif dim == 4 and key == "input_features":
            # this is probably a mel spectrogram batched
            return torch.cat([item[key] for item in items], dim=0)
        max_length = max(item[key].shape[1] for item in items)
        min_length = min(item[key].shape[1] for item in items)
        dtype = items[0][key].dtype

        if dim == 2:
            if max_length == min_length:
                # Bypass for `ImageGPT` which doesn't provide a padding value, yet
                # we can consistently pad since the size should be matching
                return torch.cat([item[key] for item in items], dim=0)
            tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
        elif dim == 3:
            tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value
        elif dim == 4:
            tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value

        # Copy each (un-batched) item into the padded output at the requested side.
        for i, item in enumerate(items):
            if dim == 2:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0])] = item[key][0].clone()
            elif dim == 3:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0]), :] = item[key][0].clone()
            elif dim == 4:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0]), :, :] = item[key][0].clone()

        return tensor
    else:
        return [item[key] for item in items]


def pad_collate_fn(tokenizer, feature_extractor):
    """Build a collate_fn that pads heterogeneous pipeline outputs into batched tensors.

    Padding values/sides are taken from the tokenizer and/or feature extractor; raises
    ValueError when neither is available, the tokenizer lacks a pad token, or the two
    disagree on the padding side.
    """
    # Tokenizer
    t_padding_side = None
    # Feature extractor
    f_padding_side = None
    if tokenizer is None and feature_extractor is None:
        raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching")
    if tokenizer is not None:
        if tokenizer.pad_token_id is None:
            raise ValueError(
                "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with "
                "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`."
            )
        else:
            t_padding_value = tokenizer.pad_token_id
            t_padding_side = tokenizer.padding_side
    if feature_extractor is not None:
        # Feature extractor can be images, where no padding is expected
        f_padding_value = getattr(feature_extractor, "padding_value", None)
        f_padding_side = getattr(feature_extractor, "padding_side", None)

    if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side:
        raise ValueError(
            f"The feature extractor, and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}"
        )
    padding_side = "right"
    if t_padding_side is not None:
        padding_side = t_padding_side
    if f_padding_side is not None:
        # feature-extractor side wins when both are set (they are equal at this point anyway)
        padding_side = f_padding_side

    def inner(items):
        # All items in a batch must expose the same keys.
        keys = set(items[0].keys())
        for item in items:
            if set(item.keys()) != keys:
                raise ValueError(
                    f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !="
                    f" {keys})"
                )
        # input_values, input_pixels, input_ids, ...
        padded = {}
        for key in keys:
            if key in {"input_ids"}:
                # ImageGPT uses a feature extractor
                if tokenizer is None and feature_extractor is not None:
                    _padding_value = f_padding_value
                else:
                    _padding_value = t_padding_value
            elif key in {"input_values", "pixel_values", "input_features"}:
                _padding_value = f_padding_value
            elif key in {"p_mask", "special_tokens_mask"}:
                _padding_value = 1
            elif key in {"attention_mask", "token_type_ids"}:
                _padding_value = 0
            else:
                # This is likely another random key maybe even user provided
                _padding_value = 0
            padded[key] = _pad(items, key, _padding_value, padding_side)
        return padded

    return inner


def infer_framework_load_model(
    model,
    config: AutoConfig,
    model_classes: Optional[dict[str, tuple[type]]] = None,
    task: Optional[str] = None,
    framework: Optional[str] = None,
    **model_kwargs,
):
    """
    Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model).

    If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is
    actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to
    instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for `model`, PyTorch is selected.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`):
            The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from.
        config ([`AutoConfig`]):
            The config associated with the model to help using the correct class
        model_classes (dictionary `str` to `type`, *optional*):
            A mapping framework to class.
        task (`str`):
            The task defining which pipeline will be returned.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.

    Returns:
        `Tuple`: A tuple framework, model.
    """
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        model_kwargs["_from_pipeline"] = task
        class_tuple = ()
        look_pt = is_torch_available() and framework in {"pt", None}
        look_tf = is_tf_available() and framework in {"tf", None}
        if model_classes:
            if look_pt:
                class_tuple = class_tuple + model_classes.get("pt", (AutoModel,))
            if look_tf:
                class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,))
        if config.architectures:
            classes = []
            for architecture in config.architectures:
                transformers_module = importlib.import_module("transformers")
                if look_pt:
                    _class = getattr(transformers_module, architecture, None)
                    if _class is not None:
                        classes.append(_class)
                if look_tf:
                    _class = getattr(transformers_module, f"TF{architecture}", None)
                    if _class is not None:
                        classes.append(_class)
            class_tuple = class_tuple + tuple(classes)

        if len(class_tuple) == 0:
            raise ValueError(f"Pipeline cannot infer suitable model classes from {model}")

        all_traceback = {}
        # Try each candidate class in order; the first successful load wins.
        for model_class in class_tuple:
            kwargs = model_kwargs.copy()
            if framework == "pt" and model.endswith(".h5"):
                kwargs["from_tf"] = True
                logger.warning(
                    "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
                    "Trying to load the model with PyTorch."
                )
            elif framework == "tf" and model.endswith(".bin"):
                kwargs["from_pt"] = True
                logger.warning(
                    "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
                    "Trying to load the model with Tensorflow."
                )
            try:
                model = model_class.from_pretrained(model, **kwargs)
                if hasattr(model, "eval"):
                    model = model.eval()
                # Stop loading on the first successful load.
                break
            except (OSError, ValueError, TypeError, RuntimeError):
                # `from_pretrained` may raise a `TypeError` or `RuntimeError` when the requested `dtype`
                # is not supported on the execution device (e.g. bf16 on a consumer GPU). We capture those so
                # we can transparently retry the load in float32 before surfacing an error to the user.
                # NOTE(review): this retry also fires for errors unrelated to dtype (e.g. a missing
                # checkpoint raises OSError) whenever `dtype` is in kwargs — confirm this is intended,
                # as it doubles the load attempts and records the retry's traceback, not the original.
                fallback_tried = False
                if is_torch_available() and ("dtype" in kwargs):
                    import torch  # local import to avoid unnecessarily importing torch for TF/JAX users

                    fallback_tried = True
                    fp32_kwargs = kwargs.copy()
                    fp32_kwargs["dtype"] = torch.float32
                    try:
                        model = model_class.from_pretrained(model, **fp32_kwargs)
                        if hasattr(model, "eval"):
                            model = model.eval()
                        logger.warning(
                            "Falling back to torch.float32 because loading with the original dtype failed on the"
                            " target device."
                        )
                        break
                    except Exception:
                        # If it still fails, capture the traceback and continue to the next class.
                        all_traceback[model_class.__name__] = traceback.format_exc()
                        continue
                # If no fallback was attempted or it also failed, record the original traceback.
                if not fallback_tried:
                    all_traceback[model_class.__name__] = traceback.format_exc()
                    continue

        # `model` still being a str means every candidate class failed to load it.
        if isinstance(model, str):
            error = ""
            for class_name, trace in all_traceback.items():
                error += f"while loading with {class_name}, an error is thrown:\n{trace}\n"
            raise ValueError(
                f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
            )

    if framework is None:
        framework = infer_framework(model.__class__)
    return framework, model


def infer_framework_from_model(
    model,
    model_classes: Optional[dict[str, tuple[type]]] = None,
    task: Optional[str] = None,
    framework: Optional[str] = None,
    **model_kwargs,
):
    """
    Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model).

    If `model` is instantiated, this function will just infer the framework from the model class.
    Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`.
    Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for `model`, PyTorch is selected.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`):
            The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from.
        model_classes (dictionary `str` to `type`, *optional*):
            A mapping framework to class.
        task (`str`):
            The task defining which pipeline will be returned.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.

    Returns:
        `Tuple`: A tuple framework, model.
    """
    if isinstance(model, str):
        config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs)
    else:
        config = model.config
    return infer_framework_load_model(
        model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs
    )


def get_framework(model, revision: Optional[str] = None):
    """
    Select framework (TensorFlow or PyTorch) to use.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`):
            If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
            the model name). If no specific model is provided, defaults to using PyTorch.
    """
    warnings.warn(
        "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
        FutureWarning,
    )
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        if is_torch_available() and not is_tf_available():
            model = AutoModel.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model = TFAutoModel.from_pretrained(model, revision=revision)
        else:
            # Both frameworks installed: prefer PyTorch, fall back to TF on load failure.
            try:
                model = AutoModel.from_pretrained(model, revision=revision)
            except OSError:
                model = TFAutoModel.from_pretrained(model, revision=revision)

    framework = infer_framework(model.__class__)
    return framework


def get_default_model_and_revision(
    targeted_task: dict, framework: Optional[str], task_options: Optional[Any]
) -> tuple[str, str]:
    """
    Select a default model to use for a given task. Defaults to pytorch if ambiguous.

    Args:
        targeted_task (`Dict`):
            Dictionary representing the given task, that should contain default models

        framework (`str`, None)
            "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.

        task_options (`Any`, None)
            Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
            translation task.

    Returns

        Tuple:
            - `str` The model string representing the default model for this pipeline.
            - `str` The revision of the model.
    """
    # If only one framework is installed, that decides regardless of the caller's hint.
    if is_torch_available() and not is_tf_available():
        framework = "pt"
    elif is_tf_available() and not is_torch_available():
        framework = "tf"

    defaults = targeted_task["default"]
    if task_options:
        if task_options not in defaults:
            raise ValueError(f"The task does not provide any default models for options {task_options}")
        default_models = defaults[task_options]["model"]
    elif "model" in defaults:
        default_models = targeted_task["default"]["model"]
    else:
        # XXX This error message needs to be updated to be more generic if more tasks are going to become
        # parametrized
        raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_xx_to_yy"')

    if framework is None:
        framework = "pt"

    return default_models[framework]


def load_assistant_model(
    model: "PreTrainedModel",
    assistant_model: Optional[Union[str, "PreTrainedModel"]],
    assistant_tokenizer: Optional[PreTrainedTokenizer],
) -> tuple[Optional["PreTrainedModel"], Optional[PreTrainedTokenizer]]:
    """
    Prepares the assistant model and the assistant tokenizer for a pipeline whose model that can call `generate`.

    Args:
        model ([`PreTrainedModel`]):
            The main model that will be used by the pipeline to make predictions.
        assistant_model (`str` or [`PreTrainedModel`], *optional*):
            The assistant model that will be used by the pipeline to make predictions.
        assistant_tokenizer ([`PreTrainedTokenizer`], *optional*):
            The assistant tokenizer that will be used by the pipeline to encode data for the model.

    Returns:
        Tuple: The loaded assistant model and (optionally) the loaded tokenizer.
    """
    if not model.can_generate() or assistant_model is None:
        return None, None

    # Assisted generation is PyTorch-only.
    if getattr(model, "framework") != "pt" or not isinstance(model, PreTrainedModel):
        raise ValueError(
            "Assisted generation, triggered by the `assistant_model` argument, is only available for "
            "`PreTrainedModel` model instances. For instance, TF or JAX models are not supported."
        )

    # If the model is passed as a string, load the model and the corresponding tokenizer
    if isinstance(assistant_model, str):
        assistant_config = AutoConfig.from_pretrained(assistant_model)
        _, loaded_assistant_model = infer_framework_load_model(assistant_model, config=assistant_config)
        # Place the assistant on the same device/dtype as the main model.
        loaded_assistant_model = loaded_assistant_model.to(device=model.device, dtype=model.dtype)
        loaded_assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_model)
    else:
        loaded_assistant_model = assistant_model
        loaded_assistant_tokenizer = assistant_tokenizer

    # Finally, let's check the tokenizers: if the two models have different tokenizers, we need to keep the assistant
    # tokenizer
    same_vocab_size = model.config.vocab_size == loaded_assistant_model.config.vocab_size
    same_special_tokens = all(
        getattr(model.config, token) == getattr(loaded_assistant_model.config, token)
        for token in ("eos_token_id", "pad_token_id", "bos_token_id")
    )
    if same_vocab_size and same_special_tokens:
        # Tokenizers are compatible: no separate assistant tokenizer needed.
        loaded_assistant_tokenizer = None
    elif loaded_assistant_tokenizer is None:
        raise ValueError(
            "The assistant model has a different tokenizer than the main model. You should pass the assistant "
            "tokenizer."
        )

    return loaded_assistant_model, loaded_assistant_tokenizer


class PipelineException(Exception):
    """
    Raised by a [`Pipeline`] when handling __call__.

    Args:
        task (`str`): The task of the pipeline.
        model (`str`): The model used by the pipeline.
        reason (`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        super().__init__(reason)

        self.task = task
        self.model = model


class ArgumentHandler(ABC):
    """
    Base interface for handling arguments for each [`~pipelines.Pipeline`].
    """

    @abstractmethod
    def __call__(self, *args, **kwargs):
        raise NotImplementedError()


class PipelineDataFormat:
    """
    Base class for all the pipeline supported data format both for reading and writing. Supported data formats
    currently includes:

    - JSON
    - CSV
    - stdin/stdout (pipe)

    `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to
    pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    SUPPORTED_FORMATS = ["json", "csv", "pipe"]

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        # `column` is a comma-separated list; "a=b" entries map pipeline kwarg -> dataset column.
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1

        if self.is_multi_columns:
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]

        if output_path is not None and not overwrite:
            if exists(abspath(self.output_path)):
                raise OSError(f"{self.output_path} already exists on disk")

        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError(f"{self.input_path} doesn't exist on disk")

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: Union[dict, list[dict]]):
        """
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].

        Args:
            data (`dict` or list of `dict`): The data to store.
        """
        raise NotImplementedError()

    def save_binary(self, data: Union[dict, list[dict]]) -> str:
        """
        Save the provided data object as a pickle-formatted binary data on the disk.

        Args:
            data (`dict` or list of `dict`): The data to store.

        Returns:
            `str`: Path where the data has been saved.
        """
        # Replace the configured extension with ".pickle" for the binary sidecar file.
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))

        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)

        return binary_path

    @staticmethod
    def from_str(
        format: str,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
        Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`.

        Args:
            format (`str`):
                The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`.
            output_path (`str`, *optional*):
                Where to save the outgoing data.
            input_path (`str`, *optional*):
                Where to look for the input data.
            column (`str`, *optional*):
                The column to read.
            overwrite (`bool`, *optional*, defaults to `False`):
                Whether or not to overwrite the `output_path`.

        Returns:
            [`~pipelines.PipelineDataFormat`]: The proper data format.
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)")


class CsvPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using CSV data format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

    def __iter__(self):
        # Stream rows lazily; multi-column mode yields kwarg dicts, single-column yields values.
        with open(self.input_path, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                if self.is_multi_columns:
                    yield {k: row[c] for k, c in self.column}
                else:
                    yield row[self.column[0]]

    def save(self, data: list[dict]):
        """
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].

        Args:
            data (`list[dict]`): The data to store.
        """
        with open(self.output_path, "w") as f:
            if len(data) > 0:
                writer = csv.DictWriter(f, list(data[0].keys()))
                writer.writeheader()
                writer.writerows(data)


class JsonPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using JSON file format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

        # The whole input file is loaded eagerly at construction time.
        with open(input_path, "r") as f:
            self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """
        Save the provided data object in a json file.

        Args:
            data (`dict`): The data to store.
        """
        with open(self.output_path, "w") as f:
            json.dump(data, f)


class PipedPipelineDataFormat(PipelineDataFormat):
    """
    Read data from piped input to the python process. For multi columns data, columns should separated by \t

    If columns are provided, then the output will be a dictionary with {column_x: value_x}

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __iter__(self):
        for line in sys.stdin:
            # Split for multi-columns
            if "\t" in line:
                line = line.split("\t")
                if self.column:
                    # Dictionary to map arguments
                    yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
                else:
                    yield tuple(line)

            # No dictionary to map arguments
            else:
                yield line

    def save(self, data: dict):
        """
        Print the data.

        Args:
            data (`dict`): The data to store.
        """
        print(data)

    def save_binary(self, data: Union[dict, list[dict]]) -> str:
        # Piped output has no implicit file target, so binary dumps require an explicit --output.
        if self.output_path is None:
            raise KeyError(
                "When using piped input on pipeline outputting large object requires an output file path. "
                "Please provide such output path through --output argument."
            )

        return super().save_binary(data)


class _ScikitCompat(ABC):
    """
    Interface layer for the Scikit and Keras compatibility.
    """

    @abstractmethod
    def transform(self, X):
        raise NotImplementedError()

    @abstractmethod
    def predict(self, X):
        raise NotImplementedError()


def build_pipeline_init_args(
    has_tokenizer: bool = False,
    has_feature_extractor: bool = False,
    has_image_processor: bool = False,
    has_processor: bool = False,
    supports_binary_output: bool = True,
) -> str:
    # Assembles the shared "Arguments:" docstring section for pipeline classes,
    # appending per-component paragraphs based on the flags.
    docstring = r"""
    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow."""
    if has_tokenizer:
        docstring += r"""
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`]."""
    if has_feature_extractor:
        docstring += r"""
        feature_extractor ([`SequenceFeatureExtractor`]):
            The feature extractor that will be used by the pipeline to encode data for the model.
This object inherits from [`SequenceFeatureExtractor`].""" if has_image_processor: docstring += r""" image_processor ([`BaseImageProcessor`]): The image processor that will be used by the pipeline to encode data for the model. This object inherits from [`BaseImageProcessor`].""" if has_processor: docstring += r""" processor ([`ProcessorMixin`]): The processor that will be used by the pipeline to encode data for the model. This object inherits from [`ProcessorMixin`]. Processor is a composite object that might contain `tokenizer`, `feature_extractor`, and `image_processor`.""" docstring += r""" modelcard (`str` or [`ModelCard`], *optional*): Model card attributed to the model for this pipeline. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. task (`str`, defaults to `""`): A task-identifier for the pipeline. num_workers (`int`, *optional*, defaults to 8): When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used. batch_size (`int`, *optional*, defaults to 1): When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read [Batching with pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) . args_parser ([`~pipelines.ArgumentHandler`], *optional*): Reference to the object in charge of parsing supplied pipeline parameters. device (`int`, *optional*, defaults to -1): Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native `torch.device` or a `str` too dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`)""" if supports_binary_output: docstring += r""" binary_output (`bool`, *optional*, defaults to `False`): Flag indicating if the output the pipeline should happen in a serialized format (i.e., pickle) or as the raw output data e.g. text.""" return docstring PIPELINE_INIT_ARGS = build_pipeline_init_args( has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, has_processor=True, supports_binary_output=True, ) SUPPORTED_PEFT_TASKS = { "document-question-answering": ["PeftModelForQuestionAnswering"], "feature-extraction": ["PeftModelForFeatureExtraction", "PeftModel"], "question-answering": ["PeftModelForQuestionAnswering"], "summarization": ["PeftModelForSeq2SeqLM"], "table-question-answering": ["PeftModelForQuestionAnswering"], "text2text-generation": ["PeftModelForSeq2SeqLM"], "text-classification": ["PeftModelForSequenceClassification"], "sentiment-analysis": ["PeftModelForSequenceClassification"], "text-generation": ["PeftModelForCausalLM"], "token-classification": ["PeftModelForTokenClassification"], "ner": ["PeftModelForTokenClassification"], "translation": ["PeftModelForSeq2SeqLM"], "translation_xx_to_yy": ["PeftModelForSeq2SeqLM"], "zero-shot-classification": ["PeftModelForSequenceClassification"], } if is_torch_available(): from transformers.pipelines.pt_utils import ( PipelineChunkIterator, PipelineDataset, PipelineIterator, PipelinePackIterator, ) @add_end_docstrings( build_pipeline_init_args( has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, has_processor=True ) ) class Pipeline(_ScikitCompat, PushToHubMixin): """ The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines. 
Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations: Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output Pipeline supports running on CPU or GPU through the device argument (see below). Some pipeline, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the `binary_output` constructor argument. If set to `True`, the output will be stored in the pickle format. """ # These flags should be overridden for downstream pipelines. They indicate which preprocessing classes are # used by each pipeline. The possible values are: # - True (the class is mandatory, raise an error if it's not present in the repo) # - None (the class is optional; it should be loaded if present in the repo but the pipeline can work without it) # - False (the class is never used by the pipeline and should not be loaded even if present) _load_processor = None _load_image_processor = None _load_feature_extractor = None _load_tokenizer = None # Pipelines that call `generate` have shared logic, e.g. preparing the generation config. 
_pipeline_calls_generate = False default_input_names = None def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: Optional[PreTrainedTokenizer] = None, feature_extractor: Optional[PreTrainedFeatureExtractor] = None, image_processor: Optional[BaseImageProcessor] = None, processor: Optional[ProcessorMixin] = None, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", device: Union[int, "torch.device"] = None, binary_output: bool = False, **kwargs, ): # We need to pop them for _sanitize_parameters call later _, _, _ = kwargs.pop("args_parser", None), kwargs.pop("torch_dtype", None), kwargs.pop("dtype", None) if framework is None: framework, model = infer_framework_load_model(model, config=model.config) if framework in ("tf", "jax"): logger.warning_once( "TensorFlow and JAX classes are deprecated and will be removed in Transformers v5. We " "recommend migrating to PyTorch classes or pinning your version of Transformers." ) self.task = task self.model = model self.tokenizer = tokenizer self.feature_extractor = feature_extractor self.image_processor = image_processor self.processor = processor self.modelcard = modelcard self.framework = framework # `accelerate` device map hf_device_map = getattr(self.model, "hf_device_map", None) if hf_device_map is not None and device is not None: raise ValueError( "The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please " "discard the `device` argument when creating your pipeline object." ) if device is None: if hf_device_map is not None: # Take the first device used by `accelerate`. 
device = next(iter(hf_device_map.values())) else: device = 0 if is_torch_available() and self.framework == "pt": if device == -1 and self.model.device is not None: device = self.model.device if isinstance(device, torch.device): if (device.type == "xpu" and not is_torch_xpu_available(check_device=True)) or ( device.type == "hpu" and not is_torch_hpu_available() ): raise ValueError(f'{device} is not available, you should use device="cpu" instead') self.device = device elif isinstance(device, str): if ("xpu" in device and not is_torch_xpu_available(check_device=True)) or ( "hpu" in device and not is_torch_hpu_available() ): raise ValueError(f'{device} is not available, you should use device="cpu" instead') self.device = torch.device(device) elif device < 0: self.device = torch.device("cpu") elif is_torch_mlu_available(): self.device = torch.device(f"mlu:{device}") elif is_torch_musa_available(): self.device = torch.device(f"musa:{device}") elif is_torch_cuda_available(): self.device = torch.device(f"cuda:{device}") elif is_torch_npu_available(): self.device = torch.device(f"npu:{device}") elif is_torch_hpu_available(): self.device = torch.device(f"hpu:{device}") elif is_torch_xpu_available(check_device=True): self.device = torch.device(f"xpu:{device}") elif is_torch_mps_available(): self.device = torch.device(f"mps:{device}") else: self.device = torch.device("cpu") else: self.device = device if device is not None else -1 if is_torch_available() and torch.distributed.is_available() and torch.distributed.is_initialized(): self.device = self.model.device logger.warning(f"Device set to use {self.device}") self.binary_output = binary_output # We shouldn't call `model.to()` for models loaded with accelerate as well as the case that model is already on device if ( self.framework == "pt" and self.model.device != self.device and not (isinstance(self.device, int) and self.device < 0) and hf_device_map is None ): self.model.to(self.device) # If it's a generation pipeline and the 
model can generate: # 1 - create a local generation config. This is done to avoid side-effects on the model as we apply local # tweaks to the generation config. # 2 - load the assistant model if it is passed. if self._pipeline_calls_generate and self.model.can_generate(): self.assistant_model, self.assistant_tokenizer = load_assistant_model( self.model, kwargs.pop("assistant_model", None), kwargs.pop("assistant_tokenizer", None) ) self.prefix = self.model.config.prefix if hasattr(self.model.config, "prefix") else None # each pipeline with text generation capabilities should define its own default generation in a # `_default_generation_config` class attribute default_pipeline_generation_config = getattr(self, "_default_generation_config", GenerationConfig()) if hasattr(self.model, "_prepare_generation_config"): # TF doesn't have `_prepare_generation_config` # Uses `generate`'s logic to enforce the following priority of arguments: # 1. user-defined config options in `**kwargs` # 2. model's generation config values # 3. pipeline's default generation config values # NOTE: _prepare_generation_config creates a deep copy of the generation config before updating it, # and returns all kwargs that were not used to update the generation config prepared_generation_config, kwargs = self.model._prepare_generation_config( generation_config=default_pipeline_generation_config, use_model_defaults=True, **kwargs ) self.generation_config = prepared_generation_config # if the `max_new_tokens` is set to the pipeline default, but `max_length` is set to a non-default # value: let's honor `max_length`. E.g. we want Whisper's default `max_length=448` take precedence # over over the pipeline's length default. 
if ( default_pipeline_generation_config.max_new_tokens is not None # there's a pipeline default and self.generation_config.max_new_tokens == default_pipeline_generation_config.max_new_tokens and self.generation_config.max_length is not None and self.generation_config.max_length != 20 # global default ): self.generation_config.max_new_tokens = None else: # TODO (joao): no PT model should reach this line. However, some audio models with complex # inheritance patterns do. Streamline those models such that this line is no longer needed. # In those models, the default generation config is not (yet) used. self.generation_config = copy.deepcopy(self.model.generation_config) # Update the generation config with task specific params if they exist. # NOTE: 1. `prefix` is pipeline-specific and doesn't exist in the generation config. # 2. `task_specific_params` is a legacy feature and should be removed in a future version. task_specific_params = self.model.config.task_specific_params if task_specific_params is not None and task in task_specific_params: this_task_params = task_specific_params.get(task) if "prefix" in this_task_params: self.prefix = this_task_params.pop("prefix") self.generation_config.update(**this_task_params) # If the tokenizer has a pad token but the model doesn't, set it so that `generate` is aware of it. 
if ( self.tokenizer is not None and self.tokenizer.pad_token_id is not None and self.generation_config.pad_token_id is None ): self.generation_config.pad_token_id = self.tokenizer.pad_token_id self.call_count = 0 self._batch_size = kwargs.pop("batch_size", None) self._num_workers = kwargs.pop("num_workers", None) self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs) # In processor only mode, we can get the modality processors from the processor if self.processor is not None and all( [self.tokenizer is None, self.feature_extractor is None, self.image_processor is None] ): self.tokenizer = getattr(self.processor, "tokenizer", None) self.feature_extractor = getattr(self.processor, "feature_extractor", None) self.image_processor = getattr(self.processor, "image_processor", None) if self.image_processor is None and self.feature_extractor is not None: if isinstance(self.feature_extractor, BaseImageProcessor): # Backward compatible change, if users called # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor()) # then we should keep working self.image_processor = self.feature_extractor def save_pretrained( self, save_directory: Union[str, os.PathLike], safe_serialization: bool = True, **kwargs, ): """ Save the pipeline's model and tokenizer. Args: save_directory (`str` or `os.PathLike`): A path to the directory where to saved. It will be created if it doesn't exist. safe_serialization (`str`): Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. kwargs (`dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. 
Please use `token` instead.", FutureWarning, ) if kwargs.get("token") is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if hasattr(self, "_registered_impl"): # Add info to the config pipeline_info = self._registered_impl.copy() custom_pipelines = {} for task, info in pipeline_info.items(): if info["impl"] != self.__class__: continue info = info.copy() module_name = info["impl"].__module__ last_module = module_name.split(".")[-1] # Change classes into their names/full names info["impl"] = f"{last_module}.{info['impl'].__name__}" info["pt"] = tuple(c.__name__ for c in info["pt"]) info["tf"] = tuple(c.__name__ for c in info["tf"]) custom_pipelines[task] = info self.model.config.custom_pipelines = custom_pipelines # Save the pipeline custom code custom_object_save(self, save_directory) kwargs["safe_serialization"] = safe_serialization self.model.save_pretrained(save_directory, **kwargs) if self.tokenizer is not None: self.tokenizer.save_pretrained(save_directory, **kwargs) if self.feature_extractor is not None: self.feature_extractor.save_pretrained(save_directory, **kwargs) if self.image_processor is not None: self.image_processor.save_pretrained(save_directory, **kwargs) if self.modelcard is not None: self.modelcard.save_pretrained(save_directory) def transform(self, X): """ Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). """ return self(X) def predict(self, X): """ Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). """ return self(X) @property def dtype(self) -> Optional["torch.dtype"]: """ Dtype of the model (if it's Pytorch model), `None` otherwise. 
""" return getattr(self.model, "dtype", None) @property def torch_dtype(self) -> Optional["torch.dtype"]: """ Torch dtype of the model (if it's Pytorch model), `None` otherwise. """ logger.warning_once("`torch_dtype` attribute is deprecated. Use `dtype` instead!") return getattr(self.model, "dtype", None) @contextmanager def device_placement(self): """ Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. Returns: Context manager Examples: ```python # Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...) ```""" if self.framework == "tf": with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"): yield else: if self.device.type == "cuda": with torch.cuda.device(self.device): yield elif self.device.type == "mlu": with torch.mlu.device(self.device): yield elif self.device.type == "musa": with torch.musa.device(self.device): yield elif self.device.type == "xpu": with torch.xpu.device(self.device): yield else: yield def ensure_tensor_on_device(self, **inputs): """ Ensure PyTorch tensors are on the specified device. Args: inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored): The tensors to place on `self.device`. Recursive on lists **only**. Return: `dict[str, torch.Tensor]`: The same as `inputs` but on the proper device. 
""" return self._ensure_tensor_on_device(inputs, self.device) def _ensure_tensor_on_device(self, inputs, device): if isinstance(inputs, ModelOutput): return ModelOutput( {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} ) elif isinstance(inputs, dict): return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} elif isinstance(inputs, UserDict): return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}) elif isinstance(inputs, list): return [self._ensure_tensor_on_device(item, device) for item in inputs] elif isinstance(inputs, tuple): return tuple(self._ensure_tensor_on_device(item, device) for item in inputs) elif isinstance(inputs, torch.Tensor): return inputs.to(device) else: return inputs def check_model_type(self, supported_models: Union[list[str], dict]): """ Check if the model class is in supported by the pipeline. Args: supported_models (`list[str]` or `dict`): The list of models supported by the pipeline, or a dictionary with model class values. """ if not isinstance(supported_models, list): # Create from a model mapping supported_models_names = [] if self.task in SUPPORTED_PEFT_TASKS: supported_models_names.extend(SUPPORTED_PEFT_TASKS[self.task]) for model_name in supported_models.values(): # Mapping can now contain tuples of models for the same configuration. 
if isinstance(model_name, tuple): supported_models_names.extend(list(model_name)) else: supported_models_names.append(model_name) if hasattr(supported_models, "_model_mapping"): for model in supported_models._model_mapping._extra_content.values(): if isinstance(model_name, tuple): supported_models_names.extend([m.__name__ for m in model]) else: supported_models_names.append(model.__name__) supported_models = supported_models_names if self.model.__class__.__name__ not in supported_models: logger.error( f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are" f" {supported_models}." ) @abstractmethod def _sanitize_parameters(self, **pipeline_parameters): """ _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__` methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`, `forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify a kwargs. This lets you keep defaults in function signatures, which is more "natural". It is not meant to be called directly, it will be automatically called and the final parameters resolved by `__init__` and `__call__` """ raise NotImplementedError("_sanitize_parameters not implemented") @abstractmethod def preprocess(self, input_: Any, **preprocess_parameters: dict) -> dict[str, GenericTensor]: """ Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items. """ raise NotImplementedError("preprocess not implemented") @abstractmethod def _forward(self, input_tensors: dict[str, GenericTensor], **forward_parameters: dict) -> ModelOutput: """ _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might involve the GPU or the CPU and should be agnostic to it. 
Isolating this function is the reason for `preprocess` and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible. It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part of the code (leading to faster inference). """ raise NotImplementedError("_forward not implemented") @abstractmethod def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: dict) -> Any: """ Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers). """ raise NotImplementedError("postprocess not implemented") def get_inference_context(self): return torch.no_grad def forward(self, model_inputs, **forward_params): with self.device_placement(): if self.framework == "tf": model_inputs["training"] = False model_outputs = self._forward(model_inputs, **forward_params) elif self.framework == "pt": inference_context = self.get_inference_context() with inference_context(): model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) model_outputs = self._forward(model_inputs, **forward_params) model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu")) else: raise ValueError(f"Framework {self.framework} is not supported") return model_outputs def get_iterator( self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params ): if isinstance(inputs, collections.abc.Sized): dataset = PipelineDataset(inputs, self.preprocess, preprocess_params) else: if num_workers > 1: logger.warning( "For iterable dataset using num_workers>1 is likely to result" " in errors since everything is iterable, setting `num_workers=1`" " to guarantee 
correctness." ) num_workers = 1 dataset = PipelineIterator(inputs, self.preprocess, preprocess_params) if "TOKENIZERS_PARALLELISM" not in os.environ: logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") os.environ["TOKENIZERS_PARALLELISM"] = "false" # TODO hack by collating feature_extractor and image_processor feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) return final_iterator def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs): if args: logger.warning(f"Ignoring args : {args}") if num_workers is None: if self._num_workers is None: num_workers = 0 else: num_workers = self._num_workers if batch_size is None: if self._batch_size is None: batch_size = 1 else: batch_size = self._batch_size preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs) # Fuse __init__ params and __call__ params without modifying the __init__ ones. preprocess_params = {**self._preprocess_params, **preprocess_params} forward_params = {**self._forward_params, **forward_params} postprocess_params = {**self._postprocess_params, **postprocess_params} self.call_count += 1 if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda": logger.warning_once( "You seem to be using the pipelines sequentially on GPU. 
In order to maximize efficiency please use a" " dataset", ) is_dataset = Dataset is not None and isinstance(inputs, Dataset) is_generator = isinstance(inputs, types.GeneratorType) is_list = isinstance(inputs, list) is_iterable = is_dataset or is_generator or is_list # TODO make the get_iterator work also for `tf` (and `flax`). can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list) if is_list: if can_use_iterator: final_iterator = self.get_iterator( inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) outputs = list(final_iterator) return outputs else: return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params) elif can_use_iterator: return self.get_iterator( inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) elif is_iterable: return self.iterate(inputs, preprocess_params, forward_params, postprocess_params) elif self.framework == "pt" and isinstance(self, ChunkPipeline): return next( iter( self.get_iterator( [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) ) ) else: return self.run_single(inputs, preprocess_params, forward_params, postprocess_params) def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params): return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs] def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): model_inputs = self.preprocess(inputs, **preprocess_params) model_outputs = self.forward(model_inputs, **forward_params) outputs = self.postprocess(model_outputs, **postprocess_params) return outputs def iterate(self, inputs, preprocess_params, forward_params, postprocess_params): # This function should become `get_iterator` again, this is a temporary # easy solution. 
        for input_ in inputs:
            yield self.run_single(input_, preprocess_params, forward_params, postprocess_params)


# Give Pipeline its own copy of `push_to_hub` so mutating __doc__ below does
# not affect the shared PushToHubMixin method, then reword the docstring with
# pipeline-specific terms and drop the `.from_pretrained` mention.
Pipeline.push_to_hub = copy_func(Pipeline.push_to_hub)
if Pipeline.push_to_hub.__doc__ is not None:
    Pipeline.push_to_hub.__doc__ = Pipeline.push_to_hub.__doc__.format(
        object="pipe", object_class="pipeline", object_files="pipeline file"
    ).replace(".from_pretrained", "")


class ChunkPipeline(Pipeline):
    # Pipeline variant whose `preprocess` yields several model inputs (chunks)
    # per sample; `postprocess` receives the list of all per-chunk outputs for
    # one sample and is responsible for merging them.
    def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
        # Run the model once per chunk produced by `preprocess`, collecting
        # every chunk's output before a single `postprocess` call.
        all_outputs = []
        for model_inputs in self.preprocess(inputs, **preprocess_params):
            model_outputs = self.forward(model_inputs, **forward_params)
            all_outputs.append(model_outputs)
        outputs = self.postprocess(all_outputs, **postprocess_params)
        return outputs

    def get_iterator(
        self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params
    ):
        if "TOKENIZERS_PARALLELISM" not in os.environ:
            logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
            os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # Multiple DataLoader workers over an iterable chunk stream is unsafe
        # (see warning text), so force a single worker.
        if num_workers > 1:
            logger.warning(
                "For ChunkPipeline using num_workers>0 is likely to result in errors since everything is iterable,"
                " setting `num_workers=1` to guarantee correctness."
) num_workers = 1 dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params) # TODO hack by collating feature_extractor and image_processor feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) return final_iterator class PipelineRegistry: def __init__(self, supported_tasks: dict[str, Any], task_aliases: dict[str, str]) -> None: self.supported_tasks = supported_tasks self.task_aliases = task_aliases def get_supported_tasks(self) -> list[str]: supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys()) supported_task.sort() return supported_task def check_task(self, task: str) -> tuple[str, dict, Any]: if task in self.task_aliases: task = self.task_aliases[task] if task in self.supported_tasks: targeted_task = self.supported_tasks[task] return task, targeted_task, None if task.startswith("translation"): tokens = task.split("_") if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to": targeted_task = self.supported_tasks["translation"] task = "translation" return task, targeted_task, (tokens[1], tokens[3]) raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format") raise KeyError( f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}" ) def register_pipeline( self, task: str, pipeline_class: type, pt_model: Optional[Union[type, tuple[type]]] = None, tf_model: Optional[Union[type, tuple[type]]] = None, default: Optional[dict] = None, type: Optional[str] = None, ) -> None: if task in 
self.supported_tasks: logger.warning(f"{task} is already registered. Overwriting pipeline for task {task}...") if pt_model is None: pt_model = () elif not isinstance(pt_model, tuple): pt_model = (pt_model,) if tf_model is None: tf_model = () elif not isinstance(tf_model, tuple): tf_model = (tf_model,) task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model} if default is not None: if "model" not in default and ("pt" in default or "tf" in default): default = {"model": default} task_impl["default"] = default if type is not None: task_impl["type"] = type self.supported_tasks[task] = task_impl pipeline_class._registered_impl = {task: task_impl} def to_dict(self): return self.supported_tasks
transformers/src/transformers/pipelines/base.py/0
{ "file_path": "transformers/src/transformers/pipelines/base.py", "repo_id": "transformers", "token_count": 28768 }
568
import enum
import warnings
from typing import Any, Union

from ..generation import GenerationConfig
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import Pipeline, build_pipeline_init_args


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    # Return raw token ids instead of decoded text.
    TENSORS = 0
    # Return decoded text (default).
    TEXT = 1


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text to text generation using seq2seq models.

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 4

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
    >>> generator(
    ...     "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
    ... )
    [{'generated_text': 'question: Who created the RuPERTa-base?'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
    generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
    text generation parameters in [Text generation strategies](../generation_strategies) and [Text
    generation](text_generation).

    This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
    identifier: `"text2text-generation"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
    parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Usage:

    ```python
    text2text_generator = pipeline("text2text-generation")
    text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
    ```"""

    _pipeline_calls_generate = True
    _load_processor = False
    _load_image_processor = False
    _load_feature_extractor = False
    _load_tokenizer = True
    # Make sure the docstring is updated when the default generation config is changed (in all pipelines in this file)
    _default_generation_config = GenerationConfig(
        max_new_tokens=256,
        num_beams=4,
    )

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Restrict this pipeline to seq2seq (encoder-decoder) LM architectures.
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        # Split user kwargs into (preprocess, forward, postprocess) parameter dicts.
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                # Only single-token stop sequences are supported; warn and use the first token.
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        if self.assistant_model is not None:
            forward_params["assistant_model"] = self.assistant_model
        if self.assistant_tokenizer is not None:
            forward_params["tokenizer"] = self.tokenizer
            forward_params["assistant_tokenizer"] = self.assistant_tokenizer

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        # Base implementation accepts everything; subclasses log warnings.
        return True

    def _parse_and_tokenize(self, *args, truncation):
        # Prepend the model-specific task prefix (e.g. T5's "summarize: ") when configured.
        prefix = self.prefix if self.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise TypeError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args: Union[str, list[str]], **kwargs: Any) -> list[dict[str, str]]:
        r"""
        Generate the output text(s) using text(s) given as inputs.

        Args:
            args (`str` or `list[str]`):
                Input text for the encoder.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
                The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
                (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
                max_length instead of throwing an error down the line.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """

        result = super().__call__(*args, **kwargs)
        # Flatten the batched result when the caller passed a list of strings and
        # each input produced exactly one record.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        self.check_inputs(
            input_length,
            generate_kwargs.get("min_length", self.generation_config.min_length),
            generate_kwargs.get("max_length", self.generation_config.max_length),
        )

        # User-defined `generation_config` passed to the pipeline call take precedence
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config

        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Regroup outputs by input: (in_b * num_return_sequences, seq) -> (in_b, n, seq).
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class SummarizationPipeline(Text2TextGenerationPipeline):
    """
    Summarize news articles and other documents.

    This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"summarization"`.

    The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is
    currently, '*bart-large-cnn*', '*google-t5/t5-small*', '*google-t5/t5-base*', '*google-t5/t5-large*',
    '*google-t5/t5-3b*', '*google-t5/t5-11b*'. See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list of available parameters,
    see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 4

    Usage:

    ```python
    # use bart in pytorch
    summarizer = pipeline("summarization")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)

    # use t5 in tf
    summarizer = pipeline("summarization", model="google-t5/t5-base", tokenizer="google-t5/t5-base", framework="tf")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
    ```"""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        r"""
        Summarize the text(s) given as inputs.

        Args:
            documents (*str* or `list[str]`):
                One or several articles (or one list of articles) to summarize.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:

            - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input.
            - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the summary.
        """
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        # Warnings only — never rejects the input.
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TranslationPipeline(Text2TextGenerationPipeline):
    """
    Translates from one language to another.

    This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"translation_xx_to_yy"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation).
    For a list of available parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 4

    Usage:

    ```python
    en_fr_translator = pipeline("translation_en_to_fr")
    en_fr_translator("How old are you?")
    ```"""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        # Warn when the input is close to (or beyond) the generation budget.
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        # Multilingual tokenizers expose `_build_translation_inputs` to inject
        # language codes; fall back to plain tokenization otherwise.
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        r"""
        Translate the text(s) given as inputs.

        Args:
            args (`str` or `list[str]`):
                Texts to be translated.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            src_lang (`str`, *optional*):
                The language of the input. Might be required for multilingual models. Will not have any effect for
                single pair translation models
            tgt_lang (`str`, *optional*):
                The language of the desired output. Might be required for multilingual models. Will not have any effect
                for single pair translation models
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:

            - **translation_text** (`str`, present when `return_text=True`) -- The translation.
            - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
              token ids of the translation.
        """
        return super().__call__(*args, **kwargs)
transformers/src/transformers/pipelines/text2text_generation.py/0
{ "file_path": "transformers/src/transformers/pipelines/text2text_generation.py", "repo_id": "transformers", "token_count": 7499 }
569
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Optional, Union

from ..utils import is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin, QuantizationMethod
from .quantizers_utils import get_module_from_name


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

if is_torch_available():
    import torch
    from torch.nn import ModuleList
else:
    # Placeholder base class when torch is absent so the module still imports.
    ModuleList = str

# NOTE(review): `__file__` is used here where sibling modules use `__name__` —
# the logger name becomes a file path; confirm whether this is intentional.
logger = logging.get_logger(__file__)


class HfQuantizer(ABC):
    """
    Abstract class of the HuggingFace quantizer. Supports for now quantizing HF transformers models for inference
    and/or quantization. This class is used only for transformers.PreTrainedModel.from_pretrained and cannot be
    easily used outside the scope of that method yet.

    Attributes
        quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
            The quantization config that defines the quantization parameters of your model that you want to quantize.
        modules_to_not_convert (`list[str]`, *optional*):
            The list of module names to not convert when quantizing the model.
        required_packages (`list[str]`, *optional*):
            The list of required pip packages to install prior to using the quantizer
        requires_calibration (`bool`):
            Whether the quantization method requires to calibrate the model before using it.
        requires_parameters_quantization (`bool`):
            Whether the quantization method requires to create a new Parameter. For example, for bitsandbytes, it is
            required to create a new xxxParameter in order to properly quantize the model.
    """

    requires_calibration = False
    required_packages = None
    requires_parameters_quantization = False

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        self.quantization_config = quantization_config

        # -- Handle extra kwargs below --
        self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
        self.pre_quantized = kwargs.pop("pre_quantized", True)

        if not self.pre_quantized and self.requires_calibration:
            raise ValueError(
                f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized."
                f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to "
                f"pass `pre_quantized=True` while knowing what you are doing."
            )

    def update_torch_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
        """
        Deprecared in favor of `update_dtype`!

        Args:
            dtype (`torch.dtype`):
                The input dtype that is passed in `from_pretrained`
        """
        logger.warning_once(
            "`update_torch_dtype` is deprecated in favor of `update_dtype`! It will be removed in version v4.57"
        )
        return self.update_dtype(dtype)

    def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
        """
        Some quantization methods require to explicitly set the dtype of the model to a target dtype. You need to
        override this method in case you want to make sure that behavior is preserved

        Args:
            dtype (`torch.dtype`):
                The input dtype that is passed in `from_pretrained`
        """
        return dtype

    def update_device_map(self, device_map: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]:
        """
        Override this method if you want to pass a override the existing device map with a new one. E.g. for
        bitsandbytes, since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to
        `"auto"``

        Args:
            device_map (`Union[dict, str]`, *optional*):
                The device_map that is passed through the `from_pretrained` method.
        """
        return device_map

    def adjust_target_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
        """
        Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained` to compute the
        device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype` to `torch.int8`
        and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.

        Args:
            dtype (`torch.dtype`, *optional*):
                The dtype that is used to compute the device_map.
        """
        return dtype

    def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
        """
        Override this method if you want to adjust the `missing_keys`.

        Args:
            missing_keys (`list[str]`, *optional*):
                The list of missing keys in the checkpoint compared to the state dict of the model
        """
        return missing_keys

    def update_unexpected_keys(self, model, unexpected_keys: list[str], prefix: str) -> list[str]:
        """
        Override this method if you want to adjust the `unexpected_keys`.

        Args:
            unexpected_keys (`list[str]`, *optional*):
                The list of unexpected keys in the checkpoint compared to the state dict of the model
        """
        return unexpected_keys

    def update_missing_keys_after_loading(self, model, missing_keys: list[str], prefix: str) -> list[str]:
        """
        Override this method if you want to adjust the `missing_keys` after loading the model params,
        but before the model is post-processed.

        Args:
            missing_keys (`list[str]`, *optional*):
                The list of missing keys in the checkpoint compared to the state dict of the model
        """
        return missing_keys

    def update_expected_keys(self, model, expected_keys: list[str], loaded_keys: list[str]) -> list[str]:
        """
        Override this method if you want to adjust the `update_expected_keys`.

        Args:
            expected_keys (`list[str]`, *optional*):
                The list of the expected keys in the initialized model.
            loaded_keys (`list[str]`, *optional*):
                The list of the loaded keys in the checkpoint.
        """
        return expected_keys

    def get_special_dtypes_update(self, model, dtype: "torch.dtype") -> dict[str, "torch.dtype"]:
        """
        returns dtypes for modules that are not quantized - used for the computation of the device_map in case one
        passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified in
        `_process_model_before_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            dtype (`torch.dtype`):
                The dtype passed in `from_pretrained` method.
        """

        return {
            name: dtype
            for name, _ in model.named_parameters()
            if any(m in name for m in self.modules_to_not_convert)
        }

    def adjust_max_memory(self, max_memory: dict[str, Union[int, str]]) -> dict[str, Union[int, str]]:
        """adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
        return max_memory

    def check_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: dict[str, Any],
        **kwargs,
    ) -> bool:
        """
        checks if a loaded state_dict component is part of quantized param + some validation; only defined if
        requires_parameters_quantization == True for quantization methods that require to create a new parameters for
        quantization.
        """
        return False

    def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
        """
        takes needed components from state_dict and creates quantized param; only applicable if
        requires_parameters_quantization == True
        """
        if not self.requires_parameters_quantization:
            raise AttributeError(
                f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
            )

    def validate_environment(self, *args, **kwargs):
        """
        This method is used to potentially check for potential conflicts with arguments that are passed in
        `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers. If
        no explicit check are needed, simply return nothing.
        """
        return

    def update_tp_plan(self, config):
        "updates the tp plan for the scales"
        return config

    def preprocess_model(self, model: "PreTrainedModel", **kwargs):
        """
        Setting model attributes and/or converting model before weights loading. At this point
        the model should be initialized on the meta device so you can freely manipulate the skeleton
        of the model in order to replace modules in-place. Make sure to override the abstract method
        `_process_model_before_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along `_process_model_before_weight_loading`.
        """
        model.is_quantized = True
        model.quantization_method = self.quantization_config.quant_method
        if self.pre_quantized:
            # Patch known modules (see MODULES_TO_PATCH_FOR_QUANTIZATION) before loading weights.
            self._convert_model_for_quantization(model)
        return self._process_model_before_weight_loading(model, **kwargs)

    def postprocess_model(self, model: "PreTrainedModel", **kwargs):
        """
        Post-process the model post weights loading. Make sure to override the abstract method
        `_process_model_after_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along `_process_model_after_weight_loading`.
        """
        return self._process_model_after_weight_loading(model, **kwargs)

    def remove_quantization_config(self, model):
        """
        Remove the quantization config from the model.
        """
        # Each attribute may be absent depending on how the model was built.
        if hasattr(model, "hf_quantizer"):
            del model.hf_quantizer
        if hasattr(model.config, "quantization_config"):
            del model.config.quantization_config
        if hasattr(model.config, "_pre_quantization_dtype"):
            del model.config._pre_quantization_dtype
        if hasattr(model, "quantization_method"):
            del model.quantization_method
        model.is_quantized = False

    def dequantize(self, model):
        """
        Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
        Note not all quantization schemes support this.
        """
        model = self._dequantize(model)

        # Delete quantizer and quantization config
        del model.hf_quantizer
        del model.config.quantization_config
        del model.config._pre_quantization_dtype
        del model.quantization_method
        model.is_quantized = False

        return model

    def get_cuda_warm_up_factor(self):
        """
        The factor to be used in `caching_allocator_warmup` to get the number of bytes to pre-allocate to warm up cuda.
        A factor of 2 means we allocate all bytes in the empty model (since we allocate in fp16), a factor of 4 means
        we allocate half the memory of the weights residing in the empty model, etc...
        """
        # By default we return 4, i.e. half the model size (this corresponds to the case where the model is not
        # really pre-processed, i.e. we do not have the info that weights are going to be 8 bits before actual
        # weight loading)
        return 4

    def _dequantize(self, model):
        raise NotImplementedError(
            f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub."
        )

    def update_param_name(self, param_name: str) -> str:
        """
        Override this method if you want to adjust the `param_name`.
        """
        return param_name

    @staticmethod
    def get_modules_to_not_convert(
        model: "PreTrainedModel",
        skip_modules: Optional[list[str]] = None,
        keep_in_fp32_modules: Optional[list[str]] = None,
        add_default_skips: bool = False,
    ):
        # Merge auto-detected skip modules with user-specified and fp32-pinned modules.
        from ..integrations import get_keys_to_not_convert

        if skip_modules is None or add_default_skips:
            modules_to_not_convert = get_keys_to_not_convert(model)
        else:
            modules_to_not_convert = []

        if skip_modules is not None:
            modules_to_not_convert.extend(skip_modules)

        if keep_in_fp32_modules is not None:
            modules_to_not_convert.extend(keep_in_fp32_modules)

        return modules_to_not_convert

    @property
    def is_qat_trainable(self) -> bool:
        """Flag indicating whether the quantized model can carry out quantization aware training"""
        return False

    @property
    def is_compileable(self) -> bool:
        """Flag indicating whether the quantized model can be compiled"""
        return False

    @abstractmethod
    def _process_model_before_weight_loading(self, model, **kwargs): ...

    @abstractmethod
    def _process_model_after_weight_loading(self, model, **kwargs): ...

    @abstractmethod
    def is_serializable(self, safe_serialization=None): ...

    @property
    @abstractmethod
    def is_trainable(self): ...

    def _convert_model_for_quantization(self, model):
        # Swap known-problematic modules for quantization-friendly equivalents
        # while the model is still on the meta device.
        from accelerate import init_empty_weights

        for name, module in model.named_modules():
            module_class_name = module.__class__.__name__
            if module_class_name in MODULES_TO_PATCH_FOR_QUANTIZATION and (
                self.quantization_config.quant_method
                in MODULES_TO_PATCH_FOR_QUANTIZATION[module_class_name]["quantization_methods"]
            ):
                with init_empty_weights():
                    # NOTE: `name` is rebound here to the leaf attribute name.
                    parent_module, name = get_module_from_name(model, name)
                    parent_module._modules[name] = MODULES_TO_PATCH_FOR_QUANTIZATION[module_class_name]["module_name"](
                        model.config.get_text_config()
                    )


class SequentialLlama4TextExperts(ModuleList):
    """
    A module that implements a compressed version of a list of expert modules.
    This is specifically designed to work with Llama4TextExperts in MoE layers.
    """

    def __init__(self, config):
        from transformers.models.llama4.modeling_llama4 import Llama4TextMLP

        super().__init__([Llama4TextMLP(config) for _ in range(config.num_local_experts)])
        self.num_experts = config.num_local_experts

    def forward(
        self,
        hidden_states: "torch.Tensor",
    ) -> "torch.Tensor":
        # Run each expert sequentially on its slice of the (experts, tokens, dim) input.
        hidden_states = hidden_states.reshape(self.num_experts, -1, hidden_states.shape[-1])
        routed_out = torch.zeros_like(hidden_states)
        for expert_idx in range(self.num_experts):
            routed_out[expert_idx] = self[expert_idx](hidden_states[expert_idx])
        return routed_out


# Module-class-name -> replacement module and the quantization methods that need the patch.
MODULES_TO_PATCH_FOR_QUANTIZATION = {
    "Llama4TextExperts": {
        "module_name": SequentialLlama4TextExperts,
        "quantization_methods": [
            QuantizationMethod.COMPRESSED_TENSORS,
            QuantizationMethod.BITS_AND_BYTES,
        ],
    }
}
transformers/src/transformers/quantizers/base.py/0
{ "file_path": "transformers/src/transformers/quantizers/base.py", "repo_id": "transformers", "token_count": 6401 }
570
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import TYPE_CHECKING, Any, Optional, Union

from packaging import version

from .base import HfQuantizer
from .quantizers_utils import get_module_from_name


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import (
    is_accelerate_available,
    is_optimum_quanto_available,
    is_torch_available,
    logging,
)
from ..utils.quantization_config import QuantoConfig


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


class QuantoHfQuantizer(HfQuantizer):
    """
    Quantizer for the quanto library
    """

    required_packages = ["quanto", "accelerate"]
    requires_parameters_quantization = True
    requires_calibration = False

    def __init__(self, quantization_config: QuantoConfig, **kwargs):
        super().__init__(quantization_config, **kwargs)
        self.post_init()

    def post_init(self):
        r"""
        Safety checker: activation quantization is only supported for already-quantized checkpoints.
        """
        if self.quantization_config.activations is not None and not self.pre_quantized:
            raise ValueError(
                "We don't support quantizing the activations with transformers library."
                "Use quanto library for more complex use cases such as activations quantization, calibration and quantization aware training."
            )

    def validate_environment(self, *args, **kwargs):
        # Both optimum-quanto and accelerate are hard requirements for this quantizer.
        if not is_optimum_quanto_available():
            raise ImportError(
                "Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)"
            )
        if not is_accelerate_available():
            raise ImportError(
                "Loading an optimum-quanto quantized model requires accelerate library (`pip install accelerate`)"
            )

    def update_device_map(self, device_map):
        # Default to CPU placement when the caller did not provide a device map.
        if device_map is None:
            device_map = {"": "cpu"}
            logger.info(
                "The device_map was not initialized. "
                "Setting device_map to {'':'cpu'}. "
                "If you want to use the model for inference, please set device_map ='auto'"
            )
        return device_map

    def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
        # quanto keeps non-quantized tensors in fp32 unless told otherwise.
        if dtype is None:
            logger.info("You did not specify `dtype` in `from_pretrained`. Setting it to `torch.float32`.")
            dtype = torch.float32
        return dtype

    def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
        """
        Drop from `missing_keys` the quanto-internal buffers (e.g. scales) of quantized
        modules — everything attached to a `QModuleMixin` that is not its weight or bias.
        """
        # FIX: previously this method implicitly returned None when optimum-quanto was
        # not importable; callers expect a list, so return the keys unchanged instead.
        if not is_optimum_quanto_available():
            return missing_keys

        from optimum.quanto import QModuleMixin

        not_missing_keys = []
        for name, module in model.named_modules():
            if isinstance(module, QModuleMixin):
                for missing in missing_keys:
                    if (
                        (name in missing or name in f"{prefix}.{missing}")
                        and not missing.endswith(".weight")
                        and not missing.endswith(".bias")
                    ):
                        not_missing_keys.append(missing)
        return [k for k in missing_keys if k not in not_missing_keys]

    def check_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: dict[str, Any],
        **kwargs,
    ) -> bool:
        """
        Check if a parameter needs to be quantized.
        """
        # FIX: return an explicit bool instead of falling through to None when
        # optimum-quanto is unavailable.
        if not is_optimum_quanto_available():
            return False

        from optimum.quanto import QModuleMixin

        device_map = kwargs.get("device_map")
        param_device = kwargs.get("param_device")
        # we don't quantize the model if the module is going to be offloaded to the cpu
        if device_map is not None and param_device is not None:
            device_map_values = set(device_map.values())
            if param_device == "cpu" and len(device_map_values) > 1:
                if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}):
                    return False

        module, tensor_name = get_module_from_name(model, param_name)
        # We only quantize the weights and the bias is not quantized.
        if isinstance(module, QModuleMixin) and "weight" in tensor_name:
            # if the weights are quantized, don't need to recreate it again with `create_quantized_param`
            return not module.frozen
        else:
            return False

    def adjust_max_memory(self, max_memory: dict[str, Union[int, str]]) -> dict[str, Union[int, str]]:
        # Keep a 10% headroom for quantization buffers when computing the device map.
        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
        return max_memory

    def create_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        target_device: "torch.device",
        *args,
        **kwargs,
    ):
        """
        Create the quantized parameter by calling .freeze() after setting it to the module.
        """
        from accelerate.utils import set_module_tensor_to_device

        set_module_tensor_to_device(model, param_name, target_device, param_value)
        module, _ = get_module_from_name(model, param_name)
        module.freeze()
        module.weight.requires_grad = False

    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
        # Map the configured quanto weight format to the dtype accelerate uses for
        # device-map size estimation; sub-int8 formats need accelerate's CustomDtype.
        if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.27.0"):
            from accelerate.utils import CustomDtype

            mapping = {
                "int8": torch.int8,
                "float8": CustomDtype.FP8,
                "int4": CustomDtype.INT4,
                "int2": CustomDtype.INT2,
            }
            target_dtype = mapping[self.quantization_config.weights]
            return target_dtype
        else:
            raise ValueError(
                "You are using `device_map='auto'` on an optimum-quanto quantized model. To automatically compute"
                " the appropriate device map, you should upgrade your `accelerate` library,"
                "`pip install --upgrade accelerate` or install it from source."
            )

    def _process_model_before_weight_loading(
        self, model: "PreTrainedModel", keep_in_fp32_modules: Optional[list[str]] = None, **kwargs
    ):
        # Replace eligible linear layers with quanto modules before weights are loaded.
        from ..integrations import replace_with_quanto_layers

        self.modules_to_not_convert = self.get_modules_to_not_convert(
            model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
        )

        model, _ = replace_with_quanto_layers(
            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
        )
        model.config.quantization_config = self.quantization_config

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model

    @property
    def is_trainable(self) -> bool:
        return True

    def is_serializable(self, safe_serialization=None):
        # quanto-quantized checkpoints cannot currently be re-serialized by transformers.
        return False
transformers/src/transformers/quantizers/quantizer_quanto.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_quanto.py", "repo_id": "transformers", "token_count": 3187 }
571
# Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library). For slow (python) tokenizers see tokenization_utils.py """ import copy import json import os from collections import defaultdict from collections.abc import Iterable from typing import Any, Optional, Union import tokenizers.pre_tokenizers as pre_tokenizers_fast from tokenizers import Encoding as EncodingFast from tokenizers import Tokenizer as TokenizerFast from tokenizers.decoders import Decoder as DecoderFast from tokenizers.trainers import BpeTrainer, UnigramTrainer, WordLevelTrainer, WordPieceTrainer from .convert_slow_tokenizer import convert_slow_tokenizer from .integrations.ggml import convert_gguf_tokenizer from .modeling_gguf_pytorch_utils import load_gguf_checkpoint from .tokenization_utils import PreTrainedTokenizer from .tokenization_utils_base import ( INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, SpecialTokensMixin, TextInput, TextInputPair, TruncationStrategy, ) from .utils import PaddingStrategy, add_end_docstrings, logging logger = logging.get_logger(__name__) # Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file TOKENIZER_FILE = "tokenizer.json" SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" TIKTOKEN_VOCAB_FILE = 
"tokenizer.model" # Slow tokenizers have an additional added tokens files ADDED_TOKENS_FILE = "added_tokens.json" INIT_TOKENIZER_DOCSTRING += """ tokenizer_object ([`tokenizers.Tokenizer`]): A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗 tokenizers](../fast_tokenizers) for more information. tokenizer_file ([`str`]): A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗 tokenizers. """ MODEL_TO_TRAINER_MAPPING = { "BPE": BpeTrainer, "Unigram": UnigramTrainer, "WordLevel": WordLevelTrainer, "WordPiece": WordPieceTrainer, } VOCAB_FILES_NAMES = {"tokenizer_file": TOKENIZER_FILE, "vocab_file": TIKTOKEN_VOCAB_FILE} @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class PreTrainedTokenizerFast(PreTrainedTokenizerBase): """ Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class: PreTrainedTokenizer = None def __init__(self, *args, **kwargs): tokenizer_object = kwargs.pop("tokenizer_object", None) slow_tokenizer = kwargs.pop("__slow_tokenizer", None) gguf_file = kwargs.pop("gguf_file", None) fast_tokenizer_file = kwargs.pop("tokenizer_file", None) from_slow = kwargs.pop("from_slow", False) added_tokens_decoder = kwargs.pop("added_tokens_decoder", {}) self.add_prefix_space = kwargs.get("add_prefix_space", False) if from_slow and slow_tokenizer is None and self.slow_tokenizer_class is None: raise ValueError( "Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you " "have sentencepiece installed." ) if tokenizer_object is not None: fast_tokenizer = copy.deepcopy(tokenizer_object) elif fast_tokenizer_file is not None and not from_slow: # We have a serialization from tokenizers which let us directly build the backend fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file) elif slow_tokenizer: # We need to convert a slow tokenizer to build the backend fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) elif gguf_file is not None: # We need to convert a slow tokenizer to build the backend gguf_param = load_gguf_checkpoint(kwargs.get("vocab_file")) architecture = gguf_param["config"]["model_type"] tokenizer_dict = gguf_param["tokenizer"] tokenizer_config = gguf_param["tokenizer_config"] fast_tokenizer, additional_kwargs = convert_gguf_tokenizer(architecture, tokenizer_dict) kwargs.update(tokenizer_config) if len(additional_kwargs) > 0: kwargs.update(additional_kwargs) elif self.slow_tokenizer_class is not None and slow_tokenizer is not False: # We need to create and convert a slow tokenizer to build the backend slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs) fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) elif not slow_tokenizer: # We tried loading a slow_tokenizer with spm and failed, try to load with tiktoken 
self.vocab_file = kwargs.get("vocab_file") self.additional_special_tokens = kwargs.get("additional_special_tokens", []) fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True) slow_tokenizer = None else: raise ValueError( "Couldn't instantiate the backend tokenizer from one of: \n" "(1) a `tokenizers` library serialization file, \n" "(2) a slow tokenizer instance to convert or \n" "(3) an equivalent slow tokenizer class to instantiate and convert. \n" "You need to have sentencepiece or tiktoken installed to convert a slow tokenizer to a fast one." ) self._tokenizer = fast_tokenizer if slow_tokenizer is not None: kwargs.update(slow_tokenizer.init_kwargs) self._decode_use_source_tokenizer = False _truncation = self._tokenizer.truncation if _truncation is not None: self._tokenizer.enable_truncation(**_truncation) kwargs.setdefault("max_length", _truncation["max_length"]) kwargs.setdefault("truncation_side", _truncation["direction"]) kwargs.setdefault("stride", _truncation["stride"]) kwargs.setdefault("truncation_strategy", _truncation["strategy"]) else: self._tokenizer.no_truncation() _padding = self._tokenizer.padding if _padding is not None: self._tokenizer.enable_padding(**_padding) kwargs.setdefault("pad_token", _padding["pad_token"]) kwargs.setdefault("pad_token_type_id", _padding["pad_type_id"]) kwargs.setdefault("padding_side", _padding["direction"]) kwargs.setdefault("max_length", _padding["length"]) kwargs.setdefault("pad_to_multiple_of", _padding["pad_to_multiple_of"]) # We call this after having initialized the backend tokenizer because we update it. 
super().__init__(**kwargs) self._tokenizer.encode_special_tokens = self.split_special_tokens added_tokens_decoder_hash = {hash(repr(token)) for token in self.added_tokens_decoder} tokens_to_add = [ token for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0]) if hash(repr(token)) not in added_tokens_decoder_hash ] encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add] # if some of the special tokens are strings, we check if we don't already have a token tokens_to_add += [ token for token in self.all_special_tokens_extended if token not in encoder and token not in tokens_to_add ] if len(tokens_to_add) > 0: tokens = [] special_tokens = self.all_special_tokens for token in tokens_to_add: is_special = ( (token.special or str(token) in special_tokens) if isinstance(token, AddedToken) else str(token) in special_tokens ) if isinstance(token, str): token = AddedToken(token, special=is_special) else: token.special = is_special tokens.append(token) if tokens: self.add_tokens(tokens) try: pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", self.add_prefix_space) != self.add_prefix_space: pre_tok_class = getattr(pre_tokenizers_fast, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = self.add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) except Exception: # We'll get an error if there is no pre_tokenizer, or if it's a custom pre_tokenizer that can # not be serialized. In those cases, we just ignore the error as there's no pre_tokenizer # for which we need to update the `add_prefix_space` attribute. pass @property def is_fast(self) -> bool: return True @property def can_save_slow_tokenizer(self) -> bool: """ `bool`: Whether or not the slow tokenizer can be saved. For a sentencepiece based slow tokenizer, this can only be `True` if the original `"sentencepiece.model"` was not deleted. 
""" if "vocab_file" in self.vocab_files_names and self.vocab_files_names["vocab_file"].endswith(".model"): if hasattr(self, "vocab_file") and self.vocab_file: # If the vocab file is a sentencepiece model, we can save it return os.path.isfile(self.vocab_file) return False else: return True @property def vocab_size(self) -> int: """ `int`: Size of the base vocabulary (without the added tokens). """ return self._tokenizer.get_vocab_size(with_added_tokens=False) def get_vocab(self) -> dict[str, int]: return self._tokenizer.get_vocab(with_added_tokens=True) @property def vocab(self) -> dict[str, int]: return self.get_vocab() @property def added_tokens_encoder(self) -> dict[str, int]: """ Returns the sorted mapping from string to index. The added tokens encoder is cached for performance optimisation in `self._added_tokens_encoder` for the slow tokenizers. """ return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} @property def added_tokens_decoder(self) -> dict[int, AddedToken]: """ Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. Returns: `dict[str, int]`: The added tokens. """ return self._tokenizer.get_added_tokens_decoder() def get_added_vocab(self) -> dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: `dict[str, int]`: The added tokens. """ return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} def __bool__(self) -> bool: """ Returns True, to avoid expensive `assert tokenizer` gotchas. """ return True def __len__(self) -> int: """ Size of the full vocabulary with the added tokens. """ return self._tokenizer.get_vocab_size(with_added_tokens=True) @property def backend_tokenizer(self) -> TokenizerFast: """ `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. 
""" return self._tokenizer @property def decoder(self) -> DecoderFast: """ `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. """ return self._tokenizer.decoder def _convert_encoding( self, encoding: EncodingFast, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> tuple[dict[str, Any], list[EncodingFast]]: """ Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list of encodings, take care of building a batch from overflowing tokens. Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are lists (overflows) of lists (tokens). Output shape: (overflows, sequence length) """ if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_overflowing_tokens and encoding.overflowing is not None: encodings = [encoding] + encoding.overflowing else: encodings = [encoding] encoding_dict = defaultdict(list) for e in encodings: encoding_dict["input_ids"].append(e.ids) if return_token_type_ids: encoding_dict["token_type_ids"].append(e.type_ids) if return_attention_mask: encoding_dict["attention_mask"].append(e.attention_mask) if return_special_tokens_mask: encoding_dict["special_tokens_mask"].append(e.special_tokens_mask) if return_offsets_mapping: encoding_dict["offset_mapping"].append(e.offsets) if return_length: encoding_dict["length"].append(len(e.ids)) return encoding_dict, encodings def convert_tokens_to_ids(self, tokens: Union[str, Iterable[str]]) -> Union[int, list[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a Iterable of ids), using the 
vocabulary. Args: tokens (`str` or `Iterable[str]`): One or several token(s) to convert to token id(s). Returns: `int` or `list[int]`: The token id or list of token ids. """ if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) return [self._convert_token_to_id_with_added_voc(token) for token in tokens] def _convert_token_to_id_with_added_voc(self, token: str) -> int: index = self._tokenizer.token_to_id(token) if index is None: return self.unk_token_id return index def _convert_id_to_token(self, index: int) -> Optional[str]: return self._tokenizer.id_to_token(int(index)) def _add_tokens(self, new_tokens: list[Union[str, AddedToken]], special_tokens=False) -> int: if special_tokens: return self._tokenizer.add_special_tokens(new_tokens) return self._tokenizer.add_tokens(new_tokens) def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. <Tip> This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. </Tip> Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. """ return self._tokenizer.num_special_tokens_to_add(pair) def convert_ids_to_tokens( self, ids: Union[int, list[int]], skip_special_tokens: bool = False ) -> Union[str, list[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `list[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `list[str]`: The decoded token(s). 
""" if isinstance(ids, int): return self._tokenizer.id_to_token(ids) tokens = [] # self.all_special_ids is an @property which may be slow, so only compute it once before the loop ids_to_skip = set(self.all_special_ids) if skip_special_tokens else set() for index in ids: index = int(index) if index in ids_to_skip: continue tokens.append(self._tokenizer.id_to_token(index)) return tokens def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]: return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens() def set_truncation_and_padding( self, padding_strategy: PaddingStrategy, truncation_strategy: TruncationStrategy, max_length: int, stride: int, pad_to_multiple_of: Optional[int], padding_side: Optional[str], ): """ Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards. The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section. Args: padding_strategy ([`~utils.PaddingStrategy`]): The kind of padding that will be applied to the input truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]): The kind of truncation that will be applied to the input max_length (`int`): The maximum size of a sequence. stride (`int`): The stride to use when handling overflow. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
""" _truncation = self._tokenizer.truncation _padding = self._tokenizer.padding # Set truncation and padding on the backend tokenizer if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE: if _truncation is not None: self._tokenizer.no_truncation() else: target = { "max_length": max_length, "stride": stride, "strategy": truncation_strategy.value, "direction": self.truncation_side, } # _truncation might contain more keys that the target `transformers` # supports. Use only the target keys to trigger `enable_truncation`. # This should enable this code to works on various `tokenizers` # targets. if _truncation is None: current = None else: current = {k: _truncation.get(k, None) for k in target} if current != target: self._tokenizer.enable_truncation(**target) if padding_strategy == PaddingStrategy.DO_NOT_PAD: if _padding is not None: self._tokenizer.no_padding() else: length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None target = { "length": length, "direction": padding_side if padding_side is not None else self.padding_side, "pad_id": self.pad_token_id, "pad_token": self.pad_token, "pad_type_id": self.pad_token_type_id, "pad_to_multiple_of": pad_to_multiple_of, } if _padding != target: self._tokenizer.enable_padding(**target) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair] ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, 
return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, (tuple, list)): raise TypeError( f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})" ) # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, ) if self._tokenizer.encode_special_tokens != split_special_tokens: self._tokenizer.encode_special_tokens = split_special_tokens encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=is_split_into_words, ) # Convert encoding to dict # `Tokens` has type: tuple[ # list[dict[str, list[list[int]]]] or list[dict[str, 2D-Tensor]], # list[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0]: stack = [e for item, _ 
in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: batched_input = [(text, text_pair)] if text_pair else [text] batched_output = self._batch_encode_plus( batched_input, is_split_into_words=is_split_into_words, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=split_special_tokens, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: (value[0] if len(value) > 0 and isinstance(value[0], list) else value) for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def convert_tokens_to_string(self, tokens: list[str]) -> str: return ( self.backend_tokenizer.decoder.decode(tokens) if self.backend_tokenizer.decoder is not None else " ".join(tokens) ) def _decode( self, token_ids: Union[int, list[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) if isinstance(token_ids, int): token_ids = [token_ids] text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def _save_pretrained( self, save_directory: Union[str, os.PathLike], file_names: tuple[str], legacy_format: Optional[bool] = None, filename_prefix: Optional[str] = None, ) -> tuple[str]: """ Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens as well as in a unique JSON file containing 
{config + vocab + added-tokens}. """ save_directory = str(save_directory) if self.slow_tokenizer_class is None and legacy_format is True: raise ValueError( "Your tokenizer does not have a legacy version defined and therefore cannot register this version. You" " might consider leaving the legacy_format at `None` or setting it to `False`." ) save_slow = ( (legacy_format is None or legacy_format is True) and self.slow_tokenizer_class is not None and self.can_save_slow_tokenizer ) save_fast = legacy_format is None or legacy_format is False if save_slow: added_tokens_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE ) # make sure to be forward compatible added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} if added_vocab: with open(added_tokens_file, "w", encoding="utf-8") as f: out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" f.write(out_str) vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) file_names = file_names + vocab_files + (added_tokens_file,) if save_fast: tokenizer_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE ) self.backend_tokenizer.save(tokenizer_file) file_names = file_names + (tokenizer_file,) return file_names def train_new_from_iterator( self, text_iterator, vocab_size, length=None, new_special_tokens=None, special_tokens_map=None, **kwargs, ): """ Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one. Args: text_iterator (generator of `list[str]`): The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory. vocab_size (`int`): The size of the vocabulary you want for your tokenizer. length (`int`, *optional*): The total number of sequences in the iterator. 
This is used to provide meaningful progress tracking new_special_tokens (list of `str` or `AddedToken`, *optional*): A list of new special tokens to add to the tokenizer you are training. special_tokens_map (`dict[str, str]`, *optional*): If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library. Returns: [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on `text_iterator`. """ tokenizer_json = json.loads(self._tokenizer.to_str()) # Remove added tokens for now (uses IDs of tokens) added_tokens = tokenizer_json.pop("added_tokens") # Remove post processor for now (uses IDs of tokens) post_processor = tokenizer_json.pop("post_processor") unk_token = None # Remove vocab if tokenizer_json["model"]["type"] == "BPE": tokenizer_json["model"]["vocab"] = {} tokenizer_json["model"]["merges"] = [] elif tokenizer_json["model"]["type"] == "Unigram": if tokenizer_json["model"]["unk_id"] is not None: unk_id = tokenizer_json["model"]["unk_id"] unk_token = tokenizer_json["model"]["vocab"][unk_id][0] if special_tokens_map is not None and unk_token in special_tokens_map: unk_token = special_tokens_map[unk_token] tokenizer_json["model"]["unk_id"] = 0 tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]] elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]: tokenizer_json["model"]["vocab"] = {} else: raise ValueError( f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) " "only BPE, Unigram, WordLevel and WordPiece." 
) if ( special_tokens_map is not None and "unk_token" in tokenizer_json["model"] and tokenizer_json["model"]["unk_token"] in special_tokens_map ): tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]] tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json)) # Get the special tokens from the current tokenizer if none are specified. special_tokens = [] for added_token in added_tokens: special = added_token.pop("special", None) _ = added_token.pop("id", None) if tokenizer_json["model"]["type"] != "Unigram" and not special: continue if special_tokens_map is not None and added_token["content"] in special_tokens_map: added_token["content"] = special_tokens_map[added_token["content"]] special_tokens.append(AddedToken(**added_token)) if new_special_tokens is not None: special_tokens.extend(new_special_tokens) # Trainer needs to know the end of word / continuing subword thingies in BPE if ( tokenizer_json["model"]["type"] == "BPE" and "continuing_subword_prefix" not in kwargs and tokenizer_json["model"]["continuing_subword_prefix"] is not None ): kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"] if ( tokenizer_json["model"]["type"] == "BPE" and "end_of_word_suffix" not in kwargs and tokenizer_json["model"]["end_of_word_suffix"] is not None ): kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"] if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None: kwargs["unk_token"] = unk_token if tokenizer_json["pre_tokenizer"] is not None: if ( tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel" or tokenizer_json["pre_tokenizer"]["type"] == "Sequence" and "pretokenizers" in tokenizer_json["pre_tokenizer"] and any( pretokenizer["type"] == "ByteLevel" for pretokenizer in tokenizer_json["pre_tokenizer"]["pretokenizers"] ) ): kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet() trainer_class = 
MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]] trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs) tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer) if post_processor is not None: trained_tokenizer_json = json.loads(tokenizer.to_str()) # Almost done, we just have to adjust the token IDs in the post processor if "special_tokens" in post_processor: for key in post_processor["special_tokens"]: tokens = post_processor["special_tokens"][key]["tokens"] if special_tokens_map is not None: tokens = [special_tokens_map.get(token, token) for token in tokens] post_processor["special_tokens"][key]["tokens"] = tokens for token in tokens: token_id = tokenizer.token_to_id(token) if token_id is None: raise ValueError( "Attempted to set a token in the post processor that does not exist in the mapping" ) post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens] for special_token in ["cls", "sep"]: if special_token in post_processor: token, _ = post_processor[special_token] if special_tokens_map is not None and token in special_tokens_map: token = special_tokens_map[token] token_id = tokenizer.token_to_id(token) if token_id is None: raise ValueError( "Attempted to set a token in the post processor that does not exist in the mapping" ) post_processor[special_token] = [token, token_id] trained_tokenizer_json["post_processor"] = post_processor tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json)) kwargs = self.init_kwargs.copy() # Map pad/cls/mask token at the Transformers level special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove("additional_special_tokens") for token in special_tokens_list: if getattr(self, token) is not None: special_token = getattr(self, token) if special_tokens_map is not None and special_token in special_tokens_map: special_token = special_tokens_map[special_token] special_token_full = 
self._special_tokens_map.get(token, None) if isinstance(special_token_full, AddedToken): # Create an added token with the same parameters except the content kwargs[token] = AddedToken( special_token, single_word=special_token_full.single_word, lstrip=special_token_full.lstrip, rstrip=special_token_full.rstrip, normalized=special_token_full.normalized, special=True, ) else: kwargs[token] = special_token additional_special_tokens = self.additional_special_tokens if new_special_tokens is not None: additional_special_tokens.extend(new_special_tokens) if len(additional_special_tokens) > 0: kwargs["additional_special_tokens"] = additional_special_tokens return self.__class__(tokenizer_object=tokenizer, **kwargs)
transformers/src/transformers/tokenization_utils_fast.py/0
{ "file_path": "transformers/src/transformers/tokenization_utils_fast.py", "repo_id": "transformers", "token_count": 18367 }
572
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import warnings
from functools import wraps
from typing import Optional

import packaging.version

from .. import __version__
from . import ExplicitEnum, is_torch_available, is_torchdynamo_compiling


# This is needed in case we deprecate a kwarg of a function/method being compiled
if is_torch_available():
    import torch  # noqa: F401


class Action(ExplicitEnum):
    # Escalation level chosen for a deprecated kwarg: do nothing, warn while the
    # deprecation is still pending, warn unconditionally, or raise a ValueError.
    NONE = "none"
    NOTIFY = "notify"
    NOTIFY_ALWAYS = "notify_always"
    RAISE = "raise"


def deprecate_kwarg(
    old_name: str,
    version: str,
    new_name: Optional[str] = None,
    warn_if_greater_or_equal_version: bool = False,
    raise_if_greater_or_equal_version: bool = False,
    raise_if_both_names: bool = False,
    additional_message: Optional[str] = None,
):
    """
    Function or method decorator to notify users about deprecated keyword arguments, replacing them with a new name if
    specified. Note that this decorator is `torch.compile`-safe, i.e. it will not cause graph breaks (but no warning
    will be displayed if compiling).

    This decorator allows you to:
    - Notify users when a keyword argument is deprecated.
    - Automatically replace deprecated keyword arguments with new ones.
    - Raise an error if deprecated arguments are used, depending on the specified conditions.

    By default, the decorator notifies the user about the deprecated argument while the `transformers.__version__` <
    specified `version` in the decorator. To keep notifications with any version `warn_if_greater_or_equal_version=True`
    can be set.

    Parameters:
        old_name (`str`):
            Name of the deprecated keyword argument.
        version (`str`):
            The version in which the keyword argument was (or will be) deprecated.
        new_name (`Optional[str]`, *optional*):
            The new name for the deprecated keyword argument. If specified, the deprecated keyword argument will be
            replaced with this new name.
        warn_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
            Whether to show warning if current `transformers` version is greater or equal to the deprecated version.
        raise_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
            Whether to raise `ValueError` if current `transformers` version is greater or equal to the deprecated
            version.
        raise_if_both_names (`bool`, *optional*, defaults to `False`):
            Whether to raise `ValueError` if both deprecated and new keyword arguments are set.
        additional_message (`Optional[str]`, *optional*):
            An additional message to append to the default deprecation message.

    Raises:
        ValueError:
            If raise_if_greater_or_equal_version is True and the current version is greater than or equal to the
            deprecated version, or if raise_if_both_names is True and both old and new keyword arguments are provided.

    Returns:
        Callable: A wrapped function that handles the deprecated keyword arguments according to the specified
        parameters.

    Example usage with renaming argument:

    ```python
    @deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="6.0.0")
    def my_function(do_reduce_labels):
        print(do_reduce_labels)


    my_function(reduce_labels=True)  # Will show a deprecation warning and use do_reduce_labels=True
    ```

    Example usage without renaming argument:

    ```python
    @deprecate_kwarg("max_size", version="6.0.0")
    def my_function(max_size):
        print(max_size)


    my_function(max_size=1333)  # Will show a deprecation warning
    ```

    """
    # The version comparison is done once at decoration time, not per call.
    deprecated_version = packaging.version.parse(version)
    current_version = packaging.version.parse(__version__)
    is_greater_or_equal_version = current_version >= deprecated_version

    if is_greater_or_equal_version:
        version_message = f"and removed starting from version {version}"
    else:
        version_message = f"and will be removed in version {version}"

    def wrapper(func):
        # Required for better warning message
        sig = inspect.signature(func)
        function_named_args = set(sig.parameters.keys())
        is_instance_method = "self" in function_named_args
        is_class_method = "cls" in function_named_args

        @wraps(func)
        def wrapped_func(*args, **kwargs):
            # Get class + function name (just for better warning message)
            # NOTE(review): assumes `self`/`cls` arrives positionally in args[0];
            # a purely keyword-based invocation would leave `args` empty — TODO confirm callers.
            func_name = func.__name__
            if is_instance_method:
                func_name = f"{args[0].__class__.__name__}.{func_name}"
            elif is_class_method:
                func_name = f"{args[0].__name__}.{func_name}"

            minimum_action = Action.NONE
            message = None

            # deprecated kwarg and its new version are set for function call -> replace it with new name
            if old_name in kwargs and new_name in kwargs:
                minimum_action = Action.RAISE if raise_if_both_names else Action.NOTIFY_ALWAYS
                message = f"Both `{old_name}` and `{new_name}` are set for `{func_name}`. Using `{new_name}={kwargs[new_name]}` and ignoring deprecated `{old_name}={kwargs[old_name]}`."
                kwargs.pop(old_name)

            # only deprecated kwarg is set for function call -> replace it with new name
            elif old_name in kwargs and new_name is not None and new_name not in kwargs:
                minimum_action = Action.NOTIFY
                message = f"`{old_name}` is deprecated {version_message} for `{func_name}`. Use `{new_name}` instead."
                kwargs[new_name] = kwargs.pop(old_name)

            # deprecated kwarg is not set for function call and new name is not specified -> just notify
            elif old_name in kwargs:
                minimum_action = Action.NOTIFY
                message = f"`{old_name}` is deprecated {version_message} for `{func_name}`."

            if message is not None and additional_message is not None:
                message = f"{message} {additional_message}"

            # update minimum_action if argument is ALREADY deprecated (current version >= deprecated version)
            if is_greater_or_equal_version:
                # change to (NOTIFY, NOTIFY_ALWAYS) -> RAISE if specified
                # in case we want to raise error for already deprecated arguments
                if raise_if_greater_or_equal_version and minimum_action != Action.NONE:
                    minimum_action = Action.RAISE

                # change to NOTIFY -> NONE if specified (NOTIFY_ALWAYS can't be changed to NONE)
                # in case we want to ignore notifications for already deprecated arguments
                elif not warn_if_greater_or_equal_version and minimum_action == Action.NOTIFY:
                    minimum_action = Action.NONE

            # raise error or notify user
            if minimum_action == Action.RAISE:
                raise ValueError(message)
            # If we are compiling, we do not raise the warning as it would break compilation
            elif minimum_action in (Action.NOTIFY, Action.NOTIFY_ALWAYS) and not is_torchdynamo_compiling():
                # DeprecationWarning is ignored by default, so we use FutureWarning instead
                warnings.warn(message, FutureWarning, stacklevel=2)

            return func(*args, **kwargs)

        return wrapped_func

    return wrapper
transformers/src/transformers/utils/deprecation.py/0
{ "file_path": "transformers/src/transformers/utils/deprecation.py", "repo_id": "transformers", "token_count": 3013 }
573
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class BaseImageProcessorFast(metaclass=DummyObject): _backends = ["torchvision"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchvision"]) class BaseVideoProcessor(metaclass=DummyObject): _backends = ["torchvision"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchvision"])
transformers/src/transformers/utils/dummy_torchvision_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_torchvision_objects.py", "repo_id": "transformers", "token_count": 173 }
574
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import warnings from copy import deepcopy from typing import Any, Optional, Union import numpy as np from .dynamic_module_utils import custom_object_save from .image_processing_utils import ( BatchFeature, get_size_dict, ) from .image_processing_utils_fast import BaseImageProcessorFast from .image_utils import ( ChannelDimension, SizeDict, validate_kwargs, ) from .processing_utils import Unpack, VideosKwargs from .utils import ( VIDEO_PROCESSOR_NAME, TensorType, add_start_docstrings, cached_file, copy_func, download_url, is_offline_mode, is_remote_url, is_torch_available, is_torchvision_available, is_torchvision_v2_available, is_vision_available, logging, ) from .utils.import_utils import requires from .video_utils import ( VideoInput, VideoMetadata, group_videos_by_shape, load_video, make_batched_videos, reorder_videos, to_channel_dimension_format, ) if is_vision_available(): from .image_utils import PILImageResampling if is_torch_available(): import torch if is_torchvision_available(): from .image_utils import pil_torch_interpolation_mapping if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F logger = logging.get_logger(__name__) BASE_VIDEO_PROCESSOR_DOCSTRING = r""" Args: do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the 
video's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `self.size`): Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess` method. size_divisor (`int`, *optional*, defaults to `self.size_divisor`): The size by which to make sure both the height and width can be divided. default_to_square (`bool`, *optional*, defaults to `self.default_to_square`): Whether to default to a square video when resizing, if size is an int. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. do_pad (`bool`, *optional*): Whether to pad the video to the `(max_height, max_width)` of the videos in the batch. crop_size (`dict[str, int]` *optional*, defaults to `self.crop_size`): Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess` method. 
    image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
        Mean to use if normalizing the video. This is a float or list of floats the length of the number of
        channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method.
    image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
        Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
        number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
    do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
        Whether to convert the video to RGB.
    video_metadata (`VideoMetadata`, *optional*):
        Metadata of the video containing information about total duration, fps and total number of frames.
    do_sample_frames (`bool`, *optional*, defaults to `self.do_sample_frames`):
        Whether to sample frames from the video before processing or to process the whole video.
    num_frames (`int`, *optional*, defaults to `self.num_frames`):
        Maximum number of frames to sample when `do_sample_frames=True`.
    fps (`int` or `float`, *optional*, defaults to `self.fps`):
        Target frames to sample per second when `do_sample_frames=True`.
    return_tensors (`str` or `TensorType`, *optional*):
        Returns stacked tensors if set to `pt`, otherwise returns a list of tensors.
    data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
        The channel dimension format for the output video. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input video. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input video. If unset, the channel dimension format is inferred from the input video. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: video in (height, width) format. device (`torch.device`, *optional*): The device to process the videos on. If unset, the device is inferred from the input videos.""" @add_start_docstrings( "Constructs a base VideoProcessor.", BASE_VIDEO_PROCESSOR_DOCSTRING, ) @requires(backends=("vision", "torchvision")) class BaseVideoProcessor(BaseImageProcessorFast): _auto_class = None resample = None image_mean = None image_std = None size = None size_divisor = None default_to_square = True crop_size = None do_resize = None do_center_crop = None do_pad = None do_rescale = None rescale_factor = 1 / 255 do_normalize = None do_convert_rgb = None do_sample_frames = None fps = None num_frames = None video_metadata = None valid_kwargs = VideosKwargs model_input_names = ["pixel_values_videos"] def __init__(self, **kwargs: Unpack[VideosKwargs]) -> None: super().__init__() self._processor_class = kwargs.pop("processor_class", None) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err # Prepare size related keys and turn then into `SizeDict` size = kwargs.pop("size", self.size) self.size = ( get_size_dict(size=size, default_to_square=kwargs.pop("default_to_square", self.default_to_square)) if size is not None else None ) crop_size = kwargs.pop("crop_size", self.crop_size) self.crop_size = get_size_dict(crop_size, param_name="crop_size") if 
crop_size is not None else None # Save valid kwargs in a list for further processing self.model_valid_processing_keys = list(self.valid_kwargs.__annotations__.keys()) for key in self.model_valid_processing_keys: if kwargs.get(key) is not None: setattr(self, key, kwargs[key]) else: setattr(self, key, deepcopy(getattr(self, key, None))) def __call__(self, videos, **kwargs) -> BatchFeature: return self.preprocess(videos, **kwargs) def convert_to_rgb( self, video: "torch.Tensor", ) -> VideoInput: """ Converts a video to RGB format. Args: video (`"torch.Tensor"`): The video to convert. Returns: `torch.Tensor`: The converted video. """ video = F.grayscale_to_rgb(video) if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any(): return video # There is a transparency layer, blend it with a white background. # Calculate the alpha proportion for blending. alpha = video[..., 3, :, :] / 255.0 video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :] return video def sample_frames( self, video: "torch.Tensor", metadata: Optional[Union[VideoMetadata, dict]] = None, num_frames: Optional[int] = None, fps: Optional[Union[int, float]] = None, ): """ Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames. If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames` and `fps` are mutually exclusive. Args: video (`torch.Tensor`): Video that need to be sampled. metadata (`VideoMetadata`, *optional*): Metadata of the video containing information about total duration, fps and total number of frames. num_frames (`int`, *optional*): Maximum number of frames to sample. Defaults to `self.num_frames`. fps (`int` or `float`, *optional*): Target frames to sample per second. Defaults to `self.fps`. Returns: torch.Tensor: Sampled video frames. 
""" if fps is not None and num_frames is not None: raise ValueError( "`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!" ) num_frames = num_frames if num_frames is not None else self.num_frames fps = fps if fps is not None else self.fps total_num_frames = video.shape[0] # If num_frames is not given but fps is, calculate num_frames from fps if num_frames is None and fps is not None: if metadata is None: raise ValueError( "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. " "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video" ) num_frames = int(total_num_frames / metadata["fps"] * fps) if num_frames > total_num_frames: raise ValueError( f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. " ) if num_frames is not None: indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int() else: indices = torch.arange(0, total_num_frames).int() video = video[indices].contiguous() return video def _prepare_input_videos( self, videos: VideoInput, video_metadata: VideoMetadata = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> list["torch.Tensor"]: """ Prepare the input videos for processing. 
""" videos = make_batched_videos(videos) if video_metadata is not None: batch_metadata = [metadata for batch_list in video_metadata for metadata in batch_list] else: batch_metadata = [None] * len(videos) processed_videos = [] for video in videos: # `make_batched_videos` always returns a 4D array per video if isinstance(video, np.ndarray): video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_data_format) # not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays video = torch.from_numpy(video).contiguous() processed_videos.append(video) return processed_videos, batch_metadata @add_start_docstrings(BASE_VIDEO_PROCESSOR_DOCSTRING) def preprocess( self, videos: VideoInput, **kwargs: Unpack[VideosKwargs], ) -> BatchFeature: validate_kwargs( captured_kwargs=kwargs.keys(), valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"], ) # Set default kwargs from self. This ensures that if a kwarg is not provided # by the user, it gets its default value from the instance, or is set to None. 
for kwarg_name in self.valid_kwargs.__annotations__: kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None)) input_data_format = kwargs.pop("input_data_format") video_metadata = kwargs.pop("video_metadata") videos, video_metadata = self._prepare_input_videos( videos=videos, video_metadata=video_metadata, input_data_format=input_data_format ) kwargs = self._further_process_kwargs(**kwargs) self._validate_preprocess_kwargs(**kwargs) # torch resize uses interpolation instead of resample resample = kwargs.pop("resample") kwargs["interpolation"] = ( pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample ) # Pop kwargs that are not needed in _preprocess kwargs.pop("default_to_square") kwargs.pop("data_format") return self._preprocess(videos=videos, video_metadata=video_metadata, **kwargs) def _preprocess( self, videos: list["torch.Tensor"], video_metadata: Union[list[VideoMetadata], list[dict]], do_convert_rgb: bool, do_resize: bool, size: SizeDict, size_divisor: Optional[int], interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, do_pad: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_sample_frames: Optional[bool] = None, fps: Optional[Union[int, float]] = None, num_frames: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, device: Optional["torch.Tensor"] = None, ) -> BatchFeature: if do_sample_frames: # Sample video frames videos = [ self.sample_frames(video, metadata=metadata, num_frames=num_frames, fps=fps) for video, metadata in zip(videos, video_metadata) ] # We need to sample frames first before moving to device, if `do_sample_frames=True`. 
Otherwise # moving the whole video incurs high GPU mem usage for long videos if device is not None: videos = [video.to(device) for video in videos] # Group videos by size for batched resizing grouped_videos, grouped_videos_index = group_videos_by_shape(videos) resized_videos_grouped = {} for shape, stacked_videos in grouped_videos.items(): if do_convert_rgb: stacked_videos = self.convert_to_rgb(stacked_videos) if do_resize: stacked_videos = self.resize( stacked_videos, size=size, size_divisor=size_divisor, interpolation=interpolation ) resized_videos_grouped[shape] = stacked_videos resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index) # Group videos by size for further processing # Needed in case do_resize is False, or resize returns videos with different sizes grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos) processed_videos_grouped = {} for shape, stacked_videos in grouped_videos.items(): if do_center_crop: stacked_videos = self.center_crop(stacked_videos, crop_size) # Fused rescale and normalize stacked_videos = self.rescale_and_normalize( stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_videos_grouped[shape] = stacked_videos processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index) processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors) @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ): r""" Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from an video processor. 
Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained video hosted inside a model repo on huggingface.co. - a path to a *directory* containing a video processor file saved using the [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved video processor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model video processor should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the video processor files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final video processor object. 
                If `True`, then this function returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is
                a dictionary consisting of the key/value pairs whose keys are not video processor attributes: i.e., the
                part of `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`].

        Examples:

        ```python
        # We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
        # derived class: *LlavaOnevisionVideoProcessor*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
        )  # Download video_processing_config from huggingface.co and cache.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "./test/saved_model/"
        )  # E.g.
        video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/preprocessor_config.json")
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
        )
        assert video_processor.do_normalize is False
        video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
        )
        assert video_processor.do_normalize is False
        assert unused_kwargs == {"foo": False}
        ```"""
        # Re-pack the explicit keyword-only arguments so they reach `get_video_processor_dict` below.
        kwargs["cache_dir"] = cache_dir
        kwargs["force_download"] = force_download
        kwargs["local_files_only"] = local_files_only
        kwargs["revision"] = revision

        # `use_auth_token` is the deprecated alias of `token`; passing both is an error.
        use_auth_token = kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            kwargs["token"] = token

        # Resolve the config file to a dict, then build the processor instance from it.
        video_processor_dict, kwargs = cls.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)

        return cls.from_dict(video_processor_dict, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """
        Save a video processor object to the directory `save_directory`, so that it can be re-loaded using the
        [`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the video processor JSON file will be saved (will be created if it does not exist).
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        # Same `use_auth_token` -> `token` deprecation shim as in `from_pretrained`.
        use_auth_token = kwargs.pop("use_auth_token", None)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if kwargs.get("token") is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            # NOTE(review): assumes `save_directory` is a `str` here (`.split` would fail on a `PathLike`) — TODO confirm callers always pass str.
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)

        self.to_json_file(output_video_processor_file)
        logger.info(f"Video processor saved in {output_video_processor_file}")

        if push_to_hub:
            # Only upload files that changed since the timestamps captured above.
            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("token"),
            )

        return [output_video_processor_file]

    @classmethod
    def get_video_processor_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """
        From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
        video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.

        Returns:
            `tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the video processor object and
            the remaining (unused) kwargs.
        """
        # Pop every download-related kwarg; whatever is left in `kwargs` is returned to the caller untouched.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", None)
        proxies = kwargs.pop("proxies", None)
        token = kwargs.pop("token", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", "")
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        # Telemetry metadata forwarded to the Hub download call.
        user_agent = {"file_type": "video processor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isfile(pretrained_model_name_or_path):
            # Direct path to a config file.
            resolved_video_processor_file = pretrained_model_name_or_path
            is_local = True
        elif is_remote_url(pretrained_model_name_or_path):
            # Direct URL to a config file.
            video_processor_file = pretrained_model_name_or_path
            resolved_video_processor_file = download_url(pretrained_model_name_or_path)
        else:
            try:
                # Try to load with a new config name first and if not successful try with
                # the old file name. In case we can load with old name only, raise a deprecation warning
                # Deprecated until v5.0
                video_processor_file = VIDEO_PROCESSOR_NAME
                resolved_video_processor_file = cached_file(
                    pretrained_model_name_or_path,
                    video_processor_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder,
                )
            except OSError:
                # Legacy fallback: older checkpoints stored the video processor config in the
                # image-processor file name.
                video_processor_file = "preprocessor_config.json"
                resolved_video_processor_file = cached_file(
                    pretrained_model_name_or_path,
                    video_processor_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder,
                )
                # NOTE(review): this warning names `preprocessor.json` / `video_preprocessor.json`, while the code
                # loads `preprocessor_config.json` and saves under VIDEO_PROCESSOR_NAME — confirm wording is intended.
                logger.warning_once(
                    "You have video processor config saved in `preprocessor.json` file which is deprecated. "
                    "Video processor configs should be saved in their own `video_preprocessor.json` file. You can rename "
                    "the file or load and save the processor back which renames it automatically. "
                    "Loading from `preprocessor.json` will be removed in v5.0."
                )
            # NOTE(review): this second `except OSError` appears unreachable — OSError from the try body is caught by
            # the handler above, and exceptions raised inside that handler are not caught here. Confirm whether a
            # nested try was intended.
            except OSError:
                # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
                # the original exception.
                raise
            except Exception:
                # For any other exception, we throw a generic error.
                raise OSError(
                    f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                    " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                    f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                    f" directory containing a {VIDEO_PROCESSOR_NAME} file"
                )

        try:
            # Load video_processor dict
            with open(resolved_video_processor_file, "r", encoding="utf-8") as reader:
                text = reader.read()
            video_processor_dict = json.loads(text)

        except json.JSONDecodeError:
            raise OSError(
                f"It looks like the config file at '{resolved_video_processor_file}' is not a valid JSON file."
            )

        if is_local:
            logger.info(f"loading configuration file {resolved_video_processor_file}")
        else:
            logger.info(
                f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}"
            )

        return video_processor_dict, kwargs

    @classmethod
    def from_dict(cls, video_processor_dict: dict[str, Any], **kwargs):
        """
        Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.

        Args:
            video_processor_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the video processor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                [`~video_processing_utils.VideoProcessorBase.to_dict`] method.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the video processor object.

        Returns:
            [`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those
            parameters.
        """
        video_processor_dict = video_processor_dict.copy()
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)

        # The `size` parameter is a dict and was previously an int or tuple in feature extractors.
        # We set `size` here directly to the `video_processor_dict` so that it is converted to the appropriate
        # dict within the video processor and isn't overwritten if `size` is passed in as a kwarg.
        if "size" in kwargs and "size" in video_processor_dict:
            video_processor_dict["size"] = kwargs.pop("size")
        if "crop_size" in kwargs and "crop_size" in video_processor_dict:
            video_processor_dict["crop_size"] = kwargs.pop("crop_size")

        video_processor = cls(**video_processor_dict)

        # Update video_processor with kwargs if needed: any kwarg matching an existing attribute is consumed,
        # everything else is left in `kwargs` and may be returned below.
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(video_processor, key):
                setattr(video_processor, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        logger.info(f"Video processor {video_processor}")
        if return_unused_kwargs:
            return video_processor, kwargs
        else:
            return video_processor

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
        """
        output = deepcopy(self.__dict__)
        # Internal bookkeeping attributes are not part of the serialized config.
        output.pop("model_valid_processing_keys", None)
        output.pop("_valid_kwargs_names", None)
        output["video_processor_type"] = self.__class__.__name__

        return output

    def to_json_string(self) -> str:
        """
        Serializes this instance to a JSON string.

        Returns:
            `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
        """
        dictionary = self.to_dict()

        # numpy arrays are not JSON-serializable; convert them to plain lists first.
        for key, value in dictionary.items():
            if isinstance(value, np.ndarray):
                dictionary[key] = value.tolist()

        # make sure private name "_processor_class" is correctly
        # saved as "processor_class"
        _processor_class = dictionary.pop("_processor_class", None)
        if _processor_class is not None:
            dictionary["processor_class"] = _processor_class

        return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this image_processor instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())

    def __repr__(self):
        # Debug representation: class name plus the full JSON-serialized config.
        return f"{self.__class__.__name__} {self.to_json_string()}"

    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike]):
        """
        Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON
        file of parameters.

        Args:
            json_file (`str` or `os.PathLike`):
                Path to the JSON file containing the parameters.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
            instantiated from that JSON file.
        """
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        video_processor_dict = json.loads(text)
        return cls(**video_processor_dict)

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoVideoProcessor"):
        """
        Register this class with a given auto class. This should only be used for custom video processors as the ones
        in the library are already mapped with `AutoVideoProcessor`.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor"`):
                The auto class to register this new video processor with.
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

    def fetch_videos(self, video_url_or_urls: Union[str, list[str]]):
        """
        Convert a single or a list of urls into the corresponding `np.array` objects.

        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.
        """
        if isinstance(video_url_or_urls, list):
            # Recurse element-wise so nested lists are handled too.
            return [self.fetch_videos(x) for x in video_url_or_urls]
        elif isinstance(video_url_or_urls, str):
            return load_video(video_url_or_urls)
        else:
            raise TypeError(f"only a single or a list of entries is supported but got type={type(video_url_or_urls)}")


# Fill the `push_to_hub` docstring template with video-processor-specific wording.
BaseVideoProcessor.push_to_hub = copy_func(BaseVideoProcessor.push_to_hub)
if BaseVideoProcessor.push_to_hub.__doc__ is not None:
    BaseVideoProcessor.push_to_hub.__doc__ = BaseVideoProcessor.push_to_hub.__doc__.format(
        object="video processor", object_class="AutoVideoProcessor", object_files="video processor file"
    )
transformers/src/transformers/video_processing_utils.py/0
{ "file_path": "transformers/src/transformers/video_processing_utils.py", "repo_id": "transformers", "token_count": 17113 }
575
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest
from unittest.mock import patch

import transformers.commands.transformers_cli as cli
from transformers.commands.chat import ChatArguments, ChatCommand
from transformers.testing_utils import CaptureStd


class ChatCLITest(unittest.TestCase):
    """Argument-dispatch tests for `transformers chat`; `ChatCommand.run` is mocked so no model is loaded."""

    def test_help(self):
        """`--help` triggers argparse's SystemExit and prints the chat help text."""
        with patch("sys.argv", ["transformers", "chat", "--help"]), CaptureStd() as cs:
            with self.assertRaises(SystemExit):
                cli.main()
        self.assertIn("chat interface", cs.out.lower())

    @patch.object(ChatCommand, "run")
    def test_cli_dispatch_model(self, run_mock):
        """
        Running transformers chat with just a model should work & spawn a serve underneath
        """
        args = ["transformers", "chat", "hf-internal-testing/tiny-random-gpt2"]
        with patch("sys.argv", args):
            cli.main()
        run_mock.assert_called_once()

    def test_cli_dispatch_url(self):
        """
        Running transformers chat with just a URL should not work as a model should additionally be specified
        """
        args = ["transformers", "chat", "localhost:8000"]
        with self.assertRaises(ValueError):
            with patch("sys.argv", args):
                cli.main()

    @patch.object(ChatCommand, "run")
    def test_cli_dispatch_url_and_model(self, run_mock):
        """
        Running transformers chat with a URL and a model should work
        """
        args = ["transformers", "chat", "localhost:8000", "--model_name_or_path=hf-internal-testing/tiny-random-gpt2"]
        with patch("sys.argv", args):
            cli.main()
        run_mock.assert_called_once()

    def test_parsed_args(self):
        """The positional model name and trailing generate flags are forwarded to `ChatCommand.__init__`."""
        with (
            patch.object(ChatCommand, "__init__", return_value=None) as init_mock,
            patch.object(ChatCommand, "run") as run_mock,
            patch(
                "sys.argv",
                [
                    "transformers",
                    "chat",
                    "test-model",
                    "max_new_tokens=64",
                ],
            ),
        ):
            cli.main()

        init_mock.assert_called_once()
        run_mock.assert_called_once()
        # First positional argument of `ChatCommand.__init__` is the parsed namespace.
        parsed_args = init_mock.call_args[0][0]
        self.assertEqual(parsed_args.model_name_or_path_or_address, "test-model")
        self.assertEqual(parsed_args.generate_flags, ["max_new_tokens=64"])


class ChatUtilitiesTest(unittest.TestCase):
    """Tests for chat helper utilities that need no model download."""

    def test_save_and_clear_chat(self):
        """Saving a chat writes a file in `save_folder`; clearing returns an empty history."""
        # Fix: the original used `tempfile.mkdtemp()`, which leaked the directory (and the saved
        # chat file) after the test run. `TemporaryDirectory` + `addCleanup` removes it reliably.
        tmp_dir = tempfile.TemporaryDirectory()
        self.addCleanup(tmp_dir.cleanup)

        args = ChatArguments(save_folder=tmp_dir.name)
        args.model_name_or_path_or_address = "test-model"

        chat_history = [{"role": "user", "content": "hi"}]
        filename = ChatCommand.save_chat(chat_history, args)
        self.assertTrue(os.path.isfile(filename))

        cleared = ChatCommand.clear_chat_history()
        self.assertEqual(cleared, [])

    def test_parse_generate_flags(self):
        """`key=value` flag strings are parsed into typed values (float / int)."""
        # `parse_generate_flags` doesn't touch state set by `__init__`, so a bare instance suffices.
        dummy = ChatCommand.__new__(ChatCommand)
        parsed = ChatCommand.parse_generate_flags(dummy, ["temperature=0.5", "max_new_tokens=10"])
        self.assertEqual(parsed["temperature"], 0.5)
        self.assertEqual(parsed["max_new_tokens"], 10)
transformers/tests/commands/test_chat.py/0
{ "file_path": "transformers/tests/commands/test_chat.py", "repo_id": "transformers", "token_count": 1615 }
576
# Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import datetime import gc import inspect import random import tempfile import unittest import warnings from pathlib import Path import numpy as np import pytest from packaging import version from parameterized import parameterized from transformers import ( AutoConfig, AutoProcessor, AutoTokenizer, PreTrainedModel, is_torch_available, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_flaky, require_accelerate, require_flash_attn, require_flash_attn_3, require_optimum_quanto, require_read_token, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_greater_or_equal, require_torch_multi_accelerator, set_config_for_less_flaky_test, set_model_for_less_flaky_test, set_model_tester_for_less_flaky_test, slow, torch_device, ) from transformers.utils import is_ipex_available, is_torchdynamo_exporting if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( AutoModelForCausalLM, AutoModelForImageTextToText, AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, GPT2Tokenizer, ImageGPTForCausalImageModeling, SpeechEncoderDecoderModel, T5ForConditionalGeneration, ) from transformers.cache_utils import ( Cache, DynamicCache, EncoderDecoderCache, QuantoQuantizedLayer, StaticCache, ) from 
transformers.generation import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, CompileConfig, DisjunctiveConstraint, GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput, GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput, GenerationConfig, GenerationMixin, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, LogitsProcessorList, MaxLengthCriteria, MinLengthLogitsProcessor, PhrasalConstraint, PromptLookupCandidateGenerator, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, StoppingCriteria, StoppingCriteriaList, SynthIDTextWatermarkingConfig, WatermarkDetector, WatermarkingConfig, ) from transformers.generation.candidate_generator import ( AssistedCandidateGenerator, AssistedCandidateGeneratorDifferentTokenizers, ) from transformers.generation.utils import _speculative_sampling from unittest.mock import patch from transformers.utils import is_sklearn_available class GenerationTesterMixin: input_name = "input_ids" model_tester = None max_new_tokens = 3 def prepare_config_and_inputs_for_generate(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # We don't want a few model inputs in our model input dictionary for generation tests input_keys_to_ignore = [ # we don't want to mask attention heads "head_mask", "decoder_head_mask", "cross_attn_head_mask", # we don't want encoder-decoder models to start from filled decoder ids "decoder_input_ids", "decoder_attention_mask", # we'll set cache use in each test differently "use_cache", # Ignore labels if it is in the input dict "labels", # model-specific exceptions should overload/overwrite this function ] filtered_inputs_dict = { k: v[:batch_size, ...] 
if isinstance(v, torch.Tensor) else v for k, v in inputs_dict.items() if k not in input_keys_to_ignore } # It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks) text_gen_config = config.get_text_config(decoder=True) if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None: text_gen_config.pad_token_id = ( text_gen_config.eos_token_id if isinstance(text_gen_config.eos_token_id, int) else text_gen_config.eos_token_id[0] ) text_gen_config.eos_token_id = None text_gen_config.forced_eos_token_id = None return config, filtered_inputs_dict def _get_logits_processor_kwargs(self, do_sample=False, config=None): logits_processor_kwargs = { "bad_words_ids": [[1, 0]], "repetition_penalty": 1.2, "remove_invalid_values": True, } if do_sample: logits_processor_kwargs.update( { "top_k": 10, "top_p": 0.7, "temperature": 0.7, } ) # TODO (joao, raushan): see this comment for a long-term fix # https://github.com/huggingface/transformers/pull/33593#issuecomment-2361824264) # This is a band-aid for VLM models, to ensure they don't generate image/video tokens which would cause them # to crash. On pretrained models this isn't a risk, as they are trained to not generate these tokens. 
if config is not None: for key in [ "image_token_id", "video_token_id", "audio_token_id", "vision_start_token_id", "audio_start_token_id", "audio_end_token_id", "vision_end_token_id", ]: token_index = getattr(config, key, None) if token_index is None and hasattr(self, "model_tester"): token_index = getattr(self.model_tester, key, None) if token_index is not None and token_index < config.get_text_config().vocab_size: logits_processor_kwargs["bad_words_ids"].append([token_index]) return logits_processor_kwargs def _get_beam_kwargs(self, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } return beam_kwargs def _get_diverse_beam_kwargs(self, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } return beam_kwargs def _get_constrained_beam_kwargs(self, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } return beam_kwargs def _greedy_generate( self, model, inputs_dict, output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config) output_generate = model.generate( do_sample=False, num_beams=1, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, output_logits=output_logits, return_dict_in_generate=return_dict_in_generate, use_cache=use_cache, **logits_processor_kwargs, **inputs_dict, ) return output_generate def _sample_generate( self, model, inputs_dict, num_return_sequences, 
output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): torch.manual_seed(0) logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=True, config=model.config) output_generate = model.generate( do_sample=True, num_beams=1, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, num_return_sequences=num_return_sequences, output_scores=output_scores, output_logits=output_logits, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, use_cache=use_cache, **logits_processor_kwargs, **inputs_dict, ) return output_generate def _beam_search_generate( self, model, inputs_dict, beam_kwargs, output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config) output_generate = model.generate( do_sample=False, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_scores=output_scores, output_logits=output_logits, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, use_cache=use_cache, **beam_kwargs, **logits_processor_kwargs, **inputs_dict, ) return output_generate def _beam_sample_generate( self, model, inputs_dict, beam_kwargs, output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): torch.manual_seed(0) logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=True, config=model.config) output_generate = model.generate( do_sample=True, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_scores=output_scores, output_logits=output_logits, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, use_cache=use_cache, **beam_kwargs, **logits_processor_kwargs, **inputs_dict, ) return output_generate def _group_beam_search_generate( self, model, inputs_dict, beam_kwargs, output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config) output_generate = model.generate( do_sample=False, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_scores=output_scores, output_logits=output_logits, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, use_cache=use_cache, **beam_kwargs, **logits_processor_kwargs, **inputs_dict, ) return output_generate def _constrained_beam_search_generate( self, model, inputs_dict, constraints, beam_kwargs, output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config) output_generate = model.generate( do_sample=False, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_scores=output_scores, output_logits=output_logits, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, constraints=constraints, use_cache=use_cache, **beam_kwargs, **logits_processor_kwargs, **inputs_dict, ) return output_generate def _contrastive_generate( self, model, inputs_dict, output_scores=False, output_logits=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, use_cache=True, ): contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } logits_processor_kwargs = 
self._get_logits_processor_kwargs(do_sample=False, config=model.config) output_generate = model.generate( do_sample=False, num_beams=1, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, output_logits=output_logits, return_dict_in_generate=return_dict_in_generate, use_cache=use_cache, **logits_processor_kwargs, **contrastive_search_kwargs, **inputs_dict, ) return output_generate @pytest.mark.generate def test_greedy_generate(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() model = model_class(config).to(torch_device).eval() output_generate = self._greedy_generate(model=model, inputs_dict=inputs_dict) if model.config.get_text_config(decoder=True).is_encoder_decoder: self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1) else: self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]) @pytest.mark.generate def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() if self.has_attentions: config._attn_implementation = "eager" # can't output attentions otherwise model = model_class(config).to(torch_device).eval() output_generate = self._greedy_generate( model=model, inputs_dict=inputs_dict, output_scores=True, output_logits=True, output_hidden_states=True, output_attentions=self.has_attentions, return_dict_in_generate=True, use_cache=False, ) if model.config.get_text_config(decoder=True).is_encoder_decoder: self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertTrue( 
output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1] ) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self._check_generate_outputs(output_generate, model.config) @pytest.mark.generate def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() if self.has_attentions: config._attn_implementation = "eager" # can't output attentions otherwise if not hasattr(config.get_text_config(), "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") if any(model_name in model_class.__name__.lower() for model_name in ["rwkv"]): self.skipTest(reason="Won't fix: model with non-standard dictionary output shapes") config.is_decoder = True model = model_class(config).to(torch_device).eval() output_generate = self._greedy_generate( model=model, inputs_dict=inputs_dict, output_scores=True, output_logits=True, output_hidden_states=True, output_attentions=self.has_attentions, return_dict_in_generate=True, use_cache=True, # Enable cache ) if model.config.get_text_config(decoder=True).is_encoder_decoder: self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1) else: self.assertTrue( output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1] ) self._check_generate_outputs(output_generate, model.config, use_cache=True) @pytest.mark.generate def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() model = model_class(config).to(torch_device).eval() output_generate = self._sample_generate(model=model, inputs_dict=inputs_dict, num_return_sequences=1) if model.config.get_text_config(decoder=True).is_encoder_decoder: 
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1) else: self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]) @pytest.mark.generate def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() if self.has_attentions: config._attn_implementation = "eager" # can't output attentions otherwise model = model_class(config).to(torch_device).eval() output_generate = self._sample_generate( model=model, inputs_dict=inputs_dict, num_return_sequences=2, output_scores=True, output_logits=True, output_hidden_states=True, output_attentions=self.has_attentions, return_dict_in_generate=True, use_cache=False, ) if model.config.get_text_config(decoder=True).is_encoder_decoder: self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertTrue( output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1] ) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) self._check_generate_outputs(output_generate, model.config, num_return_sequences=2) @pytest.mark.generate def test_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() model = model_class(config).to(torch_device).eval() beam_kwargs = self._get_beam_kwargs() output_generate = self._beam_search_generate(model=model, inputs_dict=inputs_dict, beam_kwargs=beam_kwargs) if model.config.get_text_config(decoder=True).is_encoder_decoder: self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1) else: 
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

    @pytest.mark.generate
    def test_beam_search_generate_dict_output(self):
        # Beam search with `return_dict_in_generate=True`: checks output type (new and legacy
        # classes), sequence length, and the recorded scores/logits/hidden states/attentions.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise

            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_beam_kwargs()
            output_generate = self._beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=False,
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)

            self._check_generate_outputs(
                output_generate,
                model.config,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )

    @pytest.mark.generate
    def test_beam_search_generate_dict_outputs_use_cache(self):
        # Same as the dict-output beam search test, but with the KV cache enabled.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            if any(model_name in model_class.__name__.lower() for model_name in ["rwkv"]):
                self.skipTest(reason="Won't fix: model with non-standard dictionary output shapes")
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise

            # NOTE(review): this first instantiation is immediately overwritten below after
            # `config.is_decoder` is set — presumably redundant; confirm before removing.
            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_beam_kwargs()

            config.is_decoder = True
            model = model_class(config).to(torch_device).eval()
            output_generate = self._beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=True,  # Enable cache
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )

            self._check_generate_outputs(
                output_generate,
                model.config,
                use_cache=True,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )

    @require_accelerate
    @require_torch_multi_accelerator
    @pytest.mark.generate
    def test_model_parallel_beam_search(self):
        # Smoke test: beam search must run when the model is sharded across devices
        # via `device_map="auto"` (only models defining `_no_split_modules`).
        if "xpu" in torch_device:
            if not (is_ipex_available("2.5") or version.parse(torch.__version__) >= version.parse("2.6")):
                self.skipTest(reason="device_map='auto' does not work with XPU devices")

        for model_class in self.all_generative_model_classes:
            if model_class._no_split_modules is None:
                continue

            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            model = model_class(config).eval()
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(tmp_dir)
                new_model = model_class.from_pretrained(tmp_dir, device_map="auto")

                new_model.generate(
                    max_new_tokens=self.max_new_tokens,
                    num_beams=2,
                    **inputs_dict,
                )

    @pytest.mark.generate
    def test_beam_sample_generate(self):
        # Beam sample (beam search + sampling): checks only the output sequence length.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_beam_kwargs()
            output_generate = self._beam_sample_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
            )

            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

    @pytest.mark.generate
    def test_beam_sample_generate_dict_output(self):
        # Beam sample with `return_dict_in_generate=True`: output type (new and legacy classes),
        # sequence length, and recorded tensors.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise

            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_beam_kwargs()
            output_generate = self._beam_sample_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=False,
            )

            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)

            self._check_generate_outputs(
                output_generate,
                model.config,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )

    @pytest.mark.generate
    def test_generate_without_input_ids(self):
        # `generate()` with no inputs must work when the config defines a BOS token to start from.
        config, _ = self.prepare_config_and_inputs_for_generate()

        # if no bos token id => cannot generate from None
        if config.bos_token_id is None:
            self.skipTest(reason="bos_token_id is None")

        # hack in case they are equal, otherwise the attn mask will be [0]
        if config.bos_token_id == config.pad_token_id:
            config.pad_token_id = None

        for model_class in self.all_generative_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()

            output_ids_generate = model.generate(
                do_sample=False, max_new_tokens=self.max_new_tokens, remove_invalid_values=True
            )
            self.assertIsNotNone(output_ids_generate)

    @pytest.mark.generate
    def test_group_beam_search_generate(self):
        # Diverse (group) beam search: checks output length for 1 and 2 returned sequences.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            model = model_class(config).to(torch_device).eval()
            # check `generate()` and `group_beam_search()` are equal
            beam_kwargs = self._get_diverse_beam_kwargs()
            output_generate = self._group_beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

            # check `group_beam_search` for higher than 1 `num_return_sequences`
            num_return_sequences = 2
            beam_kwargs = self._get_diverse_beam_kwargs(num_return_sequences=num_return_sequences)
            output_generate = self._group_beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

    @pytest.mark.generate
    def test_group_beam_search_generate_dict_output(self):
        # Diverse (group) beam search with dict output: type, length and recorded tensors.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise

            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_diverse_beam_kwargs()
            output_generate = self._group_beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=False,
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)

            self._check_generate_outputs(
                output_generate,
                model.config,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )

    @is_flaky()  # Some models have position-specific tokens, this test may try to force them in an invalid position
    @pytest.mark.generate
    def test_constrained_beam_search_generate(self):
        # Constrained beam search: forced tokens must appear in every returned sequence,
        # for both 1 and 2 returned sequences.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            model = model_class(config).to(torch_device).eval()

            # Sample constraints
            min_id = 3
            max_id = config.get_text_config(decoder=True).vocab_size

            force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
            constraints = [
                PhrasalConstraint(force_tokens),
            ]

            beam_kwargs = self._get_constrained_beam_kwargs()
            output_generate = self._constrained_beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                constraints=constraints,
                beam_kwargs=beam_kwargs,
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

            for generation_output in output_generate:
                self._check_sequence_inside_sequence(force_tokens, generation_output)

            # check`constrained_beam_search` for higher than 1 `num_return_sequences`
            # Sample constraints
            force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
            constraints = [
                PhrasalConstraint(force_tokens),
            ]

            beam_kwargs = self._get_constrained_beam_kwargs(num_return_sequences=2)

            output_generate = self._constrained_beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                constraints=constraints,
                beam_kwargs=beam_kwargs,
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

            for generation_output in output_generate:
                self._check_sequence_inside_sequence(force_tokens, generation_output)

    @is_flaky()  # Some models have position-specific tokens, this test may try to force them in an invalid position
    @pytest.mark.generate
    def test_constrained_beam_search_generate_dict_output(self):
        # Constrained beam search with dict output: type, length and recorded tensors.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise

            model = model_class(config).to(torch_device).eval()

            # Sample constraints
            min_id = 3
            max_id = model.config.get_text_config(decoder=True).vocab_size
            force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
            constraints = [
                PhrasalConstraint(force_tokens),
            ]

            beam_kwargs = self._get_constrained_beam_kwargs()
            output_generate = self._constrained_beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                constraints=constraints,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=False,
            )

            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
                # Retrocompatibility check
                self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)

            self._check_generate_outputs(
                output_generate,
                model.config,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )

    @pytest.mark.generate
    def test_contrastive_generate(self):
        # Contrastive search (cache required): checks only the output sequence length.
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support contrastive search generation")

            # won't fix: FSMT and Reformer have a different cache variable type (and format).
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")

            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            # NOTE: contrastive search only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True

            # test old generation output for backwards compatibility
            model = model_class(config).to(torch_device).eval()
            output_generate = self._contrastive_generate(
                model=model,
                inputs_dict=inputs_dict,
                use_cache=True,  # Enable cache
            )
            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])

    @pytest.mark.generate
    def test_contrastive_generate_dict_outputs_use_cache(self):
        # Contrastive search with dict output and cache on: length plus recorded tensors.
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support contrastive search generation")

            # won't fix: FSMT and Reformer have a different cache variable type (and format).
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")

            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            # NOTE: contrastive search only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise

            model = model_class(config).to(torch_device).eval()
            output_generate = self._contrastive_generate(
                model=model,
                inputs_dict=inputs_dict,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=True,  # Enable cache
            )

            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )

            self._check_generate_outputs(output_generate, model.config, use_cache=True)

    @pytest.mark.generate
    def test_contrastive_generate_low_memory(self):
        # Check that choosing 'low_memory' does not change the model output
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support contrastive search generation")

            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")

            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)

            # NOTE: contrastive search only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")

            config.is_decoder = True

            # test output equality of low versus high memory
            model = model_class(config).to(torch_device).eval()

            generate_kwargs = {
                "top_k": 4,
                "penalty_alpha": 0.6,
                "max_new_tokens": self.max_new_tokens,
                "use_cache": True,
                "return_dict_in_generate": True,
                "output_scores": True,
            }

            low_output = model.generate(**inputs_dict, **generate_kwargs, low_memory=True)
            high_output = model.generate(**inputs_dict, **generate_kwargs, low_memory=False)
            self.assertTrue(has_similar_generate_outputs(low_output, high_output))

    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        # This test ensures that the assisted generation does not introduce output changes over greedy search.
        # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 for more info.
        # NOTE: It breaks the pattern in the tests above, for multiple reasons:
        # - assisted_decoding, contrarily to the other methods, can't be called on its own (e.g. needs to
        #   prepare the assistant encoder outputs in the main generate body);
        # - assisted_decoding does not support `use_cache = False`
        # - assisted_decoding does not support `batch_size > 1`
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support assisted generation")
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")
            if any(
                model_name in model_class.__name__.lower()
                for model_name in [
                    "moshi",
                    "git",
                    "prophetnet",
                    "mllama",  # special cache sizes
                    "blip2",  # overridden `generate()` all BLIP models
                    "instructblip",
                    "instructblipvideo",
                ]
            ):
                self.skipTest(reason="May fix in the future: need model-specific fixes")

            # enable cache
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)

            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"

            # NOTE: assisted generation only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            model = model_class._from_config(config, attn_implementation="eager").to(torch_device).eval()
            config = model.config
            # Sets assisted generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of
            #    the assistant model is correct
            # c) there are at least two forward passes in the main model, to ensure the input preparation of
            #    the main model is correct
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see c)
                "num_beams": 1,
                "do_sample": False,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": True,
            }
            logits_processor_kwargs = self._get_logits_processor_kwargs(config=model.config)

            output_greedy = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)

            # test with the same assistant model or randomly init one
            # in the first case all candidate tokens are accepted, in the second none is accepted
            # case when some are accepted and some not is hard to reproduce, so let's hope this catches most errors :)
            if assistant_type == "random":
                assistant_model = model_class(config).to(torch_device).eval()
            else:
                assistant_model = model
            assistant_model.config._attn_implementation = "eager"
            assistant_model.generation_config.num_assistant_tokens = 2  # see b)
            assistant_model.generation_config.num_assistant_tokens_schedule = "constant"  # see b)
            generation_kwargs.update({"assistant_model": assistant_model})
            output_assisted = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)

            # The two outputs must match and their shape must be as expected
            self.assertTrue(has_similar_generate_outputs(output_greedy, output_assisted))
            for output in (output_greedy, output_assisted):
                self._check_generate_outputs(output, model.config, use_cache=True)

    @pytest.mark.generate
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        # This test ensures that the prompt lookup generation does not introduce output changes over greedy search.
        # This test is mostly a copy of test_assisted_decoding_matches_greedy_search
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support assisted generation")
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")
            if any(
                model_name in model_class.__name__.lower()
                for model_name in [
                    "moshi",
                    "git",
                    "prophetnet",
                    "mllama",  # special cache sizes
                    "blip2",  # overridden `generate()` for all BLIP models
                    "instructblip",
                    "instructblipvideo",
                    # All models below: shouldn't suggest image tokens. Can be fixed by passing `suppress_ids` to candidate generator: @joaa @raushan
                    "llava",
                    "idefics2",
                    "idefics3",
                    "mllama",
                    "paligemma",
                    "emu3",
                    "gotocr2",
                    "qwen2vl",
                    "qwen2_5_vl",
                    "ayavision",
                    "janus",
                    "gemma3",
                    "mistral3",
                    "chameleon",
                    "internvl",
                    "qwen2_5omni",  # the file is named `qwen2_5_omni`, but the model class is `Qwen2_5Omni`,
                ]
            ):
                self.skipTest(reason="May fix in the future: need model-specific fixes")

            # enable cache
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)

            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"

            # NOTE: assisted generation only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            model = model_class(config).to(torch_device).eval()
            # Sets assisted generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) the prompt lookup tries to give the model 2 tokens, to ensure the input preparation of
            #    prompt lookup is correct
            # c) there are at least two forward passes in the main model, to ensure the input preparation of
            #    the main model is correct
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see c)
                "num_beams": 1,
                "do_sample": False,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": True,
            }

            output_greedy = model.generate(**generation_kwargs, **inputs_dict)

            generation_kwargs.update({"prompt_lookup_num_tokens": 2})  # see b)
            output_prompt_lookup = model.generate(**generation_kwargs, **inputs_dict)

            # The two outputs must match and their shape must be as expected
            self.assertTrue(has_similar_generate_outputs(output_greedy, output_prompt_lookup))
            for output in (output_greedy, output_prompt_lookup):
                self._check_generate_outputs(output, model.config, use_cache=True)

    @pytest.mark.generate
    def test_dola_decoding_sample(self):
        # DoLa decoding with sampling: runs `generate(dola_layers="low")` and validates the
        # dict output contents. Decoder-only models with output embeddings only.
        # TODO (joao): investigate skips, try to reduce incompatibilities
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support DoLa decoding")

            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest("Skip Reformer as the lm_head input size is 2 * hidden size, adopted from Rev Nets.")

            if any(model_name in model_class.__name__.lower() for model_name in ["marian", "mbart", "pegasus"]):
                self.skipTest("DoLa is not supported for models that don't return layerwise hidden states")

            if any(model_name == model_class.__name__ for model_name in ["LlavaNextVideoForConditionalGeneration"]):
                self.skipTest(f"DoLa is failing for {model_class.__name__}")

            # enable cache if the model is not openai-gpt, xlnet, cpm, or xlm
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()

            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"

            # Encoder-decoder models are not supported
            if config.get_text_config(decoder=True).is_encoder_decoder:
                self.skipTest("DoLa is not supported for encoder-decoder models")
            config.is_decoder = True
            model = model_class(config).to(torch_device).eval()

            if model.get_output_embeddings() is None:
                self.skipTest("DoLa is not supported for models that don't have output embeddings")

            logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=True, config=model.config)

            # Sets dola generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) there are at least two forward passes in the main model, to ensure the input preparation of
            #    the main model is correct
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see b)
                "num_beams": 1,
                "do_sample": True,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": getattr(config, "use_cache", False),  # Some models don't support the cache
                "dola_layers": "low",
            }
            output_dola = model.generate(**generation_kwargs, **logits_processor_kwargs, **inputs_dict)
            self._check_generate_outputs(output_dola, model.config, use_cache=getattr(config, "use_cache", False))

    @pytest.mark.generate
    def test_assisted_decoding_sample(self):
        # In this test we don't check assisted vs non-assisted output -- seeded assisted decoding with sample will not
        # match sample for the same seed, as the forward pass does not return the exact same logits (due to matmul with
        # different shapes, see https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535).
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support assisted generation")
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")
            if any(
                model_name in model_class.__name__.lower()
                for model_name in [
                    "moshi",
                    "git",
                    "prophetnet",
                    "mllama",  # special cache sizes
                    "blip2",  # overridden `generate()` for all BLIP models
                    "instructblip",
                    "instructblipvideo",
                ]
            ):
                self.skipTest(reason="May fix in the future: need model-specific fixes")

            # enable cache
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)

            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"

            # NOTE: assisted generation only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            model = model_class._from_config(config, attn_implementation="eager").to(torch_device).eval()
            config = model.config
            # Sets assisted generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of
            #    the assistant model is correct
            # c) there are at least two forward passes in the main model, to ensure the input preparation of
            #    the main model is correct
            assistant_model = model
            assistant_model.generation_config.num_assistant_tokens = 2  # see b)
            assistant_model.generation_config.num_assistant_tokens_schedule = "constant"  # see b)
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see c)
                "num_beams": 1,
                "do_sample": True,
                "assistant_model": assistant_model,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": True,
            }
            logits_processor_kwargs = self._get_logits_processor_kwargs(config=model.config)
            output_assisted = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)

            self._check_generate_outputs(output_assisted, config, use_cache=True)

    @pytest.mark.generate
    def test_prompt_lookup_decoding_stops_at_eos(self):
        # This test ensures that the prompt lookup generation stops at eos token and does not suggest more tokens
        # (see https://github.com/huggingface/transformers/pull/31301)

        # The main idea is to have an ngram (unigram in our case) that is repeated twice in the input ids.
        # First time at the very end, so input ends with the unigrams, and second any arbitrary location.
        # Also, we need an EOS token which will be injected just after the arbitrary located ngram.
        # We verify that PLD will not copy and propose candidated that contain an EOS token, even if there are overlapping ngrams
        # in input ids. Otherwise a proposed EOS along with the trailing (ngrams-1) tokens might be accepted by the target model.
        # That seems as if the model "generated" and EOS but didn't stop from user's perspective
        input_ids = torch.randint(1, 50, (1, 10), device=torch_device)  # generate inputs in range from 1-50
        arbitrary_ngram = 51  # this is the arbitrary ngram, specifically chosen OOV to prevent flaky tests
        input_ids[:, 3] = arbitrary_ngram  # set pre-eos to arbitrary_ngram which is for sure not present in inputs
        input_ids[:, -1] = arbitrary_ngram  # put arbitrary_ngram in the end for the necessary match to happen

        eos_token_id = torch.tensor([0], device=torch_device)
        input_ids[:, 4] = eos_token_id  # inject eos-token-id in input ids so that it is located after arbitrary_ngram

        # init cand geenerator with max_matching_ngram_size=1 to match per-token
        candidate_generator = PromptLookupCandidateGenerator(
            eos_token_id=eos_token_id, num_output_tokens=4, max_matching_ngram_size=1
        )
        output_prompt_lookup = candidate_generator.get_candidates(input_ids)[0]

        # PLD shouldn't propose any new tokens based on eos-match
        self.assertTrue(output_prompt_lookup.shape[-1] == 10)

    @pytest.mark.generate
    def test_left_padding_compatibility(self):
        # NOTE: left-padding results in small numerical differences. This is expected.
        # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535

        # First, filter out models that don't support left padding
        # - The model must have generative capabilities
        if len(self.all_generative_model_classes) == 0:
            self.skipTest(reason="No generative architecture available for this model.")

        # - The model must support padding
        if not self.has_attentions:
            self.skipTest(reason="This model doesn't support padding.")

        # - The model must be a decoder-only architecture (encoder-based architectures use right-padding)
        decoder_only_classes = []
        for model_class in self.all_generative_model_classes:
            config, _ = self.prepare_config_and_inputs_for_generate()
            if config.get_text_config(decoder=True).is_encoder_decoder:
                continue
            else:
                decoder_only_classes.append(model_class)
        if len(decoder_only_classes) == 0:
            self.skipTest(reason="No decoder-only architecture available for this model.")

        # - Decoder-only architectures derived from encoder-decoder models could support it in theory, but we haven't
        #   added support for it yet. We skip these models for now.
        has_encoder_attributes = any(
            attr_name
            for attr_name in config.to_dict()
            if attr_name.startswith("encoder") and attr_name != "encoder_no_repeat_ngram_size"
        )
        if has_encoder_attributes:
            self.skipTest(
                reason="The decoder-only derived from encoder-decoder models are not expected to support left-padding."
            )

        # Then, test left-padding
        def _prepare_model_kwargs(input_ids, attention_mask, signature):
            # Builds forward kwargs, adding position_ids/cache_position only when the
            # model's forward signature accepts them.
            model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
            if "position_ids" in signature:
                position_ids = torch.cumsum(attention_mask, dim=-1) - 1
                position_ids.masked_fill_(attention_mask == 0, 1)
                model_kwargs["position_ids"] = position_ids
            if "cache_position" in signature:
                cache_position = torch.arange(input_ids.shape[1], device=torch_device)
                model_kwargs["cache_position"] = cache_position
            return model_kwargs

        for model_class in decoder_only_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            input_ids = inputs_dict["input_ids"]

            attention_mask = inputs_dict.get("attention_mask")
            if attention_mask is None:
                attention_mask = torch.ones_like(input_ids)

            model = model_class(config).to(torch_device).eval()
            signature = inspect.signature(model.forward).parameters.keys()

            # no cache as some models require special cache classes to be init outside forward
            model.generation_config.use_cache = False

            # Without padding
            model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature)
            next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :]

            # With left-padding (length 32)
            # can hardcode pad_token to be 0 as we'll do attn masking anyway
            pad_token_id = (
                config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 0
            )
            pad_size = (input_ids.shape[0], 32, *input_ids.shape[2:])
            padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id
            padded_input_ids = torch.cat((padding, input_ids), dim=1)
            padded_attention_mask = torch.cat(
                (torch.zeros(pad_size[:2], dtype=input_ids.dtype, device=torch_device), attention_mask), dim=1
            )
            model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature)
            next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]

            # They should result in very similar logits
            torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)

    @pytest.mark.generate
    def test_past_key_values_format(self, custom_all_cache_shapes=None):
        """
        Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test, or pass the
        expected cache shapes.
        Having a standard KV cache format is important for a consistent API (and for advanced generation methods).
        """
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # 1. If it doesn't support cache, skip the test
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")

            model = model_class(config).to(torch_device)
            model = model.eval()
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")

            # 2. retrieve the KV cache and compute its default expected shapes (if no custom shapes are provided)
            past_kv = outputs["past_key_values"]
            is_legacy_cache = not isinstance(past_kv, Cache)

            text_config = config.get_text_config()
            num_decoder_layers = (
                getattr(text_config, "decoder_layers", None)
                or getattr(text_config, "num_decoder_layers", None)
                or text_config.num_hidden_layers
            )

            if custom_all_cache_shapes is None:
                num_query_attention_heads = getattr(
                    text_config, "decoder_attention_heads", text_config.num_attention_heads
                )
                embed_dim = getattr(text_config, "d_model", text_config.hidden_size)
                per_head_embed_dim = embed_dim // num_query_attention_heads
                num_key_value_heads = (
                    text_config.num_key_value_heads
                    if getattr(text_config, "num_key_value_heads", None) is not None
                    else num_query_attention_heads
                )
                if config.is_encoder_decoder:
                    encoder_num_attention_heads = (
                        text_config.encoder_attention_heads
                        if hasattr(text_config, "encoder_attention_heads")
                        else text_config.num_attention_heads
                    )
                    encoder_per_head_embed_dim = embed_dim // encoder_num_attention_heads
                    batch_size, seq_length = inputs["decoder_input_ids"].shape[:2]
                    # The sequence length for the encoder K V depends on the model. Since it is not manipulated in
                    # autoregressive generation, we're keeping the test general and not checking the 3rd dim
                    default_cross_attention_shape = (
                        batch_size,
                        encoder_num_attention_heads,
                        encoder_per_head_embed_dim,
                    )
                    default_self_attention_shape = (batch_size, num_key_value_heads, seq_length, per_head_embed_dim)
                    all_cache_shapes = [
                        [
                            default_self_attention_shape,
                            default_self_attention_shape,
                            default_cross_attention_shape,
                            default_cross_attention_shape,
                        ]
                        for _ in range(num_decoder_layers)
                    ]
                else:
                    batch_size, seq_length = inputs["input_ids"].shape[:2]
                    default_self_attention_shape = (batch_size, num_key_value_heads, seq_length, per_head_embed_dim)
                    all_cache_shapes = [
                        [default_self_attention_shape, default_self_attention_shape]
                        for _ in range(num_decoder_layers)
                    ]
            else:
                all_cache_shapes = custom_all_cache_shapes

            # 3. Check cache shapes
            # 3.1. Encoder-Decoder checks
            if config.is_encoder_decoder:
                num_cache_decoder_layers = len(past_kv) if is_legacy_cache else len(past_kv.self_attention_cache)
                self.assertEqual(num_cache_decoder_layers, num_decoder_layers)

                for i in range(num_decoder_layers):
                    if is_legacy_cache:
                        self.assertEqual(len(past_kv[0]), 4)  # legacy check: confirm number of elements in tuple

                    # Self attention
                    self_attention_layer_keys = (
                        past_kv[i][0] if is_legacy_cache else past_kv.self_attention_cache.layers[i].keys
                    )
                    self_attention_layer_values = (
                        past_kv[i][1] if is_legacy_cache else past_kv.self_attention_cache.layers[i].values
                    )
                    self.assertEqual(self_attention_layer_keys.shape, all_cache_shapes[i][0])
                    self.assertEqual(self_attention_layer_values.shape, all_cache_shapes[i][1])

                    # Cross attention (ignore 3rd dim, see default shape preparation)
                    cross_attention_layer_keys = (
                        past_kv[i][2] if is_legacy_cache else past_kv.cross_attention_cache.layers[i].keys
                    )
                    cross_attention_layer_values = (
                        past_kv[i][3] if is_legacy_cache else past_kv.cross_attention_cache.layers[i].values
                    )
                    cross_attention_layer_keys = cross_attention_layer_keys[:, :, 0, :]
                    cross_attention_layer_values = cross_attention_layer_values[:, :, 0, :]
                    self.assertEqual(cross_attention_layer_keys.shape, all_cache_shapes[i][2])
                    self.assertEqual(cross_attention_layer_values.shape, all_cache_shapes[i][3])

            # 3.2.
Decoder-only checks else: num_cache_decoder_layers = len(past_kv) self.assertEqual(num_cache_decoder_layers, num_decoder_layers) for i in range(num_decoder_layers): if is_legacy_cache: self.assertEqual(len(past_kv[0]), 2) # legacy check: confirm number of elements in tuple # Self attention if is_legacy_cache: self_attention_layer_keys = past_kv[i][0] self_attention_layer_values = past_kv[i][1] elif getattr(past_kv, "layers", None) is None: # Cache is lot layered (i.e, Mamba derivatives) self_attention_layer_keys = past_kv.key_cache[i] self_attention_layer_values = past_kv.value_cache[i] else: self_attention_layer_keys = past_kv.layers[i].keys self_attention_layer_values = past_kv.layers[i].values self.assertEqual(self_attention_layer_keys.shape, all_cache_shapes[i][0]) self.assertEqual(self_attention_layer_values.shape, all_cache_shapes[i][1]) @pytest.mark.generate def test_generate_from_random_inputs_embeds(self): """ Text-only: Tests that different `inputs_embeds` generate different outputs in models with `main_input=="input_ids"`. Some models have 'images' as main input and thus can't generate with random text embeddings. See `test_generate_from_inputs_embeds` for more general checks. """ for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() if config.is_encoder_decoder: continue config.is_decoder = True model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters: continue # No easy fix, let's skip the test for now has_complex_embeds_computation = any( model_name in model_class.__name__.lower() for model_name in ["moshi"] ) if model_class.main_input_name != "input_ids" or has_complex_embeds_computation: self.skipTest( "The model's main input name in not `input_ids` and we need kwargs from input dict as well." 
) if hasattr(config, "scale_embedding"): config.scale_embedding = False generation_kwargs = { "return_dict_in_generate": True, "output_scores": True, "do_sample": False, "max_new_tokens": 5, "min_new_tokens": 5, # generate exactly 5 tokens } input_ids = inputs_dict.pop("input_ids") inputs_embeds = model.get_input_embeddings()(input_ids) outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds, **generation_kwargs) # If we pass different inputs_embeds, we should get different outputs (the output text may be the # same, but the logits will almost surely be different) random_embeds = torch.rand_like(inputs_embeds) outputs_from_rand_embeds = model.generate( input_ids=input_ids, inputs_embeds=random_embeds, **generation_kwargs ) for i in range(len(outputs_from_rand_embeds.scores)): self.assertFalse(torch.allclose(outputs_from_embeds.scores[i], outputs_from_rand_embeds.scores[i])) @pytest.mark.generate @parameterized.expand([("greedy", 1), ("beam search", 2)]) def test_generate_from_inputs_embeds(self, _, num_beams): """Tests that we can generate from `inputs_embeds` instead of `input_ids` in LLMs, VLMs, etc""" # When supported, tests that the decoder model can generate from `inputs_embeds` instead of `input_ids` # if fails, you should probably update the `prepare_inputs_for_generation` function set_model_tester_for_less_flaky_test(self) for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() # This test is for decoder-only models (encoder-decoder models have native input embeddings support in the # decoder) if config.get_text_config(decoder=True).is_encoder_decoder: continue config.is_decoder = True set_config_for_less_flaky_test(config) # Skip models without explicit support model = model_class(config).to(torch_device).eval() set_model_for_less_flaky_test(model) if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters: continue # There are a few 
exception patterns in this test: # 1 - Complex `inputs_embeds` computation, i.e. the correct computation of inputs embeds is more complex # than calling the embedding layer with `input_ids`. Subcases of this exception: # 1.A - Ignore `scale_embedding`, if the model supports it (it is controlled by a model-dependent flag) if hasattr(config, "scale_embedding"): config.scale_embedding = False # HACK - in the case of granite speech, input_features and inputs_embeds are mutually exclusive; # this is similar to VLMs and should likely be standardized for similar audio models in the future, # then made generic here. if "granitespeech" in model_class.__name__.lower(): inputs_dict.pop("input_features", None) # 1.B - No easy fix, let's skip the check that compares the outputs from `input_ids` and `inputs_embeds` has_complex_embeds_computation = any( model_name in model_class.__name__.lower() for model_name in ["moshi"] ) # 2 - `inputs_dict` doesn't contain `attention_mask`. When `attention_mask` is not passed to generate, # we infer it from `input_ids`. The last test case will fail if there is a pad token in the original input. missing_attention_mask = "attention_mask" not in inputs_dict # Traditional way of generating text input_ids = inputs_dict.pop("input_ids") generation_kwargs = { "return_dict_in_generate": True, "output_scores": True, "num_beams": num_beams, "do_sample": False, "max_new_tokens": 5, "min_new_tokens": 5, # generate exactly 5 tokens "use_cache": True, } outputs_from_ids = model.generate(input_ids=input_ids, **generation_kwargs, **inputs_dict) self.assertEqual(outputs_from_ids.sequences.shape[:2], (input_ids.shape[0], input_ids.shape[1] + 5)) # Same thing, but from input embeddings (`input_ids` is passed so the prompt is present in the output). # The output of the two calls should be the same. 
inputs_embeds = model.get_input_embeddings()(input_ids) outputs_from_embeds = model.generate( input_ids=input_ids, inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict ) if not has_complex_embeds_computation: self.assertTrue(has_similar_generate_outputs(outputs_from_ids, outputs_from_embeds)) # input_ids is not a required input on most models -- if we don't pass it, the newly generated tokens will # be the same if not missing_attention_mask: outputs_from_embeds_wo_ids = model.generate( inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict ) outputs_from_embeds.sequences = outputs_from_embeds.sequences[:, inputs_embeds.shape[1] :] self.assertTrue(has_similar_generate_outputs(outputs_from_embeds_wo_ids, outputs_from_embeds)) @pytest.mark.generate def test_generate_from_inputs_embeds_with_static_cache(self): """ Test that StaticCache can generate from inputs_embeds and calculates max_cache_length correctly in `generate()`. We force the model to not stop generation until max-length is reached to verify that the cache length is indeed set correctly and we don't run out of index when slicing the cache. """ for model_class in self.all_generative_model_classes: # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly # use a static cache because they don't create the causal masks correctly. 
# TODO: cyril -> relax this by adding a `_support_static_cache` attribute if not model_class._can_compile_fullgraph: self.skipTest(reason="This model does not support the static cache format") config, inputs_dict = self.prepare_config_and_inputs_for_generate() if config.get_text_config(decoder=True).is_encoder_decoder: self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache") model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters: self.skipTest(reason="This model does not support `inputs_embeds` in generation") input_ids = inputs_dict.pop("input_ids") model.config.use_cache = True model.config.is_decoder = True batch_size = input_ids.shape[0] max_new_tokens = 10 # here we force to not stop at eos and go until max-length model.generation_config.eos_token_id = model.config.get_text_config().eos_token_id = -1 generation_kwargs = { "max_new_tokens": max_new_tokens, "cache_implementation": "static", "return_dict_in_generate": True, # Required to return `past_key_values` } text_config = model.config.get_text_config() head_dim = ( getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads ) num_key_value_heads = ( text_config.num_attention_heads if getattr(text_config, "num_key_value_heads", None) is None else text_config.num_key_value_heads ) num_hidden_layers = text_config.num_hidden_layers inputs_embeds = model.get_input_embeddings()(input_ids) outputs = model.generate(inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict) # we should get `max_length - 1` in shape, not `max_length - embeds_length`. # -1 because the last generated token isn't yet in the cache. 
max_length = max_new_tokens + inputs_embeds.shape[1] - 1 cache_shape = [batch_size, num_key_value_heads, max_length, head_dim] self.assertIsInstance(outputs.past_key_values, StaticCache) self.assertEqual(len(outputs.past_key_values), num_hidden_layers) self.assertListEqual(list(outputs.past_key_values.layers[0].keys.shape), cache_shape) @pytest.mark.generate def test_generate_continue_from_past_key_values(self): # Tests that we can continue generating from past key values, returned from a previous `generate` call for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]): self.skipTest(reason="Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if not hasattr(config.get_text_config(), "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") # Let's make it always: # 1. use cache (for obvious reasons) # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which # would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the # continuation would force it to generate beyond an EOS token) # 3. ignore `token_type_ids` for simplicity # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is # active by default on some models # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When # we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents # repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls # with cache, what is considered a prompt is different in the two cases. 
if "token_type_ids" in inputs: del inputs["token_type_ids"] model = model_class(config).to(torch_device) model.eval() # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format) outputs = model(**inputs) if "past_key_values" not in outputs: self.skipTest(reason="This model doesn't return `past_key_values`") generate_kwargs = { "pad_token_id": -1, "eos_token_id": -1, "forced_eos_token_id": None, "encoder_no_repeat_ngram_size": 0, "use_cache": True, "do_sample": False, "return_dict_in_generate": True, "output_scores": True, } # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the # inputs may need to be tweaked across `generate` calls (like the attention mask). outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3) # Continue from the tokens generated above, preparing the inputs accordingly inputs["past_key_values"] = outputs_cached.past_key_values new_attention_len = outputs_cached.sequences.shape[-1] if config.is_encoder_decoder: inputs["decoder_input_ids"] = outputs_cached.sequences if "decoder_attention_mask" in inputs: inputs["decoder_attention_mask"] = torch.nn.functional.pad( inputs["decoder_attention_mask"], (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]), mode="constant", value=1, ) else: inputs["input_ids"] = outputs_cached.sequences if "attention_mask" in inputs: inputs["attention_mask"] = torch.nn.functional.pad( inputs["attention_mask"], (0, new_attention_len - inputs["attention_mask"].shape[1]), mode="constant", value=1, ) first_caches_scores = outputs_cached.scores outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1) full_cached_scores = first_caches_scores + outputs_cached.scores outputs_cached.scores = full_cached_scores # 
The two sets of generated text and past kv should be equal to each other self.assertTrue(has_similar_generate_outputs(outputs, outputs_cached)) for layer_idx in range(len(outputs_cached.past_key_values)): for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], outputs_cached.past_key_values[layer_idx][kv_idx], ) ) @pytest.mark.generate def test_generate_continue_from_inputs_embeds(self): """Tests that we can continue generation from `inputs_embeds` and past key values returned from a previous `generate` call.""" for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]): self.skipTest(reason="Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility") config, inputs_dict = self.prepare_config_and_inputs_for_generate() if "token_type_ids" in inputs_dict: del inputs_dict["token_type_ids"] if config.get_text_config(decoder=True).is_encoder_decoder: self.skipTest(reason="This model is encoder-decoder") # TODO (joao, raushan): the correct line below is `if not hasattr(config.get_text_config(), "use_cache")`, # but it breaks a few models. Fix and then apply `has_similar_generate_outputs` pattern if not hasattr(config, "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters: self.skipTest(reason="This model does not support `inputs_embeds` in generation") # If "past_key_values" is not returned, skip the test (e.g. 
RWKV uses a different cache name and format) outputs = model(**inputs_dict) if "past_key_values" not in outputs: self.skipTest(reason="This model doesn't return `past_key_values`") input_ids = inputs_dict.pop("input_ids") model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None model.config.is_decoder = True model.generation_config.use_cache = True generation_kwargs = { "return_dict_in_generate": True, "do_sample": False, } # Traditional way of generating text, with `return_dict_in_generate` to return the past key values. input_embeds = model.get_input_embeddings()(input_ids) outputs = model.generate(inputs_embeds=input_embeds, max_new_tokens=4, **generation_kwargs) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens) initial_output = model.generate(inputs_embeds=input_embeds, max_new_tokens=3, **generation_kwargs) continued_embeds = torch.cat([input_embeds, model.get_input_embeddings()(initial_output.sequences)], dim=1) cached_output = model.generate( inputs_embeds=continued_embeds, max_new_tokens=1, past_key_values=initial_output.past_key_values, **generation_kwargs, ) # Combine the (3 + 1) generated tokens and verify it matches with full generation. 
combined_output_sequences = torch.concat([initial_output.sequences, cached_output.sequences], axis=1) self.assertListEqual(outputs.sequences.tolist(), combined_output_sequences.tolist()) # The two sets of past kv should be equal to each other for layer_idx in range(len(cached_output.past_key_values)): for kv_idx in range(len(cached_output.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], cached_output.past_key_values[layer_idx][kv_idx], ) ) @pytest.mark.generate def test_generate_with_static_cache(self): """ Tests that generating with static cache give almost same results as with dynamic cache, and the output cache has the expected shapes """ for model_class in self.all_generative_model_classes: # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly # use a static cache because they don't create the causal masks correctly. # TODO: cyril -> relax this by adding a `_support_static_cache` attribute if not model_class._can_compile_fullgraph: self.skipTest(reason="This model does not support the static cache format") config, inputs_dict = self.prepare_config_and_inputs_for_generate() set_config_for_less_flaky_test(config) main_input = inputs_dict[model_class.main_input_name] if config.get_text_config(decoder=True).is_encoder_decoder: self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache") config.is_decoder = True batch_size = main_input.shape[0] seq_length = self.model_tester.seq_length max_new_tokens = 20 for dtype in (torch.float32, torch.float16): model = model_class(copy.deepcopy(config)).to(torch_device).to(dtype).eval() inputs_dict = { k: v.to(dtype) if isinstance(v, torch.Tensor) and torch.is_floating_point(v) else v for k, v in inputs_dict.items() } set_model_for_less_flaky_test(model) generation_kwargs = { "max_new_tokens": max_new_tokens, "return_dict_in_generate": True, # Required to return `past_key_values` "output_scores": 
True, "use_cache": True, } static_cache_generation = model.generate( **generation_kwargs, **inputs_dict, cache_implementation="static" ) # Check 1: The cache shapes must match the expected shapes max_cache_len = seq_length + max_new_tokens - 1 # cache len = gen len - 1, the last token has no cache text_config = config.text_config if hasattr(config, "text_config") else config head_dim = ( getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads ) num_key_value_heads = ( text_config.num_attention_heads if getattr(text_config, "num_key_value_heads", None) is None else text_config.num_key_value_heads ) num_hidden_layers = text_config.num_hidden_layers cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim) self.assertTrue(isinstance(static_cache_generation.past_key_values, StaticCache)) self.assertTrue(len(static_cache_generation.past_key_values) == num_hidden_layers) self.assertTrue(static_cache_generation.past_key_values.layers[0].keys.shape == cache_shape) # Check 2: The outputs must be similar to the case with dynamic cache dynamic_cache_generation = model.generate(**generation_kwargs, **inputs_dict) self.assertTrue(has_similar_generate_outputs(dynamic_cache_generation, static_cache_generation)) @require_optimum_quanto @pytest.mark.generate def test_generate_with_quant_cache(self): for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() if ( config.get_text_config(decoder=True).is_encoder_decoder or not model_class._supports_default_dynamic_cache() ): self.skipTest(reason="This model does not support the quantized cache format") config.is_decoder = True model = model_class(config).to(torch_device).eval() generation_kwargs = { "max_new_tokens": 5, "cache_implementation": "quantized", # careful with group size, should be divisor of model's hidden size "cache_config": {"backend": "quanto", "nbits": 2, "q_group_size": 8, "residual_length": 
128}, "return_dict_in_generate": True, # Required to return `past_key_values` "use_cache": True, } results = model.generate(**generation_kwargs, **inputs_dict) self.assertTrue(all(isinstance(layer, QuantoQuantizedLayer) for layer in results.past_key_values.layers)) # passing past key values of different type should raise Error with self.assertRaises(ValueError): model.generate(past_key_valyes=DynamicCache(), **generation_kwargs, **inputs_dict) # setting incorrect cache_config args should raise an Error, i.e. nbits=60 does not make sense generation_kwargs["cache_config"] = {"nbits": 60, "q_group_size": 8, "residual_length": 128} with self.assertRaises(ValueError): model.generate(**generation_kwargs, **inputs_dict) @pytest.mark.generate @pytest.mark.torch_compile_test @require_torch_greater_or_equal("2.6") # Uses torch.compiler.set_stance def test_generate_compile_model_forward_fullgraph(self): """ Tests that `.generate` is compatible with torch.compile, keeping the same results. Also confirms that `.forward` called from `.generate` sees no graph breaks or recompilations when compiled. ⚠️ Runs two sequential generations to ensure the cache doesn't get stuck after the first compiled run! ⚠️ """ for model_class in self.all_generative_model_classes: # 1. Test exclusion criteria if not model_class._can_compile_fullgraph: self.skipTest("This model doesn't support compilation without graph breaks") # 2. Prepares two sets of inputs config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=4) set_config_for_less_flaky_test(config) model = model_class(config).to(torch_device) set_model_for_less_flaky_test(model) model.eval() # otherwise `self.training` is `True` -- this flag is used at attn mask creation time # Some composite models have a custom generate and will call an inner model's generate -> that inner model # is the one that gets compiled. 
# (Note for the future: if BLIP starts causing problems, let's stop testing it) if "blip" in model.__class__.__name__.lower(): model_to_be_compiled = model.language_model else: model_to_be_compiled = model # creates two sets of *different* inputs with the same shape main_input = inputs_dict[model.main_input_name].to(torch_device) half_batch_size = main_input.shape[0] // 2 input_1 = {} input_2 = {} for key, value in inputs_dict.items(): if isinstance(value, torch.Tensor): input_1[key] = value[:half_batch_size, :].to(torch_device) input_2[key] = value[half_batch_size : half_batch_size * 2, :].to(torch_device) else: input_1[key] = value input_2[key] = value model_input_sets = [input_1, input_2] self.assertTrue( model_input_sets[0][model.main_input_name].shape == model_input_sets[1][model.main_input_name].shape ) # 3. compilation-specific setup and generation parameterization torch.compiler.reset() # prevent cached compilation from being used in the test has_defined_cache_implementation = model.generation_config.cache_implementation is not None compile_config = CompileConfig(fullgraph=True, dynamic=False) # Error out on dynamic shapes compile_config._compile_all_devices = True # force compilation (e.g. fast CI, CPU) generation_kwargs = { "use_cache": True, "do_sample": False, "max_new_tokens": 5, "return_dict_in_generate": True, "output_scores": True, "compile_config": compile_config, } # 4. 
get eager + dynamic cache results for future comparison dynamic_outputs = [] # Ignores all `torch.compile` usage, useful to test models that that have non-default compilable caches # (who would have used compilation in this section) with torch.compiler.set_stance("force_eager"): for model_inputs in model_input_sets: gen_out = model.generate(**model_inputs, **generation_kwargs) dynamic_outputs.append(gen_out) # sanity checks for the default cache implementation if not has_defined_cache_implementation: decoder_cache = ( gen_out.past_key_values.self_attention_cache if config.get_text_config(decoder=True).is_encoder_decoder else gen_out.past_key_values ) self.assertTrue(isinstance(decoder_cache, DynamicCache)) self.assertFalse(decoder_cache.is_compileable) # our auto compile should NOT have been called self.assertFalse(hasattr(model_to_be_compiled, "_compiled_call")) # 5. get compiled results -- relies on the automatic compilation triggered by specific compilable caches if not has_defined_cache_implementation: generation_kwargs["cache_implementation"] = "static" compiled_outputs = [] # Uses a context manager to catch recompilation logs. If there is any recompilation, this test fails. # Try/Finally is used to ensure that the log options are reset even if an error is raised. 
try: torch._logging.set_logs(recompiles_verbose=True) logger = logging.get_logger("torch._dynamo.guards") with CaptureLogger(logger) as cl: for model_inputs in model_input_sets: # with torch.compiler.set_stance("fail_on_recompile"): gen_out = model.generate(**model_inputs, **generation_kwargs) compiled_outputs.append(gen_out) # sanity checks decoder_cache = ( gen_out.past_key_values.self_attention_cache if config.get_text_config(decoder=True).is_encoder_decoder else gen_out.past_key_values ) self.assertFalse(isinstance(decoder_cache, DynamicCache)) self.assertTrue(decoder_cache.is_compileable) # our auto compile should have been called self.assertTrue(hasattr(model_to_be_compiled, "_compiled_call")) finally: torch._logging.set_logs() # Compilation of sliding layers necessarily has recompiles with `dynamic=False` - however this test # still checks that `fullgraph=True` is supported in this case, as compilation with `dynamic=None` # is the default and does not actually lead to too many recompiles has_sliding_layers = any(decoder_cache.is_sliding) has_recompilation = "Recompiling" in cl.out or ("guard" in cl.out and "failure" in cl.out) if not has_sliding_layers and has_recompilation: raise RuntimeError( f"`torch.compile` recompiled part of the forward pass in {model.__class__.__name__}. " "See the test logs for more details." ) for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs): self.assertTrue(has_similar_generate_outputs(dynamic_result, compiled_result)) @pytest.mark.generate def test_generate_compilation_all_outputs(self): """ Tests that all optional outputs are behaving as expected when compilation is triggered. In essence, it's the same as `test_greedy_generate_dict_outputs`, but with automatic compilation triggered. """ for model_class in self.all_generative_model_classes: # Here, we should ideally not skip any model, and test them all. 
However, some old models cannot correctly # use a static cache because they don't create the causal masks correctly. # TODO: cyril -> relax this by adding a `_support_static_cache` attribute if not model_class._can_compile_fullgraph: self.skipTest(reason="This model does not support the static cache format") config, inputs_dict = self.prepare_config_and_inputs_for_generate() if self.has_attentions: config._attn_implementation = "eager" # can't output attentions otherwise model = model_class(config).to(torch_device).eval() # compilation-specific setup torch.compiler.reset() # prevent cached compilation from being used in the test has_defined_cache_implementation = model.generation_config.cache_implementation is not None # BLIP is the only exception with custom generate which call `self.lm.generate()` # We should avoid such calls in all subsequent multimodal models and try to make `generate()` # compatible with multimodality compile_config = CompileConfig() compile_config._compile_all_devices = True if "blip" in model.__class__.__name__.lower(): model.language_model.generation_config.compile_config = compile_config if not has_defined_cache_implementation: model.language_model.generation_config.cache_implementation = "static" else: # force compilation (e.g. 
            # fast CI, CPU)
            model.generation_config.compile_config = compile_config
            if not has_defined_cache_implementation:
                model.generation_config.cache_implementation = "static"

            logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config)
            output_generate = model.generate(
                do_sample=False,
                num_beams=1,
                max_new_tokens=self.max_new_tokens,
                min_new_tokens=self.max_new_tokens,
                output_attentions=True,
                output_hidden_states=True,
                output_scores=True,
                output_logits=True,
                return_dict_in_generate=True,
                use_cache=True,
                **logits_processor_kwargs,
                **inputs_dict,
            )

            # BLIP-style composite models compile only their language sub-model.
            if "blip" in model.__class__.__name__.lower():
                self.assertTrue(hasattr(model.language_model, "_compiled_call"))
            else:
                # our auto compile should have been called
                self.assertTrue(hasattr(model, "_compiled_call"))

            if model.config.get_text_config(decoder=True).is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput)

            self._check_generate_outputs(output_generate, model.config, use_cache=True)

    @pytest.mark.generate
    def test_generate_methods_with_logits_to_keep(self):
        """Checks that generation is unaffected by `logits_to_keep` (keeping all logits vs. only the last one)."""
        for model_class in self.all_generative_model_classes:
            if "logits_to_keep" not in set(inspect.signature(model_class.forward).parameters.keys()):
                self.skipTest(reason="This model does not support `logits_to_keep` argument.")

            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            config.use_cache = True
            config.is_decoder = True

            model = model_class(config).to(torch_device).eval()
            # All generation methods (except assisted decoding) rely on always extracting the last token logits of the
            # full logits matrix, so testing out only greedy search and assisted decoding is enough (if it works,
            # other methods will work as well)
            generation_kwargs = {
                "max_new_tokens": 10,
                "do_sample": False,
            }

            # Setting logits_to_keep at 0 keeps all logits (old behavior)
            with_all_logits = model.generate(**generation_kwargs, **inputs_dict, logits_to_keep=0)
            # By default, logits_to_keep is automatically set to 1 if not provided (new behavior)
            without_all_logits = model.generate(**inputs_dict, **generation_kwargs)

            self.assertEqual(with_all_logits.tolist(), without_all_logits.tolist())

    @pytest.mark.generate
    def test_inherits_generation_mixin(self):
        """
        Tests that the model class directly inherits `GenerationMixin`, as opposed to relying on `PreTrainedModel`
        to inherit it.
        """
        for model_class in self.all_generative_model_classes:
            self.assertTrue("GenerationMixin" in str(model_class.__bases__))

    def _test_attention_implementation(self, attn_implementation):
        """
        Compares the output of generate with the eager attention implementation against other implementations.
        NOTE: despite the test logic being the same, different implementations actually need different decorators,
        hence this separate function.
        """
        max_new_tokens = 3
        # Maps each non-eager implementation to the class flag that advertises support for it.
        support_flag = {
            "sdpa": "_supports_sdpa",
            "flash_attention_2": "_supports_flash_attn",
            "flash_attention_3": "_supports_flash_attn",
        }

        set_model_tester_for_less_flaky_test(self)
        for model_class in self.all_generative_model_classes:
            if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
                self.skipTest(f"{model_class.__name__} does not support `attn_implementation={attn_implementation}`")

            config, original_inputs_dict = self.prepare_config_and_inputs_for_generate()
            # Cast floating-point inputs to fp16 to match the fp16 checkpoints loaded below.
            inputs_dict = {}
            for input_name, input_data in original_inputs_dict.items():
                if isinstance(input_data, torch.Tensor) and input_data.dtype in [torch.float32, torch.bfloat16]:
                    inputs_dict[input_name] = input_data.to(torch.float16)
                else:
                    inputs_dict[input_name] = input_data
            main_input = inputs_dict[model_class.main_input_name]

            # FA2 doesn't accept masking in the middle of the sequence for now. We usually generate right-padded
            # attention masks at test time and, with generate, the mask will be appended with 1s on the right,
            # resulting in a mask with holes (not supported properly by FA2).
            if attn_implementation == "flash_attention_2":
                for input_name in ("attention_mask", "decoder_attention_mask", "encoder_attention_mask"):
                    if input_name in inputs_dict:
                        inputs_dict[input_name] = torch.ones_like(inputs_dict[input_name])

            # make sure that all models have enough positions for generation
            if hasattr(config, "max_position_embeddings"):
                config.max_position_embeddings = max_new_tokens + main_input.shape[1] + 1

            set_config_for_less_flaky_test(config)
            model = model_class(config)

            # If not all sub-models support flex, skip the test. We could potentially set not supported backbones
            # to "eager" attention, leaving it for future updates on multimodality tests
            sub_models_supporting_attn = [
                getattr(module, support_flag[attn_implementation])
                for name, module in model.named_modules()
                if isinstance(module, PreTrainedModel) and name != ""
            ]
            if not all(sub_models_supporting_attn) and len(sub_models_supporting_attn) > 0:
                self.skipTest(
                    f"One of {model_class.__name__}'s backbones does not support `attn_implementation={attn_implementation}`"
                )

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                # Free the randomly initialized model before reloading two fp16 copies.
                del model
                gc.collect()

                generate_kwargs = {
                    "max_new_tokens": max_new_tokens,
                    "do_sample": False,
                    "return_dict_in_generate": True,
                    "output_scores": True,
                    "use_cache": True,
                }

                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation="eager",
                ).to(torch_device)
                set_model_for_less_flaky_test(model_eager)
                res_eager = model_eager.generate(**inputs_dict, **generate_kwargs)
                del model_eager
                gc.collect()

                model_attn = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation=attn_implementation,
                ).to(torch_device)
                set_model_for_less_flaky_test(model_attn)
                res_attn = model_attn.generate(**inputs_dict, **generate_kwargs)
                del model_attn
                gc.collect()

                # fp16 numerics differ slightly across attention kernels, hence the tolerances.
                self.assertTrue(has_similar_generate_outputs(res_eager, res_attn, atol=1e-3, rtol=1e-3))

    @pytest.mark.generate
    @slow
    def test_eager_matches_sdpa_generate(self):
        """Tests that generate has equivalent outputs with SDPA and eager attention implementations."""
        self._test_attention_implementation("sdpa")

    @pytest.mark.flash_attn_test
    @require_flash_attn
    @require_torch_gpu
    @slow
    def test_eager_matches_fa2_generate(self):
        """Tests that generate has equivalent outputs with FA2 and eager attention implementations."""
        self._test_attention_implementation("flash_attention_2")

    @pytest.mark.flash_attn_3_test
    @require_flash_attn_3
    @require_torch_gpu
    @slow
    def test_eager_matches_fa3_generate(self):
        """Tests that generate has equivalent outputs with FA3 and eager attention implementations."""
        self._test_attention_implementation("flash_attention_3")

    def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1):
        """Validates the shapes/types of every optional output of `generate` (scores, logits, attentions,
        hidden states, and past key values), dispatching to the per-output `_check_*` helpers below."""
        input_batch_size = int(output.sequences.shape[0] / num_return_sequences)
        # Beam methods run `num_beams` sequences per input internally; sampling runs `num_return_sequences`.
        internal_batch_size = (
            input_batch_size * num_beams if num_beams > 1 else input_batch_size * num_return_sequences
        )

        # Model testers expose the prompt length under different attribute names; last one found wins.
        prompt_length = getattr(self.model_tester, "seq_length", None)
        prompt_length = getattr(self.model_tester, "encoder_seq_length", prompt_length)
        prompt_length = getattr(self.model_tester, "text_seq_length", prompt_length)

        config = config.text_config if hasattr(config, "text_config") else config

        # Encoder-decoder sequences start from a single BOS token, hence the "- 1".
        generated_length = (
            output.sequences.shape[1] - 1 if config.is_encoder_decoder else output.sequences.shape[1] - prompt_length
        )
        decoder_past_key_values = getattr(output, "past_key_values", None)
        if config.is_encoder_decoder and isinstance(decoder_past_key_values, EncoderDecoderCache):
            decoder_past_key_values = decoder_past_key_values.self_attention_cache

        # in some models we subsample the sequence length in inner layers
        if hasattr(self.model_tester, "get_subsampled_output_lengths"):
            prompt_length = self.model_tester.get_subsampled_output_lengths(prompt_length)

        # scores
        self._check_scores(
            batch_size=internal_batch_size, scores=output.scores, generated_length=generated_length, config=config
        )

        # unprocessed logits
        self._check_logits(batch_size=internal_batch_size, logits=output.logits, config=config)

        # Attentions
        if self.has_attentions:
            if config.is_encoder_decoder:
                # encoder
                self._check_encoder_attention_for_generate(
                    attentions=output.encoder_attentions,
                    batch_size=input_batch_size,
                    config=config,
                    prompt_length=prompt_length,
                )
                # decoder
                self._check_attentions_for_generate(
                    batch_size=internal_batch_size,
                    attentions=output.decoder_attentions,
                    prompt_length=1,  # the BOS token
                    output_length=output.sequences.shape[1],
                    config=config,
                    decoder_past_key_values=decoder_past_key_values,
                )
            else:
                self._check_attentions_for_generate(
                    batch_size=internal_batch_size,
                    attentions=output.attentions,
                    prompt_length=prompt_length,
                    output_length=output.sequences.shape[1],
                    config=config,
                    decoder_past_key_values=decoder_past_key_values,
                )

        # Hidden States
        if config.is_encoder_decoder:
            # encoder
            self._check_encoder_hidden_states_for_generate(
                hidden_states=output.encoder_hidden_states,
                batch_size=input_batch_size,
                config=config,
                prompt_length=prompt_length,
            )
            # decoder
            self._check_hidden_states_for_generate(
                batch_size=internal_batch_size,
                hidden_states=output.decoder_hidden_states,
                prompt_length=1,  # the BOS token
                output_length=output.sequences.shape[1],
                config=config,
                use_cache=use_cache,
            )
        else:
            self._check_hidden_states_for_generate(
                batch_size=internal_batch_size,
                hidden_states=output.hidden_states,
                prompt_length=prompt_length,
                output_length=output.sequences.shape[1],
                config=config,
                use_cache=use_cache,
            )

        # Past Key Value States -- a few notes here:
        # 1. Its inner sequence length is with respect to the inputs of the latest forward pass, hence the "-1"
        # 2. We ignore models that have unique cache structures (e.g. mamba) or are in need of refactoring to match
        #    the standard cache format (e.g. mamba architecture)
        models_without_standard_cache = (
            "bamba",
            "granitemoehybrid",
            "reformer",
            "jamba",
            "mamba",
            "xlnet",
            "zamba",
            "zamba2",
            "lfm2",
        )
        has_standard_cache = not any(
            model_name in config.__class__.__name__.lower() for model_name in models_without_standard_cache
        )
        if has_standard_cache:
            if use_cache:
                cache_length = output.sequences.shape[1] - 1
                self._check_past_key_values_for_generate(
                    batch_size=internal_batch_size,
                    decoder_past_key_values=decoder_past_key_values,
                    cache_length=cache_length,
                    config=config,
                )
            elif use_cache is False:
                self.assertTrue(decoder_past_key_values is None)

    def _check_scores(self, batch_size, scores, generated_length, config):
        """Checks that `output.scores` is one (batch, vocab)-shaped tensor per generated token."""
        vocab_size = config.get_text_config(decoder=True).vocab_size
        expected_shape = (batch_size, vocab_size)
        self.assertIsInstance(scores, tuple)
        self.assertEqual(len(scores), generated_length)
        self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))

    def _check_logits(self, batch_size, logits, config):
        """Checks that `output.logits` (unprocessed logits) have a consistent batch and vocab dimension."""
        vocab_size = config.get_text_config(decoder=True).vocab_size
        self.assertIsInstance(logits, tuple)
        self.assertListEqual([iter_logits.shape[0] for iter_logits in logits], [batch_size] * len(logits))
        # vocabulary difference equal to one (imagegptmodel?) or zero (all other models)
        vocab_diff = vocab_size - logits[0].shape[-1]
        self.assertTrue(vocab_diff in [0, 1])
        self.assertListEqual([vocab_size - score.shape[-1] for score in logits], [vocab_diff] * len(logits))

    def _check_attentions_for_generate(
        self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
    ):
        """Checks the per-step decoder attention shapes, accounting for cache usage and static caches."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (output_length - prompt_length))
        use_cache = decoder_past_key_values is not None
        has_static_cache = isinstance(decoder_past_key_values, StaticCache)

        # When `output_attentions=True`, each iteration of generate appends the attentions corresponding to the new
        # token(s)
        # NOTE: `StaticCache` may have different lengths on different layers, if this test starts failing add more
        # elaborate checks
        for generated_length, iter_attentions in enumerate(attentions):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            # Static caches are pre-allocated to their max shape, so the key/value length is fixed.
            query_length = (
                prompt_length + generated_length
                if not has_static_cache
                else decoder_past_key_values.get_max_cache_shape()
            )

            expected_shape = (
                batch_size,
                config.num_attention_heads,
                model_input_length,
                query_length,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length):
        """Checks encoder self-attention shapes: one square (prompt_length x prompt_length) map per layer."""
        encoder_expected_shape = (batch_size, config.num_attention_heads, prompt_length, prompt_length)
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [layer_attentions.shape for layer_attentions in attentions],
            [encoder_expected_shape] * len(attentions),
        )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
    ):
        """Checks the per-step decoder hidden state shapes, accounting for cache usage."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (output_length - prompt_length))

        # When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
        # new token(s)
        # NOTE: `StaticCache` may have different lengths on different layers, if this test starts failing add more
        # elaborate checks
        for generated_length, iter_hidden_states in enumerate(hidden_states):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            expected_shape = (batch_size, model_input_length, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length):
        """Checks encoder hidden state shapes: (batch, prompt_length, hidden) per layer."""
        encoder_expected_shape = (batch_size, prompt_length, config.hidden_size)
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [layer_hidden_states.shape for layer_hidden_states in hidden_states],
            [encoder_expected_shape] * len(hidden_states),
        )

    def _check_past_key_values_for_generate(self, batch_size, decoder_past_key_values, cache_length, config):
        """Checks key/value cache shapes for both the `Cache` API and the legacy tuple-of-tuples format."""
        self.assertIsInstance(decoder_past_key_values, (tuple, Cache))

        # (batch, head, seq_length, head_features)
        expected_shape = (
            batch_size,
            config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads,
            cache_length,
            config.hidden_size // config.num_attention_heads,
        )

        if isinstance(decoder_past_key_values, Cache):
            self.assertListEqual(
                [layer.keys.shape for layer in decoder_past_key_values.layers],
                [expected_shape] * len(decoder_past_key_values.layers),
            )
            self.assertListEqual(
                [layer.values.shape for layer in decoder_past_key_values.layers],
                [expected_shape] * len(decoder_past_key_values.layers),
            )

        # Legacy cache format checks. This branch should be removed when all models use `Cache` by default
        else:
            self.assertListEqual(
                [isinstance(iter_past_key_values, tuple) for iter_past_key_values in decoder_past_key_values],
                [True] * len(decoder_past_key_values),
            )
            # check shape key, value
            self.assertListEqual(
                [layer_past_key_values[0].shape for layer_past_key_values in decoder_past_key_values],
                [expected_shape] * len(decoder_past_key_values),
            )
            self.assertListEqual(
                [layer_past_key_values[1].shape for layer_past_key_values in decoder_past_key_values],
                [expected_shape] * len(decoder_past_key_values),
            )

    def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
        # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1.
        # set to same device. we don't care what device.
        # (the shorter sequence must appear as a contiguous subsequence of the longer one)

        if not isinstance(tensor_1, list):
            tensor_1 = tensor_1.tolist()
        if not isinstance(tensor_2, list):
            tensor_2 = tensor_2.tolist()

        in_order = len(tensor_1) <= len(tensor_2)
        longer = tensor_2 if in_order else tensor_1
        shorter = tensor_1 if in_order else tensor_2

        flag = False
        chunk_size = len(shorter)
        for chunk_idx in range(len(longer) - chunk_size + 1):
            subseq = longer[chunk_idx : chunk_idx + chunk_size]
            if subseq == shorter:
                flag = True
                break

        self.assertTrue(flag)


@require_torch
class UtilsFunctionsTest(unittest.TestCase):
    def test_speculative_sampling(self):
        """Sanity check for `_speculative_sampling`: the first rejected candidate is replaced by a resampled token."""
        # assume vocab size 10, input length 5 + 3 generated candidates
        candidate_input_ids = torch.tensor([[8, 0, 3, 9, 8, 1, 4, 5]])  # input tokens
        candidate_logits = torch.tensor(
            [
                [
                    [-10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # generated 1
                    [-10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # generated 4
                    [-10.0, -10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0],  # generated 5
                ]
            ]
        )
        candidate_length = 3
        inf = float("inf")
        new_logits = torch.tensor(
            [
                [
                    [-10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # accepts 1
                    [-10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # accepts 4
                    [-inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, 10.0, -inf],  # rejects 5, accepts 8
                    [-10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # N/A
                ]
            ]
        )
        last_assistant_token_is_eos = False
        validated_tokens, n_matches = _speculative_sampling(
            candidate_input_ids,
            candidate_logits,
            candidate_length,
            new_logits,
            last_assistant_token_is_eos,
        )
        self.assertTrue(n_matches.item() == 2)
        self.assertTrue(validated_tokens.tolist()[0] == [1, 4, 8])

    def test_speculative_sampling_target_distribution(self):
        """
        Asserts that the target distribution is preserved.
        Should help with catching issues like #32867.
        """
        # assume vocab size 10, input length 5 + 3 generated candidates
        candidate_input_ids = torch.tensor([[8, 0, 3, 9, 8, 1, 4, 5]])  # input tokens
        candidate_logits = torch.tensor(
            [
                [
                    [-10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # generated 1
                    [-10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0],  # generated 4
                    [-10.0, -10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0],  # generated 5
                ]
            ]
        )
        candidate_length = 3
        inf = float("inf")
        new_logits = torch.tensor(
            [
                [
                    # accepts 1:
                    [-inf, 10.0, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
                    # accepts 4:
                    [-inf, -inf, -inf, -inf, 10.0, -inf, -inf, -inf, -inf, -inf],
                    # most likely to be 1 or 8, less likely to be 3, then 7, and should never be any other value:
                    [-inf, 2.0, -inf, 1.0, -inf, -inf, -inf, -0.01, 2.0, -inf],
                    # N/A:
                    [-inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
                ]
            ]
        )
        last_assistant_token_is_eos = False
        last_validated_token = []
        # Sample many times so the empirical token frequencies approximate the target distribution.
        for _ in range(10_000):
            validated_tokens, n_matches = _speculative_sampling(
                candidate_input_ids,
                candidate_logits,
                candidate_length,
                new_logits,
                last_assistant_token_is_eos,
            )
            self.assertTrue(n_matches.item() == 2)
            self.assertTrue(validated_tokens.tolist()[0][0] == 1)
            self.assertTrue(validated_tokens.tolist()[0][1] == 4)
            self.assertTrue(validated_tokens.tolist()[0][2] in [1, 3, 7, 8])
            last_validated_token.append(validated_tokens.tolist()[0][2])
        # check that the most likely tokens are selected more often than the less likely ones
        last_token_counts = collections.Counter(last_validated_token)
        self.assertTrue(last_token_counts[1] > last_token_counts[3] > last_token_counts[7] > 0)
        self.assertTrue(last_token_counts[8] > last_token_counts[3])

    @pytest.mark.torch_export_test
    def test_cache_dependant_input_preparation_exporting(self):
        """Checks that the export-friendly input preparation matches the eager implementation on four cases."""
        self.assertFalse(
            is_torchdynamo_exporting()
        )  # otherwise this test does not compare two different implementation

        # Case 1: empty input_ids (sliced to zero length) with inputs_embeds present
        input_ids = torch.randint(0, 16, (2, 8), dtype=torch.int64)[:, :0]
        inputs_embeds = torch.rand((2, 8), dtype=torch.float32)
        cache_position = torch.arange(0, 8, dtype=torch.int64)
        eager1, eager2 = GenerationMixin()._cache_dependant_input_preparation(input_ids, inputs_embeds, cache_position)
        export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting(
            input_ids, inputs_embeds, cache_position
        )
        torch.testing.assert_close(eager1, export1)
        torch.testing.assert_close(eager2, export2)

        # Case 2: input_ids and inputs_embeds of matching length
        input_ids = torch.randint(0, 16, (2, 8), dtype=torch.int64)
        inputs_embeds = torch.rand((2, 8), dtype=torch.float32)
        cache_position = torch.arange(0, 8, dtype=torch.int64)
        eager1, eager2 = GenerationMixin()._cache_dependant_input_preparation(input_ids, inputs_embeds, cache_position)
        export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting(
            input_ids, inputs_embeds, cache_position
        )
        torch.testing.assert_close(eager1, export1)
        torch.testing.assert_close(eager2, export2)

        # Case 3: input_ids longer than cache_position, no inputs_embeds
        input_ids = torch.randint(0, 16, (2, 12), dtype=torch.int64)
        inputs_embeds = None
        cache_position = torch.arange(0, 8, dtype=torch.int64)
        eager1, eager2 = GenerationMixin()._cache_dependant_input_preparation(input_ids, inputs_embeds, cache_position)
        export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting(
            input_ids, inputs_embeds, cache_position
        )
        torch.testing.assert_close(eager1, export1)
        torch.testing.assert_close(eager2, export2)

        # Case 4: input_ids matching cache_position length, no inputs_embeds
        input_ids = torch.randint(0, 16, (2, 8), dtype=torch.int64)
        inputs_embeds = None
        cache_position = torch.arange(0, 8, dtype=torch.int64)
        eager1, eager2 = GenerationMixin()._cache_dependant_input_preparation(input_ids, inputs_embeds, cache_position)
        export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting(
            input_ids, inputs_embeds, cache_position
        )
        torch.testing.assert_close(eager1, export1)
        torch.testing.assert_close(eager2, export2)


# Module-level RNG shared by the tensor factories below, so tests are reproducible within a run.
global_rng = random.Random()


# Copied from tests.test_modeling_common.ids_tensor
def ids_tensor(shape, vocab_size, rng=None, name=None):
    #  Creates a random int32 tensor of the shape within the vocab size
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


# Copied from tests.test_modeling_common.floats_tensor
def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()


@pytest.mark.generate
@require_torch
class GenerationIntegrationTests(unittest.TestCase):
    @slow
    def test_diverse_beam_search(self):
        """End-to-end check of group (diverse) beam search against pinned BART-CNN summaries."""
        article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""

        bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
        input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)

        outputs = bart_model.generate(
            input_ids,
            num_beams=4,
            num_return_sequences=2,
            num_beam_groups=4,
            diversity_penalty=2.0,
            remove_invalid_values=True,
        )

        generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        self.assertListEqual(
            generated_text,
            [
                "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the"
                " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle"
                " name, as well as his father's first. It is the first baby for both of them.",
                "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the"
                " first child for both. The couple announced the pregnancy in January. The name Silas is the middle"
                " name of Timberlake's maternal grandfather. It's also his own middle name.",
            ],
        )

    @slow
    def test_beam_search_early_stop_heuristic(self):
        """Regression test for #38778 (early stopping needs to be tracked at a batch level)"""
        EXPECTED_OUTPUT = (
            "<|user|>\nWhat is 3+5?\n<|assistant|>\nThe sum of 3 and 5 is 8. \n\nSo, 3 + 5 = 8. \n\n"
            "Let's confirm this using Python code:\n\n```python\n# Define the numbers\nnum1 = 3\nnum2 = 5\n\n"
            "# Calculate the sum\nresult = num1 + num2\n\n# Print the result\nprint(result)\n```\n"
            "```output\n8\n```\nThe sum of 3 and 5 is \\(\\boxed{8}\\)."
        )
        model = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0425-1B-Instruct").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-2-0425-1B-Instruct", padding_side="left")
        generation_config = GenerationConfig(
            num_beams=10,
            max_new_tokens=256,
            length_penalty=2,
        )

        # batch of 1 -- the first prompt alone must produce the pinned output
        question = [{"role": "user", "content": "What is 3+5?"}]
        question = tokenizer.apply_chat_template(
            question, tokenize=False, add_generation_prompt=True, return_tensors="pt"
        )
        inputs = tokenizer(question, return_tensors="pt", padding=True).to("cuda")
        outputs = model.generate(**inputs, generation_config=generation_config)
        responses = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        self.assertEqual(responses[0], EXPECTED_OUTPUT)

        # batch of 2 -- the same prompt must be unaffected by another row stopping earlier/later
        question = [{"role": "user", "content": "What is 3+5?"}]
        cot_question = [
            {
                "role": "user",
                "content": "What is 3+5? Explain your reasoning step by step, and provide the final answer at the end.",
            }
        ]
        question = tokenizer.apply_chat_template(
            question, tokenize=False, add_generation_prompt=True, return_tensors="pt"
        )
        cot_question = tokenizer.apply_chat_template(
            cot_question, tokenize=False, add_generation_prompt=True, return_tensors="pt"
        )
        inputs = tokenizer([question, cot_question], return_tensors="pt", padding=True).to("cuda")
        outputs = model.generate(**inputs, generation_config=generation_config)
        responses = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        self.assertEqual(responses[0], EXPECTED_OUTPUT)

    def test_max_length_if_input_embeds(self):
        """With `inputs_embeds`, `max_length` counts only new tokens; outputs should differ by the prompt length."""
        article = "Today a dragon flew over Paris."
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
        inputs_embeds = model.get_input_embeddings()(input_ids)

        max_length = 20
        input_len = input_ids.shape[-1]
        out_gen = model.generate(input_ids=input_ids, max_length=max_length)
        out_gen_embeds = model.generate(inputs_embeds=inputs_embeds, max_length=max_length)
        self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1])

    def test_min_length_if_input_embeds(self):
        """Same as above, for `min_length`: embeds-based generation excludes the prompt from the count."""
        article = "Today a dragon flew over Paris."
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
        inputs_embeds = model.get_input_embeddings()(input_ids)

        min_length = 10
        input_len = input_ids.shape[-1]
        out_gen = model.generate(input_ids=input_ids, min_length=min_length)
        out_gen_embeds = model.generate(inputs_embeds=inputs_embeds, min_length=min_length)
        self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1])

    def test_custom_stopping_criteria_overload_error(self):
        """Passing a `MaxLengthCriteria` alongside `max_length`-style controls must raise a ValueError."""
        article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
        bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
        bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)

        input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
        stopping_criteria = StoppingCriteriaList()
        stopping_criteria.append(MaxLengthCriteria(max_length=42))
        with self.assertRaises(ValueError):
            bart_model.generate(input_ids, stopping_criteria=stopping_criteria)
        with self.assertRaises(ValueError):
            bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32)
    def test_custom_stopping_criteria(self):
        # A user-defined `StoppingCriteria` must be honored: generation stops once the criteria
        # fires (sequence length >= 20) or at `max_length`, whichever comes first.
        article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
        bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
        bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
        input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)

        class DummyCriteria(StoppingCriteria):
            def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
                return input_ids.shape[-1] >= 20

        stopping_criteria = StoppingCriteriaList()
        stopping_criteria.append(DummyCriteria())

        # Criteria fires first (20 < 22)...
        self.assertEqual(
            list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape),
            [1, 20],
        )
        # ...but `max_length` still caps generation when it is smaller than the criteria threshold.
        self.assertEqual(
            list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape),
            [1, 18],
        )

    # TODO (joao): replace `stop_sequence` in the pipeline by the more recent `generate` functionality
    def test_stop_sequence_stopping_criteria(self):
        # The text-generation pipeline's `stop_sequence` argument truncates output at the first
        # occurrence of the stop string.
        prompt = """Hello I believe in"""
        generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart")
        output = generator(prompt, max_new_tokens=10)
        self.assertEqual(
            output,
            [{"generated_text": ("Hello I believe in we we we we we we we we we")}],
        )

        output = generator(prompt, stop_sequence=" we")
        self.assertEqual(output, [{"generated_text": "Hello I believe in we"}])

    def test_generate_non_nlp_input_ids_as_kwarg(self):
        # `input_ids` passed positionally and as a keyword must generate identical sequences, also
        # for non-NLP models (ImageGPT here).
        model = ImageGPTForCausalImageModeling.from_pretrained(
            "hf-internal-testing/tiny-random-imagegpt", max_length=10
        ).to(torch_device)
        input_ids = ids_tensor((3, 5), vocab_size=10)

        output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
        output_sequences = model.generate(input_ids).cpu()

        self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
        self.assertEqual(output_sequences.shape, (3, 10))

    def test_generate_input_values_as_encoder_kwarg(self):
        # Speech models take `input_values` (not `input_ids`); positional and keyword forms must
        # produce the same generations.
        input_values = floats_tensor((2, 250))
        model
= SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_transition_scores_group_beam_search_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, num_return_sequences=2, diversity_penalty=1.0, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores_sum = transition_scores.sum(-1) torch.testing.assert_close(transition_scores_sum, outputs.sequences_scores, rtol=1e-3, atol=1e-3) @slow def test_green_red_watermark_generation(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token_id = tokenizer.eos_token_id model_inputs = tokenizer("I will be", return_tensors="pt").to(torch_device) input_len = model_inputs["input_ids"].shape[-1] # generation should work with both input types: WatermarkingConfig or Dict, so let's check it here :) watermark_config = WatermarkingConfig(bias=2.5, 
seeding_scheme="selfhash") _ = model.generate(**model_inputs, watermarking_config=watermark_config, do_sample=False, max_length=15) # We will not check watermarked text, since we check it in `logits_processors` tests # Checking if generated ids are as expected fails on different hardware args = { "bias": 2.0, "context_width": 1, "seeding_scheme": "selfhash", "greenlist_ratio": 0.25, "hashing_key": 15485863, } output = model.generate(**model_inputs, do_sample=False, max_length=15) output_selfhash = model.generate(**model_inputs, watermarking_config=args, do_sample=False, max_length=15) # Check that the detector is detecting watermarked text detector = WatermarkDetector(model_config=model.config, device=torch_device, watermarking_config=args) detection_out_watermarked = detector(output_selfhash[:, input_len:], return_dict=True) detection_out = detector(output[:, input_len:], return_dict=True) self.assertListEqual(detection_out_watermarked.prediction.tolist(), [True]) self.assertListEqual(detection_out.prediction.tolist(), [False]) """Check the mean bias inserted by the watermarking algorithm.""" @slow def test_synthid_text_watermark_generation_mean_expected_bias(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token_id = tokenizer.eos_token_id model_inputs = tokenizer("I will be", return_tensors="pt").to(torch_device) input_len = 5 batch_size = 200 # generation should work with both input types: WatermarkingConfig or Dict, so let's check it here :) watermark_config = SynthIDTextWatermarkingConfig(keys=[10, 20], ngram_len=5, debug_mode=True) logits_processor = watermark_config.construct_processor(model.config.vocab_size, torch_device) mean_g_values_repeats = [] for _ in range(40): input_ids = torch.zeros( (batch_size, input_len), dtype=torch.int64, device=torch_device, ) model_inputs = { "input_ids": input_ids, 
"attention_mask": torch.ones_like(input_ids, device=torch_device), } output = model.generate( **model_inputs, watermarking_config=watermark_config, do_sample=True, max_length=500, top_k=1000 ) g_values = logits_processor.compute_g_values(input_ids=output[:, input_len:]) context_repetition_mask = logits_processor.compute_context_repetition_mask( input_ids=output[:, input_len:], ).unsqueeze(dim=2) mean_g_values = torch.masked.mean( g_values, mask=context_repetition_mask, dim=0, keepdim=True, dtype=torch.float64, ) mean_g_values_repeats.append(mean_g_values) mean_g_values = torch.concat(mean_g_values_repeats, dim=0).mean(dim=0) expected_mean_g_value = logits_processor.expected_mean_g_value( vocab_size=model.config.vocab_size, ) atol = 0.03 is_close = torch.isclose( mean_g_values, torch.tensor(expected_mean_g_value, dtype=torch.float64), atol=atol, rtol=0, ) self.assertTrue(torch.all(is_close)) @slow def test_beam_search_example_integration(self): # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((1, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = {"encoder_outputs": model.get_encoder()(encoder_input_ids, return_dict=True)} outputs = model.generate( input_ids, num_beams=num_beams, min_length=5, eos_token_id=model.config.eos_token_id, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers were not prepared and didn't know what to do. 
They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, 
remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_cfg_mixin(self): model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") input = tokenizer(["The dragon flew over Paris,"], return_tensors="pt", return_attention_mask=True) input["input_ids"] = input["input_ids"].to(torch_device) input["attention_mask"] = input["attention_mask"].to(torch_device) outputs = model.generate(**input, max_new_tokens=32, guidance_scale=1.5) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The dragon flew over Paris, landing in the Rue de la Bastille. The crowd was so excited " 'that they had to leave the city.\n\n"We\'re going to Paris!"\n' ], ) neg = tokenizer(["France,"], return_tensors="pt", return_attention_mask=True) neg["input_ids"] = neg["input_ids"].to(torch_device) neg["attention_mask"] = neg["attention_mask"].to(torch_device) outputs = model.generate( **input, max_new_tokens=32, guidance_scale=1.5, negative_prompt_ids=neg["input_ids"], negative_prompt_attention_mask=neg["attention_mask"], ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ 'The dragon flew over Paris, landing on the pavement.\n\n"Paris!"\n\n"Paris!"\n\n"' 'Paris!"\n\n"Paris!"\n\n"Paris!"\n\n' ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") encoder_input_str = "translate English to German: How old are you?" 
force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((1, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = {"encoder_outputs": model.get_encoder()(encoder_input_ids, return_dict=True)} constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token outputs = model.generate( input_ids, num_beams=num_beams, force_words_ids=[constraint_token_ids], min_length=5, eos_token_id=model.config.eos_token_id, **model_kwargs, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_per_row_stopping_criteria(self): text = [ "They completed the challenging puzzle, revealing the hidden", "Today a dragon flew over France", "The aroma of freshly baked pizza filled the kitchen", ] stop_strings = ["secrets"] model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer.padding_side = "left" tokenizer.pad_token_id = 
tokenizer.eos_token_id input_ids = tokenizer(text, return_tensors="pt", padding="longest", add_special_tokens=False).input_ids.to( torch_device ) # normal generation with one stopping criteria out = model.generate(input_ids, max_length=15) out_text = tokenizer.batch_decode(out) expected_out = [ "They completed the challenging puzzle, revealing the hidden secrets of the world.\n", "<|endoftext|><|endoftext|><|endoftext|>Today a dragon flew over France and the French government was forced", "The aroma of freshly baked pizza filled the kitchen with a sense of freshness", ] self.assertListEqual(out_text, expected_out) # generation should stop at "secrets" for first batch only, filling the rest with eos tokens out = model.generate(input_ids, max_length=15, stop_strings=stop_strings, tokenizer=tokenizer) out_text = tokenizer.batch_decode(out) expected_out = [ "They completed the challenging puzzle, revealing the hidden secrets<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>", "<|endoftext|><|endoftext|><|endoftext|>Today a dragon flew over France and the French government was forced", "The aroma of freshly baked pizza filled the kitchen with a sense of freshness", ] self.assertListEqual(out_text, expected_out) def test_constrained_beam_search_mixin_type_checks(self): tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" 
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_batched_decoder_start_id(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) decoder_start_token_id = bart_model.generation_config.decoder_start_token_id decoder_start_token_id_batch = [decoder_start_token_id] * input_ids.shape[0] outputs = bart_model.generate(input_ids, decoder_start_token_id=decoder_start_token_id) outputs_batched_ids = bart_model.generate(input_ids, decoder_start_token_id=decoder_start_token_id_batch) self.assertListEqual(outputs.tolist(), outputs_batched_ids.tolist()) def test_decoder_start_id_from_config(self): # Refer to: (#30899) articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated 
 Olympian of all time.",
        ]
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
            torch_device
        )
        input_ids = bart_tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
        decoder_start_token_id = bart_model.generation_config.decoder_start_token_id

        # we should be able to take `decoder_start_token_id` from model's generation config if user passes a `GenerationConfig` type
        outputs = bart_model.generate(input_ids, generation_config=GenerationConfig(do_sample=False))

        # If the generation config has no `decoder_start_token_id` or `bos_token_id`, we will raise an error unless user passes it in config
        bart_model.generation_config.decoder_start_token_id = None
        bart_model.generation_config.bos_token_id = None
        outputs_with_user_id = bart_model.generate(
            input_ids,
            generation_config=GenerationConfig(do_sample=False, decoder_start_token_id=decoder_start_token_id),
        )

        # The model-config-sourced id and the user-supplied id must lead to identical generations.
        self.assertListEqual(outputs.tolist(), outputs_with_user_id.tolist())

        # With both ids removed from the model's generation config and none supplied, generate fails.
        with self.assertRaises(ValueError):
            outputs = bart_model.generate(input_ids, generation_config=GenerationConfig(do_sample=False))

    def test_contrastive_search_batched(self):
        # Tests that contrastive search works with batched inputs (i.e.
has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_logits_processor_not_inplace(self): article = "Today a dragon flew over Paris." model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) out = model.generate(input_ids, output_logits=True, output_scores=True, return_dict_in_generate=True) out_with_temp = model.generate( input_ids, temperature=0.5, do_sample=True, output_logits=True, output_scores=True, return_dict_in_generate=True, ) # if no logits processor is used, scores == logits. 
Otherwise, the processor has to modify the scores self.assertListEqual(out.logits[-1].tolist(), out.scores[-1].tolist()) self.assertNotEqual(out_with_temp.logits[-1].tolist(), out_with_temp.scores[-1].tolist()) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has TF equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 20 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) # Only some seeds will work both on CPU/GPU for a fixed `expectation` value. # The selected seed is not guaranteed to work on all torch versions. torch.manual_seed(1) eos_token_id = 846 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) torch.manual_seed(1) eos_token_id = [846, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has TF equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) output = bart_model.generate(input_ids).cpu().numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. 
Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=None, **kwargs): return super().forward(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def forward(self, input_ids, **kwargs): return super().forward(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).cpu().numpy() with self.assertRaises(TypeError): # FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar") def test_default_max_length_warning(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.generation_config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Default generation config value of 20 -> emits warning with self.assertWarns(UserWarning): model.generate(input_ids) # Explicitly setting max_length to 20 -> no warning with warnings.catch_warnings(record=True) as 
 warning_list:
            model.generate(input_ids, max_length=20)
        self.assertEqual(len(warning_list), 0)

        # Generation config max_length != 20 -> no warning
        with warnings.catch_warnings(record=True) as warning_list:
            # generation_config is modified -> legacy mode is disabled = generation_config takes precedence
            model.generation_config.max_length = 10
            model.generate(input_ids)
        self.assertEqual(len(warning_list), 0)

    def test_length_warning_assisted_generation(self):
        # Assisted generation with a feasible `min_new_tokens`/`max_length` pair must not emit any
        # "min length not feasible" warning.
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model.generation_config.pad_token_id = tokenizer.eos_token_id
        assistant.generation_config.pad_token_id = tokenizer.eos_token_id

        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)

        # This should not raise any warning that min length is not feasible in candidate generation
        with warnings.catch_warnings(record=True) as warning_list:
            model.generate(
                input_ids,
                assistant_model=assistant,
                min_new_tokens=10,
                max_length=20,
            )
        self.assertEqual(len(warning_list), 0)

    def test_default_assisted_generation(self):
        # Pin the default assisted-generation settings of a fresh `GenerationConfig` so that
        # accidental default changes are caught.
        # Initialize the GenerationConfig object
        config = GenerationConfig()

        # Check the default values
        self.assertEqual(config.num_assistant_tokens, 20)
        self.assertEqual(config.num_assistant_tokens_schedule, "constant")
        self.assertEqual(config.assistant_confidence_threshold, 0.4)
        self.assertEqual(config.is_assistant, False)

    def test_generated_length_assisted_generation(self):
        # Assisted generation must respect `min_new_tokens`/`max_new_tokens` bounds relative to the
        # prompt length (assertions continue below).
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model.generation_config.pad_token_id = tokenizer.eos_token_id
        assistant.generation_config.pad_token_id = tokenizer.eos_token_id

        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)
        input_length = input_ids.shape[-1]

        # Both bounds set: output length must land inside [input + 10, input + 20].
        out = model.generate(
            input_ids,
            assistant_model=assistant,
            min_new_tokens=10,
            max_new_tokens=20,
        )
        self.assertTrue((10 + input_length) <= out.shape[-1] <= (20 + input_length))

        # Only a lower bound.
        out = model.generate(
            input_ids,
            assistant_model=assistant,
            min_new_tokens=10,
        )
        self.assertTrue((input_length + 10) <= out.shape[-1])

        # Only an upper bound.
        out = model.generate(
            input_ids,
            assistant_model=assistant,
            max_new_tokens=7,
        )
        self.assertTrue(out.shape[-1] <= (input_length + 7))

    def test_model_kwarg_assisted_decoding_decoder_only(self):
        # Extra model kwargs (`token_type_ids`) must be forwarded to the assistant during assisted
        # decoding, so assisted and non-assisted runs with the same kwargs agree.
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model.generation_config.pad_token_id = tokenizer.eos_token_id
        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)

        # Traditional way of generating text
        outputs_normal = model.generate(input_ids)
        self.assertEqual(outputs_normal.shape, (1, 20))

        # Should be different with token_type_ids
        outputs_tti = model.generate(
            input_ids,
            token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device),
        )
        with self.assertRaises(AssertionError):
            self.assertListEqual(outputs_tti.tolist(), outputs_normal.tolist())

        # Assistant model
        assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        assistant.config.pad_token_id = tokenizer.eos_token_id

        # If assisted generation passes model_kwargs correctly, should be same as previous
        outputs_assisted = model.generate(
            input_ids,
            token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device),
            assistant_model=assistant,
) self.assertListEqual(outputs_assisted.tolist(), outputs_tti.tolist()) def test_assisted_decoding_num_assistant_tokens_heuristic_schedule(self): # This test ensures that the assisted generation num_assistant_tokens 'heuristic' schedule works properly. prompt = "Alice and Bob" checkpoint = "EleutherAI/pythia-160m-deduped" tokenizer = AutoTokenizer.from_pretrained(checkpoint) inputs = tokenizer(prompt, return_tensors="pt") model = AutoModelForCausalLM.from_pretrained(checkpoint) assistant_model = model assistant_model.generation_config.num_assistant_tokens = 5 assistant_model.generation_config.num_assistant_tokens_schedule = "heuristic" generation_kwargs = { "eos_token_id": -1, "max_new_tokens": 5, "do_sample": False, "assistant_model": assistant_model, } model.generate(**inputs, **generation_kwargs) # update_candidate_strategy is called only once and therefore, assistant_model.generation_config.num_assistant_tokens should be either 4 or 7 self.assertTrue(assistant_model.generation_config.num_assistant_tokens in (4, 7)) def test_assisted_decoding_num_assistant_tokens_heuristic_transient_schedule(self): # This test ensures that the assisted generation num_assistant_tokens 'heuristic' schedule works properly. 
        prompt = "Alice and Bob"
        checkpoint = "EleutherAI/pythia-160m-deduped"
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        inputs = tokenizer(prompt, return_tensors="pt")

        model = AutoModelForCausalLM.from_pretrained(checkpoint)
        assistant_model = model
        assistant_model.generation_config.num_assistant_tokens = 5
        assistant_model.generation_config.num_assistant_tokens_schedule = "heuristic_transient"
        generation_kwargs = {
            "eos_token_id": -1,
            "max_new_tokens": 5,
            "do_sample": False,
            "assistant_model": assistant_model,
        }
        model.generate(**inputs, **generation_kwargs)
        # update_candidate_strategy is called once but assistant_model.generation_config.num_assistant_tokens should stay 5
        self.assertEqual(assistant_model.generation_config.num_assistant_tokens, 5)

    @slow
    def test_validate_assistant(self):
        # Assisted decoding validation: a decoder-only assistant is only compatible when the main
        # model's encoder matches; mismatched encoders/tokenizers must raise `ValueError`.
        # Generate a random sample:
        inputs = np.random.rand(160000)

        # Load a main encoder-decoder model:
        model_id = "openai/whisper-large-v2"
        processor = AutoProcessor.from_pretrained(model_id)
        model = AutoModelForSpeechSeq2Seq.from_pretrained(
            model_id,
            use_safetensors=True,
        )
        model.to(torch_device)

        # process the input:
        features = processor(inputs, return_tensors="pt").to(torch_device)

        # Load an encoder-decoder assistant with same encoder as the main model:
        assistant_distil_model_id = "distil-whisper/distil-large-v2"
        assistant_seq_to_seq = AutoModelForSpeechSeq2Seq.from_pretrained(
            assistant_distil_model_id,
            use_safetensors=True,
        ).to(torch_device)
        self.assertTrue(model.generate(**features, assistant_model=assistant_seq_to_seq).sum())

        # Load its decoder only version:
        assistant_causal_lm = AutoModelForCausalLM.from_pretrained(
            assistant_distil_model_id,
            use_safetensors=True,
        ).to(torch_device)
        self.assertTrue(model.generate(**features, assistant_model=assistant_causal_lm).sum())

        # Load an encoder-decoder assistant with a different encoder than the main model:
        assistant_distil_model_id = "openai/whisper-tiny"
        assistant_seq_to_seq = AutoModelForSpeechSeq2Seq.from_pretrained(
            assistant_distil_model_id,
            use_safetensors=True,
        ).to(torch_device)
        self.assertTrue(model.generate(**features, assistant_model=assistant_seq_to_seq).sum())

        # Load its decoder only version:
        assistant_causal_lm = AutoModelForCausalLM.from_pretrained(
            assistant_distil_model_id,
            use_safetensors=True,
        ).to(torch_device)

        # It will raise an error as the encoder of the main and assistant model are not compatible:
        with self.assertRaises(ValueError):
            model.generate(**features, assistant_model=assistant_causal_lm)

        # Load an encoder-decoder model with a different tokenizer than the main model:
        assistant_distil_model_id = "hf-internal-testing/tiny-random-SeamlessM4Tv2ForSpeechToText"
        assistant_seq_to_seq = AutoModelForSpeechSeq2Seq.from_pretrained(
            assistant_distil_model_id,
        ).to(torch_device)

        # This should raise an error as the main and assistant model don't use the same tokenizer:
        with self.assertRaises(ValueError):
            model.generate(**features, assistant_model=assistant_seq_to_seq)

    def test_compare_unprocessed_logit_scores(self):
        # Get unprocessed logit scores back from model generate function.
# Assert that unprocessed logits from generate() are same as those from modal eval() # tell model to generate text and return unprocessed/unwarped logit scores tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = "generate yes or no: " input_ids = tokenizer([text], return_tensors="pt").input_ids.to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) with torch.no_grad(): # Get logits for the next token from fwd pass logits_fwd = model(input_ids).logits[:, -1, :][0] # Get logits for the next token from generate function outputs = model.generate( input_ids=input_ids, return_dict_in_generate=True, output_logits=True, max_new_tokens=1, do_sample=True, ) logits_gen = outputs.logits[0][0] # assert that unprocessed logits from generate() are same as those from modal eval() torch.testing.assert_allclose(logits_fwd.tolist(), logits_gen.tolist()) def test_return_unprocessed_logit_scores(self): # tell model to generate text and return unprocessed/unwarped logit scores tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = "generate yes or no: " input_ids = tokenizer([text], return_tensors="pt").input_ids.to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) outputs = model.generate( input_ids=input_ids, return_dict_in_generate=True, output_logits=True, max_new_tokens=3 ) # perform dummy check if unpreprocessed logits make sense. 
        # do preselection on high probabilities; find scores of y and n tokens
        probs_all = torch.nn.functional.softmax(outputs.logits[2][0], dim=-1)
        indices = torch.argwhere(probs_all > 0.001)
        indices = indices[:, -1]
        tokens_max = tokenizer.batch_decode(indices, skip_special_tokens=True)
        probs_max = probs_all[probs_all > 0.001]

        self.assertTrue(len(indices) >= 2)
        next_token_dict = {str(t): p for t, p in zip(tokens_max, probs_max)}
        self.assertTrue("n" in next_token_dict)
        self.assertTrue("y" in next_token_dict)
        y_prob = next_token_dict["y"]
        n_prob = next_token_dict["n"]

        # probabilities must be valid (above the preselection threshold and at most 1)
        self.assertTrue(y_prob > 0.001 and n_prob > 0.001)
        self.assertTrue(y_prob <= 1.0 and n_prob <= 1.0)

    @slow
    @require_torch_multi_accelerator
    def test_assisted_decoding_in_different_accelerator(self):
        # Assisted generation must work when main and assistant models sit on different accelerators.
        device_0 = f"{torch_device}:0" if torch_device != "cpu" else "cpu"
        device_1 = f"{torch_device}:1" if torch_device != "cpu" else "cpu"
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(device_0)
        assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
            device_1
        )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
        model.config.pad_token_id = tokenizer.eos_token_id
        assistant.config.pad_token_id = tokenizer.eos_token_id

        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)
        input_length = input_ids.shape[-1]

        out = model.generate(
            input_ids,
            assistant_model=assistant,
            max_new_tokens=20,
        )
        self.assertTrue(input_length <= out.shape[-1] <= input_length + 20)

    @slow
    @require_torch_accelerator
    def test_assisted_decoding_model_in_accelerator_assistant_in_cpu(self):
        # Assisted generation must work when the assistant stays on CPU while the main model is on an accelerator.
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
            torch_device
        )
        assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
            "cpu"
        )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
        model.config.pad_token_id = tokenizer.eos_token_id
        assistant.config.pad_token_id = tokenizer.eos_token_id

        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)
        input_length = input_ids.shape[-1]

        out = model.generate(
            input_ids,
            assistant_model=assistant,
            max_new_tokens=20,
        )
        self.assertTrue(input_length <= out.shape[-1] <= input_length + 20)

    def test_special_tokens_fall_back_to_model_default(self):
        # Special tokens passed via a GenerationConfig take precedence; missing ones fall back to
        # `model.generation_config`.
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
            torch_device
        )
        test_bos_id = 50

        # Sanity-check: the model has a BOS token set, and the first generated token is a BOS token
        gen_output = model.generate()
        self.assertTrue(model.generation_config.bos_token_id is not None)
        self.assertTrue(model.generation_config.bos_token_id == gen_output[0, 0])

        # If we pass a generation config **with** a BOS token, `generate` will use it
        generation_config = GenerationConfig(bos_token_id=test_bos_id)
        gen_output = model.generate(generation_config=generation_config)
        self.assertFalse(model.generation_config.bos_token_id == gen_output[0, 0])
        self.assertTrue(generation_config.bos_token_id == gen_output[0, 0])
        self.assertTrue(test_bos_id == gen_output[0, 0])

        # If we pass a generation config **without** a BOS token, `generate` will fetch the BOS token from
        # `model.generation_config`
        generation_config = GenerationConfig(bos_token_id=None)
        gen_output = model.generate(generation_config=generation_config)
        self.assertTrue(model.generation_config.bos_token_id == gen_output[0, 0])
        self.assertFalse(test_bos_id == gen_output[0, 0])
        self.assertTrue(generation_config.bos_token_id is None)

        # Changing `model.generation_config` will affect fallback behavior
        model.generation_config.bos_token_id = test_bos_id
        gen_output = model.generate(generation_config=generation_config)
        self.assertTrue(model.generation_config.bos_token_id == gen_output[0, 0])
        self.assertTrue(test_bos_id == gen_output[0, 0])
        self.assertTrue(generation_config.bos_token_id is None)

    def test_speculative_decoding_equals_regular_decoding(self):
        # Speculative decoding with an assistant that uses a *different* tokenizer must reproduce
        # the exact greedy output of the target model alone.
        draft_name = "double7/vicuna-68m"
        target_name = "Qwen/Qwen2-0.5B-Instruct"

        draft_model = AutoModelForCausalLM.from_pretrained(draft_name)
        target_model = AutoModelForCausalLM.from_pretrained(target_name)

        assistant_tokenizer = AutoTokenizer.from_pretrained(draft_name)
        target_tokenizer = AutoTokenizer.from_pretrained(target_name)

        # random prompt length / generation length so many cache-length combinations get exercised
        prompt_size = torch.randint(low=20, high=100, size=(1,))
        max_new_tokens = torch.randint(low=10, high=50, size=(1,))
        input_ids = (torch.rand(1, prompt_size[0]) * 100).to(int) + 50

        max_new_tokens_item = max_new_tokens[0].item()
        expected_out = target_model.generate(input_ids, do_sample=False, max_new_tokens=max_new_tokens_item)
        predicted_out = target_model.generate(
            input_ids,
            do_sample=False,
            max_new_tokens=max_new_tokens_item,
            assistant_model=draft_model,
            tokenizer=target_tokenizer,
            assistant_tokenizer=assistant_tokenizer,
        )

        self.assertEqual(expected_out.shape, predicted_out.shape)
        self.assertTrue((expected_out == predicted_out).all().item())

    @pytest.mark.generate
    @require_torch_multi_accelerator
    def test_generate_with_static_cache_multi_accelerator(self):
        """
        Tests if the static cache has been set correctly and if generate works correctly when we are using multi-accelerators.
        """
        # need to split manually as auto doesn't work well with unbalanced model
        device_map = {"model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 1, "model.norm": 1, "lm_head": 0}
        model = AutoModelForCausalLM.from_pretrained(
            "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map
        )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)

        generation_kwargs = {
            "max_new_tokens": 20,
            "cache_implementation": "static",
            "return_dict_in_generate": True,  # Required to return `past_key_values`
        }

        results = model.generate(input_ids, **generation_kwargs)
        self.assertTrue(isinstance(results.past_key_values, StaticCache))

        # check device of each layer: the cache tensors must live on the same device as the layer they belong to
        keys_0 = results.past_key_values.layers[0].keys
        values_0 = results.past_key_values.layers[0].values
        self.assertTrue(keys_0.device == values_0.device == torch.device(0))

        keys_1 = results.past_key_values.layers[1].keys
        values_1 = results.past_key_values.layers[1].values
        self.assertTrue(keys_1.device == values_1.device == torch.device(1))

    @pytest.mark.generate
    @require_torch_multi_accelerator
    def test_generate_multi_accelerator_causal_mask(self):
        """
        Tests that cache position device doesn't clash with causal mask device when we are using multi-accelerators.
        In real life happens only when multimodal encoder size is big, so `embed_tokens` gets allocated to the next
        device. The error will be triggered whenever a batched input is used, so that `causal_mask` is actually
        prepared instead of being `None`.
        """
        # need to split manually as auto doesn't work well with unbalanced model
        device_map = {
            "visual": 0,
            "model.embed_tokens": 1,
            "model.layers.0": 1,
            "model.layers.1": 1,
            "model.rotary_emb": 1,
            "model.norm.weight": 1,
            "lm_head": 1,
        }
        model = AutoModelForImageTextToText.from_pretrained(
            "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration", device_map=device_map
        )
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")

        # batched input of different lengths so a real (non-None) causal mask is built
        text = ["Hello world", "Today I went to the supermarket to buy"]
        inputs = processor(text=text, padding=True, return_tensors="pt").to(torch_device)
        _ = model.generate(**inputs, max_new_tokens=20)

    @pytest.mark.generate
    @require_torch_multi_accelerator
    def test_init_static_cache_multi_accelerator(self):
        """
        Tests if the static cache has been set correctly when we initialize it manually in a multi-accelerator setup.
        """
        # need to split manually as auto doesn't work well with unbalanced model
        device_map = {"model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 1, "model.norm": 1, "lm_head": 0}
        model = AutoModelForCausalLM.from_pretrained(
            "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map
        )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

        text = "Hello world"
        tokenized_inputs = tokenizer([text], return_tensors="pt")
        input_ids = tokenized_inputs.input_ids.to(torch_device)

        generation_kwargs = {
            "max_new_tokens": 20,
            "return_dict_in_generate": True,  # Required to return `past_key_values`
        }

        # TODO: We need to raise a warning in case the cache is not set correctly
        # with self.assertRaisesRegex(ValueError, "If you are manually initializing the cache"):
        #     past_key_values = StaticCache(
        #         config=model.config, max_batch_size=1, max_cache_len=30, device=torch_device, dtype=model.dtype
        #     )
        #     results = model.generate(input_ids, past_key_values=past_key_values, **generation_kwargs)

        past_key_values = StaticCache(config=model.config, max_cache_len=30)
        results = model.generate(input_ids, past_key_values=past_key_values, **generation_kwargs)

        # check device of each layer: cache tensors must follow the device map of their layer
        keys_0 = results.past_key_values.layers[0].keys
        values_0 = results.past_key_values.layers[0].values
        self.assertTrue(keys_0.device == values_0.device == torch.device(0))

        keys_1 = results.past_key_values.layers[1].keys
        values_1 = results.past_key_values.layers[1].values
        self.assertTrue(keys_1.device == values_1.device == torch.device(1))

    @slow
    def test_padding_input_contrastive_search_gpt2(self):
        # Contrastive search must be invariant to left-padding: padded and unpadded prompts
        # should produce the same text.
        # Load the pre-trained GPT-2 model and tokenizer
        model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")
        model.to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", clean_up_tokenization_spaces=True)

        # Set the tokenizer to left-pad the sequences
        tokenizer.padding_side = "left"

        # Define the PAD token as the EOS token
        tokenizer.pad_token = tokenizer.eos_token
        model.generation_config.pad_token_id = model.generation_config.eos_token_id

        # Define the input prompt
        prompt_text = "The whispered legends of the haunted mansion spoke"

        # Tokenize the input prompt
        encoded_prompt = tokenizer(prompt_text, return_tensors="pt", padding=True)
        input_ids = encoded_prompt.input_ids.to(torch_device)
        attention_mask = encoded_prompt.attention_mask.to(torch_device)

        # Define the contrastive search params
        penalty_alpha = 0.6
        top_k = 4

        # Define the padding length to add to the input IDs and attention mask
        padding_length = 10

        # Generate text without padding
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            do_sample=False,
            penalty_alpha=penalty_alpha,
            top_k=top_k,
            max_new_tokens=64,
        )
        generated_text_no_padding = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Pad the input IDs and attention mask on the left
        padded_input_ids = F.pad(
            input_ids, (padding_length, 0), "constant", value=model.generation_config.pad_token_id
        )
        padded_attention_mask = F.pad(attention_mask, (padding_length, 0), "constant", value=0)

        # Generate text with padded inputs
        outputs_with_padding = model.generate(
            input_ids=padded_input_ids,
            attention_mask=padded_attention_mask,
            do_sample=False,
            penalty_alpha=penalty_alpha,
            top_k=top_k,
            max_new_tokens=64,
        )
        generated_text_with_padding = tokenizer.decode(outputs_with_padding[0], skip_special_tokens=True)

        # Assert that the generated texts are identical for padded and non-padded inputs
        self.assertEqual(generated_text_no_padding, generated_text_with_padding)
        self.assertEqual(
            generated_text_with_padding,
            'The whispered legends of the haunted mansion spoke of the "souls of the dead" who were "falling '
            'out of the sky" and "falling into the sea."\n\nThe ghostly apparitions were said to have been '
            'created by the spirits of the dead, who were "falling out of the sky" and "falling into the sea',
        )

    @slow
    def test_padding_input_contrastive_search_t5(self):
        # Same padding-invariance check as the GPT-2 test above, but for an encoder-decoder
        # model with left-padded *decoder* inputs.
        # Load the pre-trained T5 model and tokenizer
        model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
        model.to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small", clean_up_tokenization_spaces=True)

        # Define the input prompt
        prompt_text = "translate English to German: I need to finish this task before the end of the day."
# Tokenize the input prompt encoded_prompt = tokenizer(prompt_text, return_tensors="pt") input_ids = encoded_prompt.input_ids.to(torch_device) attention_mask = encoded_prompt.attention_mask.to(torch_device) # Define the decoder prompt decoder_prompt_text = "Ich muss diese Aufgabe" encoded_decoder_prompt = tokenizer(decoder_prompt_text, add_special_tokens=False, return_tensors="pt") decoder_input_ids = encoded_decoder_prompt.input_ids.to(torch_device) decoder_attention_mask = encoded_decoder_prompt.attention_mask.to(torch_device) # Define the contrastive search params penalty_alpha = 0.6 top_k = 4 # Generate text without padding outputs = model.generate( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, do_sample=False, penalty_alpha=penalty_alpha, top_k=top_k, max_new_tokens=64, ) generated_text_no_padding = tokenizer.decode(outputs[0], skip_special_tokens=True) # Define the padding length to add to the input IDs and attention mask padding_length = 10 # Pad the decoder input IDs and attention mask on the left padded_decoder_input_ids = F.pad( decoder_input_ids, (padding_length, 0), "constant", value=model.generation_config.pad_token_id ) padded_decoder_attention_mask = F.pad(decoder_attention_mask, (padding_length, 0), "constant", value=0) # Since the decoder_start_token_id is the same as the pad_token_id, # the last padded token represents the decoder start token. # Set the attention mask for the decoder_start_token_id to True (1). 
        padded_decoder_attention_mask[:, padding_length - 1] = 1

        # Generate text with padded inputs
        outputs_with_padding = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=padded_decoder_input_ids,
            decoder_attention_mask=padded_decoder_attention_mask,
            do_sample=False,
            penalty_alpha=penalty_alpha,
            top_k=top_k,
            max_new_tokens=64,
        )
        generated_text_with_padding = tokenizer.decode(outputs_with_padding[0], skip_special_tokens=True)

        # Assert that the generated texts are identical for padded and non-padded inputs
        self.assertEqual(generated_text_no_padding, generated_text_with_padding)
        self.assertEqual(generated_text_no_padding, "Ich muss diese Aufgabe vor Ende des Tages beenden.")

    def test_prepare_inputs_for_generation_decoder_llm(self):
        """Tests GenerationMixin.prepare_inputs_for_generation against expected usage with decoder-only llms."""
        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
        model = model.to(torch_device)

        # 1. Sanity check: the model's `prepare_inputs_for_generation` comes from `GenerationMixin`
        self.assertTrue("GenerationMixin" in str(model.prepare_inputs_for_generation))

        # 2. If we pass input ids by themselves, we should get back the same input ids
        input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]]).to(torch_device)
        model_inputs = model.prepare_inputs_for_generation(input_ids)
        self.assertTrue(torch.all(model_inputs["input_ids"] == input_ids))

        # 3. If we pass the attention mask too, we will get back the attention mask and position ids built from it
        attention_mask = torch.tensor([[1, 1, 1], [1, 1, 1]]).to(torch_device)
        model_inputs = model.prepare_inputs_for_generation(input_ids, attention_mask=attention_mask)
        self.assertTrue(torch.all(model_inputs["attention_mask"] == attention_mask))
        self.assertTrue(model_inputs["position_ids"].shape == input_ids.shape)

        # 4. `use_cache` (and other kwargs) are forwarded
        self.assertFalse("use_cache" in model_inputs)  # From the previous input, there is no `use_cache`
        model_inputs = model.prepare_inputs_for_generation(input_ids, use_cache=True, foo="bar")
        self.assertTrue(model_inputs["use_cache"] is True)
        self.assertTrue(model_inputs["foo"] == "bar")

        # 5. When we pass a cache, we discard data related to already seen tokens in some tensors. We are now also
        # forced to pass a correctly prepared `cache_positions` to slice the data accordingly.
        init_input_ids = input_ids[:, :2]
        dynamic_cache = DynamicCache()
        dynamic_cache = model(init_input_ids, past_key_values=dynamic_cache).past_key_values
        with self.assertRaises(AttributeError):  # past_key_values + no cache_position -> exception
            model_inputs = model.prepare_inputs_for_generation(input_ids, past_key_values=dynamic_cache)

        cache_position = torch.arange(input_ids.shape[-1], dtype=torch.long).to(torch_device)
        cache_position = cache_position[dynamic_cache.get_seq_length() :]
        model_inputs = model.prepare_inputs_for_generation(
            input_ids, past_key_values=dynamic_cache, cache_position=cache_position, attention_mask=attention_mask
        )
        self.assertTrue("past_key_values" in model_inputs)
        self.assertTrue(torch.all(model_inputs["cache_position"] == cache_position))
        self.assertTrue(model_inputs["input_ids"].shape[-1] == 1)  # 1 = 3 fed tokens - 2 tokens in the cache
        self.assertTrue(model_inputs["position_ids"].shape[-1] == 1)
        self.assertTrue(model_inputs["attention_mask"].shape[-1] == 3)  # we still need the full attention mask!

        # 6. If we pass a `static_cache`, the attention mask will be prepared as a static shape 4D mask
        max_cache_len = 10
        batch_size = 2
        query_length = input_ids.shape[-1] - init_input_ids.shape[-1]
        static_cache = StaticCache(config=config, max_cache_len=max_cache_len)
        static_cache = model(init_input_ids, past_key_values=static_cache).past_key_values
        model_inputs = model.prepare_inputs_for_generation(
            input_ids, past_key_values=static_cache, cache_position=cache_position, attention_mask=attention_mask
        )
        self.assertTrue("past_key_values" in model_inputs)
        self.assertTrue(list(model_inputs["attention_mask"].shape) == [batch_size, 1, query_length, max_cache_len])

        # 7. We can also pass `inputs_embeds` as the embedded prompt. Because `generate` will append its result to
        # `input_ids` and the models will only accept one of the two inputs (`input_ids` or `inputs_embeds`), we
        # a) must use the cache b) must expect `input_ids` after the prompt is processed
        init_inputs_embeds = model.get_input_embeddings()(init_input_ids)
        init_cache_positions = torch.arange(init_input_ids.shape[-1], dtype=torch.long).to(torch_device)
        empty_cache = DynamicCache()

        # Prompt processing
        model_inputs = model.prepare_inputs_for_generation(
            init_input_ids,
            past_key_values=empty_cache,
            inputs_embeds=init_inputs_embeds,
            cache_position=init_cache_positions,
        )
        self.assertTrue(model_inputs["input_ids"] is None)
        self.assertTrue(model_inputs["inputs_embeds"] is not None)

        # After prompt processing
        model_inputs = model.prepare_inputs_for_generation(
            input_ids, past_key_values=dynamic_cache, inputs_embeds=init_inputs_embeds, cache_position=cache_position
        )
        self.assertTrue(model_inputs["input_ids"] is not None)
        self.assertTrue(model_inputs["inputs_embeds"] is None)

    def test_prepare_inputs_for_generation_encoder_decoder_llm(self):
        """
        Same as `test_prepare_inputs_for_generation_decoder_llm` but for encoder-decoder models. Main difference: we
        should look for `decoder_input_ids`, instead of `input_ids`.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
        model = model.to(torch_device)

        # 1. Sanity check: the model's `prepare_inputs_for_generation` comes from `GenerationMixin`
        self.assertTrue("GenerationMixin" in str(model.prepare_inputs_for_generation))

        # 2. If we pass input ids by themselves, we should get back the same input ids -- with the encoder-decoder key
        decoder_input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]]).to(torch_device)
        model_inputs = model.prepare_inputs_for_generation(decoder_input_ids)
        self.assertTrue(torch.all(model_inputs["decoder_input_ids"] == decoder_input_ids))

        # 3. If we pass the attention mask too, we will get back the attention mask. Encoder-decoder models usually
        # don't use `position_ids`
        decoder_attention_mask = torch.tensor([[1, 1, 1], [1, 1, 1]]).to(torch_device)
        model_inputs = model.prepare_inputs_for_generation(
            decoder_input_ids, decoder_attention_mask=decoder_attention_mask
        )
        self.assertTrue(torch.all(model_inputs["decoder_attention_mask"] == decoder_attention_mask))
        self.assertTrue("position_ids" not in model_inputs)

        # 4. `use_cache` (and other kwargs, like the encoder outputs) are forwarded
        self.assertFalse("use_cache" in model_inputs)  # From the previous input, there is no `use_cache`
        model_inputs = model.prepare_inputs_for_generation(decoder_input_ids, use_cache=True, encoder_outputs="foo")
        self.assertTrue(model_inputs["use_cache"] is True)
        self.assertTrue(model_inputs["encoder_outputs"] == "foo")
        # See the decoder-only test for more corner cases. The code is the same, so we don't repeat it here.

    @pytest.mark.torch_compile_test
    def test_generate_compile_fullgraph_tiny(self):
        """
        Tests that we can call end-to-end generation with a tiny model (i.e. doesn't crash)
        NOTE: this test is quite slow (~20s on a consumer desktop), but it is important that we keep it as part of the
        non-slow tests to prevent regressions!
        """
        model = AutoModelForCausalLM.from_pretrained(
            "hf-internal-testing/tiny-random-LlamaForCausalLM", dtype=torch.bfloat16, device_map="auto"
        )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")

        # compile generate
        compiled_generate = torch.compile(model.generate, fullgraph=True, mode="reduce-overhead")

        # compiled generate does NOT accept parameterization except a) model inputs b) a generation config
        generation_config = copy.deepcopy(model.generation_config)
        generation_config.pad_token_id = model.config.eos_token_id

        model_inputs = tokenizer(["Write a poem about the market crashing in summer"], return_tensors="pt")
        model_inputs = model_inputs.to(model.device)
        gen_out = compiled_generate(**model_inputs, generation_config=generation_config)
        self.assertTrue(gen_out.shape[1] > model_inputs["input_ids"].shape[1])  # some text was generated

    @require_read_token
    @slow
    def test_assisted_generation_early_exit(self):
        """
        Tests that assisted generation with early exit works as expected. Under the hood, this has complex cache
        manipulation, which will cause the test to fail if something goes wrong there.
        """
        expected_output = "Alice and Bob are playing a game of poker. Alice has a pair of 8s and Bob has a pair"

        prompt = "Alice and Bob"
        checkpoint = "facebook/layerskip-llama3.2-1B"

        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        inputs = tokenizer(prompt, return_tensors="pt").to(torch_device)

        model = AutoModelForCausalLM.from_pretrained(checkpoint).to(torch_device)
        # baseline: plain greedy decoding must match the pinned output
        original_outputs = model.generate(**inputs, do_sample=False, max_new_tokens=20)
        original_decoded = tokenizer.batch_decode(original_outputs, skip_special_tokens=True)
        self.assertEqual(original_decoded, [expected_output])

        # early-exit assisted decoding must reproduce the same greedy output
        outputs_assisted = model.generate(**inputs, assistant_early_exit=4, do_sample=False, max_new_tokens=20)
        decoded_assisted = tokenizer.batch_decode(outputs_assisted, skip_special_tokens=True)
        self.assertEqual(decoded_assisted, [expected_output])

    @slow
    def test_beam_search_advanced_stopping_criteria(self):
        """
        Tests that beam search works with a stopping criteria that is not max length or EOS token. Prior to the beam
        search vectorization PR (#35802), beam search was not accepting other stopping criteria. Test inspired on the
        original issue (#34843).
        """
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
        model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct").to(torch_device)

        prompt = (
            "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. "
            "How many clips did Natalia sell altogether in April and May?"
        )
        tokens = tokenizer(prompt, return_tensors="pt").to(torch_device)
        generation_config = GenerationConfig(num_beams=3, do_sample=False, length_penalty=1.0, max_new_tokens=100)

        # This particular prompt should result in a ":" being present in the answer
        out = model.generate(**tokens, generation_config=generation_config, tokenizer=tokenizer)
        output_text = tokenizer.decode(out[0], skip_special_tokens=True)
        last_non_special_token_decoded = tokenizer.decode(out[out != tokenizer.pad_token_id][-1])
        self.assertTrue(":" in output_text)
        self.assertFalse(":" in output_text[-5:])
        self.assertFalse(":" in last_non_special_token_decoded)

        # Adding an advanced stopping criteria: text generation should stop when a ":" is generated.
        # Note that:
        # 1 - the text up to ":" doesn't have to be the same, it can belong to a different beam
        # 2 - ":" may not be the last char, but it must be in the last non-special token
        generation_config.stop_strings = ":"
        out = model.generate(**tokens, generation_config=generation_config, tokenizer=tokenizer)
        output_text = tokenizer.decode(out[0], skip_special_tokens=True)
        last_non_special_token_decoded = tokenizer.decode(out[out != tokenizer.pad_token_id][-1])
        self.assertTrue(":" in output_text)
        self.assertTrue(":" in output_text[-5:])
        self.assertTrue(":" in last_non_special_token_decoded)

    def test_max_time(self):
        # `max_time` must stop generation shortly after the time budget is exhausted, for all decoding strategies.
        tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
        model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")
        model.to(torch_device)

        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)

        MAX_TIME = 0.1
        MAX_LENGTH = 64

        # sampling on
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=MAX_LENGTH)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        # sampling off
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=MAX_LENGTH)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        # beam search
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=MAX_LENGTH)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        # sanity check: no time limit
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, max_time=None, max_length=MAX_LENGTH)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

    def test_validate_generation_inputs(self):
        """Tests validation of inputs to `generate`"""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)

        # however, valid model_kwargs are accepted
        valid_model_kwargs = {"attention_mask": torch.tensor(np.zeros_like(input_ids))}
        model.generate(input_ids, **valid_model_kwargs)

    def test_custom_logits_processor(self):
        """Tests that custom logits processors can be used in `generate`, and that redundant arguments are caught."""
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
        bart_model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart", min_length=1)
        input_ids = bart_tokenizer(article, return_tensors="pt").input_ids

        logits_processor = LogitsProcessorList()
        logits_processor.append(MinLengthLogitsProcessor(min_length=10, eos_token_id=0))
        # it should not be allowed to both define `min_length` via config and `logits_processor` list
        with self.assertRaises(ValueError):
            bart_model.generate(input_ids, logits_processor=logits_processor, min_length=10)
        bart_model.generate(input_ids, logits_processor=logits_processor)

    def test_transition_scores_greedy_search(self):
        """Test that `compute_transition_scores` is working as expected with greedy search"""
        articles = ["Justin Timberlake", "Michael Phelps"]
        tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2", padding_side="left")
        tokenizer.pad_token = tokenizer.eos_token

        model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
        model.generation_config.eos_token_id = None
        input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
        model = model.to(torch_device)
        input_ids = input_ids.to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            max_new_tokens=5,
            pad_token_id=tokenizer.eos_token_id,
            return_dict_in_generate=True,
            output_scores=True,
        )

        transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores)
        transition_scores = transition_scores.cpu().numpy()

        # pinned raw (unnormalized) per-token scores
        expected_scores = np.array(
            [
                [-57.8844, -60.45698, -70.16364, -65.50791, -66.35648],
                [-54.417572, -60.216614, -62.661243, -58.621933, -58.298683],
            ]
        )
        self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3))

    def test_transition_scores_greedy_search_normalized(self):
        """
        Test that `compute_transition_scores` is working as expected with greedy search, with `normalize_logits=True`
        """
        articles = ["Justin Timberlake", "Michael Phelps"]
        tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2", padding_side="left")
        tokenizer.pad_token = tokenizer.eos_token

        model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
        model.generation_config.eos_token_id = None
        input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
        model = model.to(torch_device)
        input_ids = input_ids.to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            max_new_tokens=5,
            pad_token_id=tokenizer.eos_token_id,
            return_dict_in_generate=True,
            output_scores=True,
        )

        transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True)
        transition_scores = transition_scores.cpu().numpy()

        # pinned log-softmax-normalized per-token scores
        expected_scores = np.array(
            [
                [-2.538938, -2.2694316, -2.1580915, -1.572299, -2.6719835],
                [-1.8826028, -2.2461371, -1.7556462, -2.9644494, -1.7996008],
            ]
        )
        self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3))

    def test_transition_scores_beam_search_encoder_decoder(self):
        """
        Test that `compute_transition_scores` is working as expected with beam search and encoder-decoder models
        """
        articles = [
            "Justin Timberlake and Jessica Biel, welcome to parenthood.",
            "Michael Phelps is arguably the most decorated Olympian of all time.",
        ]
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart")
        input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
        model = model.to(torch_device)
        input_ids = input_ids.to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            max_length=10,
            num_beams=4,
            num_return_sequences=2,
            eos_token_id=None,
            return_dict_in_generate=True,
            output_scores=True,
            length_penalty=0.0,
        )

        transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
        transition_scores = transition_scores.cpu().numpy()
        outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()
        # with length_penalty=0.0, the sequence score is the plain sum of per-token transition scores
        self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))

    def test_transition_scores_beam_search_encoder_decoder_with_eos(self):
        """
        Test that `compute_transition_scores` is working as expected with beam search and encoder-decoder models, when
        an EOS token is defined
        """
        articles = [
            "Justin Timberlake and Jessica Biel, welcome to parenthood.",
            "Michael Phelps is arguably the most decorated Olympian of all time.",
        ]
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart")

        input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
        model = model.to(torch_device)
        input_ids = input_ids.to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            max_length=10,
            num_beams=4,
            num_return_sequences=2,
            return_dict_in_generate=True,
            output_scores=True,
            length_penalty=0.0,
        )

        transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
        transition_scores = transition_scores.cpu().numpy()
        outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()
        # with length_penalty=0.0, the sequence score is the plain sum of per-token transition scores
        self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))

    def test_transition_scores_beam_search_decoder_only(self):
        """
        Test that `compute_transition_scores` is working as expected with beam search and decoder-only models
        """
        articles = [
            "Justin Timberlake",
            "Michael Phelps",
        ]
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        tokenizer.pad_token = tokenizer.eos_token

        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
        model = model.to(torch_device)
        input_ids = input_ids.to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            max_length=10,
            num_beams=4,
            num_return_sequences=2,
pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) @slow def test_transition_scores_early_stopping(self): """ Test that `compute_transition_scores` is working as expected with beam search and early stopping This is an aggressive test that makes sure that `beam_search's` transition scores are computed correctly for varying `num_return_sequences`, `num_beams` and `batch_size > 1` 2 x input_ids for "question: How are you? \n context: I had a long day, " """ input_ids = torch.tensor(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]]) model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids, max_length=10, return_dict_in_generate=True, output_scores=True, forced_eos_token_id=model.config.eos_token_id, num_beams=4, do_sample=False, num_return_sequences=3, length_penalty=0.0, ) transition_scores = model.compute_transition_scores( sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices ) transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores)) def test_encoder_decoder_generate_attention_mask(self): """ Test that `generate` automagically creates the correct `attention_mask` for encoder-decoder models (which has a different keyword) """ articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"] tokenizer = 
AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") # need extreme generation values here to force this test # to fail when `attention_mask` is not correctly treated in generate model = AutoModelForSeq2SeqLM.from_pretrained( "hf-internal-testing/tiny-random-bart", ) model.config.eos_token_id = None input_ids = tokenizer(articles[0], return_tensors="pt").input_ids input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids model = model.to(torch_device) input_ids = input_ids.to(torch_device) input_ids_batched = input_ids_batched.to(torch_device) generate_kwargs = { "return_dict_in_generate": True, "output_scores": True, "max_length": 50, "num_beams": 5, "num_return_sequences": 5, } output_sequences_batched = model.generate(input_ids=input_ids_batched, **generate_kwargs) output_sequences = model.generate(input_ids=input_ids, **generate_kwargs) batched_out = output_sequences_batched.sequences_scores out = output_sequences.sequences_scores batched_out = batched_out.cpu().numpy() out = out.cpu().numpy() diff = np.abs(np.sum(batched_out[:5]) - np.sum(out)) self.assertTrue(diff < 1e-4) def test_generate_input_ids_as_kwarg(self): """Test that `input_ids` work equally as a positional and keyword argument in decoder-only models""" article = "I need input_ids to generate" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15) input_ids = tokenizer(article, return_tensors="pt").input_ids model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 15)) def 
test_generate_input_ids_as_encoder_kwarg(self): """Test that `input_ids` work equally as a positional and keyword argument in encoder-decoder models""" article = "Justin Timberlake and Jessica Biel, welcome to parenthood." tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart") model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors="pt").input_ids model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids, max_length=5) output_sequences = model.generate(input_ids, max_length=5) output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 5)) def test_generate_inputs_and_encoder_kwargs(self): """ Test that an exception is thrown if the main tensor (`input_ids` in LLMs) is passed as both a positional and keyword argument """ article = "I need input_ids to generate" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10) input_ids = tokenizer(article, return_tensors="pt").input_ids with self.assertRaises(ValueError): model.generate(input_ids, input_ids=input_ids) def test_generate_too_many_encoder_kwargs(self): """Test that passing redundant inputs results in an exception (`input_ids` and `inputs_embeds` in LLMs)""" article = "I need input_ids to generate" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=10) input_ids = tokenizer(article, return_tensors="pt").input_ids with self.assertRaises(ValueError): model.generate(input_ids=input_ids, 
inputs_embeds=input_ids) def test_generate_input_features_as_encoder_kwarg(self): """Test that non-`input_ids` main model inputs are correctly handled as positional arguments""" input_features = floats_tensor((3, 80, 60)) model = AutoModelForSpeechSeq2Seq.from_pretrained( "hf-internal-testing/tiny-random-WhisperForConditionalGeneration" ) input_features.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = model.generate(input_features=input_features, max_length=5) output_sequences = model.generate(input_features, max_length=5) output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (3, 5)) def test_generate_encoder_outputs_attention_mask(self): """Test that `generate` can handle attention masks when the encoder outputs are passed""" input_features = floats_tensor((3, 80, 60)) attention_mask = torch.randint(0, 2, input_features.shape).to(torch_device) model = AutoModelForSpeechSeq2Seq.from_pretrained( "hf-internal-testing/tiny-random-WhisperForConditionalGeneration" ) input_features = input_features.to(torch_device) attention_mask = attention_mask.to(torch_device) model = model.to(torch_device) encoder = model.get_encoder() encoder_outputs = encoder(input_features) output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs) output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask) output_sequences_no_mask = output_sequences_no_mask.cpu().numpy() output_sequences_with_mask = output_sequences_with_mask.cpu().numpy() self.assertFalse(np.array_equal(output_sequences_no_mask, output_sequences_with_mask)) def test_eos_token_id_int_and_list_greedy_search(self): """Test that `generate` can handle multiple EOS tokens""" generation_kwargs = { "do_sample": False, "num_beams": 1, } expectation = 13 tokenizer = 
AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_generate_vision2text_conditioning(self): """Test that `decoder_input_ids` can be used to condition the generation in vision-to-text models""" pixel_values = floats_tensor((2, 3, 30, 30)) conditioning_input = torch.tensor([[10], [10]]) # this should be the 2nd output token, after the BOS token model = AutoModelForVision2Seq.from_pretrained( "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2" ) pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) conditioning_input = conditioning_input.to(torch_device) # we can condition on decoder_input_ids (expected decoder input) and input_ids (which we pipe internally as # decoder_input_ids, if the encoder is not a model with text input) output_sequences_decoder_input_ids = model.generate( pixel_values, max_length=5, decoder_input_ids=conditioning_input ) output_sequences_input_ids = model.generate(pixel_values, max_length=5, input_ids=conditioning_input) output_sequences_decoder_input_ids = output_sequences_decoder_input_ids.cpu().numpy() output_sequences_input_ids = output_sequences_input_ids.cpu().numpy() conditioning_input = conditioning_input.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_decoder_input_ids, output_sequences_input_ids)) self.assertTrue(np.array_equal(output_sequences_decoder_input_ids[:, 1:2], 
conditioning_input)) @require_read_token @slow @require_torch_accelerator def test_cache_device_map_with_vision_layer_device_map(self): """ Test that the cache device map is correctly set when the vision layer has a device map. Regression test for #36942 """ # gemma 3 uses hybrid cache, which can be compiled -> needs a device map at allocation time model_id = "google/gemma-3-4b-it" # important part of this device map: the `.layers.` pattern is NOT present in the decoder device_map = { "vision_tower.vision_model.embeddings": 0, "vision_tower.vision_model.encoder.layers.0": 0, "vision_tower.vision_model.encoder.layers.1": 0, "vision_tower.vision_model.encoder.layers.2": 0, "vision_tower.vision_model.encoder.layers.3": 0, "vision_tower.vision_model.encoder.layers.4": 0, "vision_tower.vision_model.encoder.layers.5": 0, "vision_tower.vision_model.encoder.layers.6": 0, "vision_tower.vision_model.encoder.layers.7": 0, "vision_tower.vision_model.encoder.layers.8": 0, "vision_tower.vision_model.encoder.layers.9": 0, "vision_tower.vision_model.encoder.layers.10": 0, "vision_tower.vision_model.encoder.layers.11": 0, "vision_tower.vision_model.encoder.layers.12": 0, "vision_tower.vision_model.encoder.layers.13": 0, "vision_tower.vision_model.encoder.layers.14": "cpu", "vision_tower.vision_model.encoder.layers.15": "cpu", "vision_tower.vision_model.encoder.layers.16": "cpu", "vision_tower.vision_model.encoder.layers.17": "cpu", "vision_tower.vision_model.encoder.layers.18": "cpu", "vision_tower.vision_model.encoder.layers.19": "cpu", "vision_tower.vision_model.encoder.layers.20": "cpu", "vision_tower.vision_model.encoder.layers.21": "cpu", "vision_tower.vision_model.encoder.layers.22": "cpu", "vision_tower.vision_model.encoder.layers.23": "cpu", "vision_tower.vision_model.encoder.layers.24": "cpu", "vision_tower.vision_model.encoder.layers.25": "cpu", "vision_tower.vision_model.encoder.layers.26": "cpu", "vision_tower.vision_model.post_layernorm": "cpu", 
"multi_modal_projector": "cpu", "language_model": "cpu", } model = AutoModelForImageTextToText.from_pretrained(model_id, device_map=device_map, dtype=torch.bfloat16) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(["This is a text input"], return_tensors="pt").to(model.device) # If the generate doesn't infer the DECODER device map correctly, this will fail _ = model.generate(**inputs, max_new_tokens=2, do_sample=False) @require_torch_accelerator @pytest.mark.torch_compile_test def test_cpu_offload_doesnt_compile(self): """Test that CPU offload doesn't trigger compilation""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") tokenized_inputs = tokenizer(["Hello world"], return_tensors="pt") generate_kwargs = {"max_new_tokens": 3, "cache_implementation": "static"} # Sanity check: if we don't specify a device map, the model will get compiled model_gpu = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto" ) input_ids = tokenized_inputs.input_ids.to(model_gpu.device) _ = model_gpu.generate(input_ids, **generate_kwargs) self.assertTrue(hasattr(model_gpu, "_compiled_call")) # If we specify a device map, the model will not be compiled # (as of April 2025, compiling with CPU offload results in a crash) device_map = { "model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": "cpu", "model.norm": "cpu", "lm_head": 0, } model_cpu = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map ) input_ids = tokenized_inputs.input_ids.to(model_cpu.device) _ = model_cpu.generate(input_ids, **generate_kwargs) self.assertFalse(hasattr(model_cpu, "_compiled_call")) def test_custom_generate_from_argument_in_generate(self): """Tests that the `custom_generate` argument is used when passed to `generate`""" model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", 
device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device) # Note: `transformers-community/custom_generate_example` has a custom decoding method with a `left_padding` # argument (int), which prepends as many pad tokens. gen_out = model.generate( **model_inputs, left_padding=5, max_new_tokens=5, custom_generate="transformers-community/custom_generate_example", trust_remote_code=True, ) text_output = tokenizer.decode(gen_out[0]) self.assertTrue(text_output.startswith("<unk><unk><unk><unk><unk>")) # <unk> is the pad token def test_custom_generate_from_model_repo_with_custom_generate_code(self): """ Tests that models from model repos containing custom generation code override `generate` with the custom code """ model = AutoModelForCausalLM.from_pretrained( "transformers-community/custom_generate_example", device_map="auto", trust_remote_code=True ) generate_signature = inspect.signature(model.generate) # `left_padding` is a custom argument, doesn't exist in the base `generate` method self.assertTrue(generate_signature.parameters.get("left_padding")) def test_custom_generate_bad_requirements(self): """Tests that we check the `requirements.txt` file from custom generation repos""" model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device) with self.assertRaises(ImportError): # Note: `transformers-community/custom_generate_bad_requirements` has a `requirements.txt` with # impossible requirements model.generate( **model_inputs, custom_generate="transformers-community/custom_generate_bad_requirements", trust_remote_code=True, ) def test_custom_generate_requires_trust_remote_code(self): """Tests that 
`trust_remote_code` is required when using `custom_generate`""" # Case 1: A model from a repo containing custom generation code must be loaded with `trust_remote_code` with self.assertRaises(ValueError): AutoModelForCausalLM.from_pretrained("transformers-community/custom_generate_example", device_map="auto") # Case 2: Using the `custom_generate` argument in `generate` requires `trust_remote_code` if the code is not # local model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device) with self.assertRaises(ValueError): model.generate(**model_inputs, custom_generate="transformers-community/custom_generate_example") def test_custom_generate_local_directory(self): """Tests that custom_generate works with local directories containing importable relative modules""" with tempfile.TemporaryDirectory() as tmp_dir: custom_generate_dir = Path(tmp_dir) / "custom_generate" custom_generate_dir.mkdir() with open(custom_generate_dir / "generate.py", "w") as f: f.write("from .helper import ret_success\ndef generate(*args, **kwargs):\n return ret_success()\n") with open(custom_generate_dir / "helper.py", "w") as f: f.write('def ret_success():\n return "success"\n') model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device) value = model.generate( **model_inputs, custom_generate=str(tmp_dir), trust_remote_code=True, ) assert value == "success" def test_custom_generate_callable(self): """Tests that passing a callable to `custom_generate` executes the callable decoding loop""" model = AutoModelForCausalLM.from_pretrained( 
"hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device) def custom_loop(model, input_ids, logits_processor, stopping_criteria, generation_config, **model_kwargs): # Check that generate() correctly prepares the stopping criteria assert stopping_criteria[0].max_length == input_ids.shape[1] + 3 return "callable_success" value = model.generate( **model_inputs, max_new_tokens=3, custom_generate=custom_loop, ) self.assertEqual(value, "callable_success") @pytest.mark.generate def test_generate_custom_cache_position(self): """ Regression test for #39261. Tests that we can continue generating from past key values, returned from a previous `generate` call, without the tokens that correspond to the cached part. This is achieved by passing manually creating `cache_position` -- this tests that it is piped correctly. 
""" model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device) generate_kwargs = { "use_cache": True, "do_sample": False, "return_dict_in_generate": True, "output_scores": True, } # Traditional way to continue generating text using kv cache # output2 # /~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ # input2 # /~~~~~~~~~~~~~~~~~~~~~~~~\ # output1 # /~~~~~~~~~~~~~~~~\ # input1 # /~~~~~~\ # IIIIIIIIOOOOOOOOOOIIIIIIIIOOOOOOOOOOOOOOOOOO inputs_1a = model_inputs outputs_1a = model.generate(**inputs_1a, **generate_kwargs, max_new_tokens=2) inputs_2a = {**model_inputs} inputs_2a["input_ids"] = torch.cat((outputs_1a.sequences, model_inputs["input_ids"]), dim=1) inputs_2a["attention_mask"] = torch.nn.functional.pad( inputs_1a["attention_mask"], (0, inputs_2a["input_ids"].shape[1] - inputs_1a["input_ids"].shape[1]), mode="constant", value=1, ) inputs_2a["past_key_values"] = outputs_1a.past_key_values outputs_2a = model.generate(**inputs_2a, **generate_kwargs, max_new_tokens=2) # Keep only the part of the output related to the second output + last token from the first output, for future # comparison traditional_outputs = copy.deepcopy(outputs_2a) traditional_outputs.sequences = traditional_outputs.sequences[:, outputs_1a.sequences.shape[1] - 1 :] # Continue generating text using kv cache, but without providing the cached part of the input in the input_ids. 
# cache_position # /~~~~~~~\ # inputs2["attention_mask"] # /~~~~~~~~~~~~~~~~~~~~~~~~~\ # output1 output2 # /~~~~~~~~~~~~~~~~\/~~~~~~~~~~~~~~~~~~~~~~~~~\ # input1 input2 # /~~~~~~\ /~~~~~~~\ # IIIIIIIIOOOOOOOOOOIIIIIIIIIOOOOOOOOOOOOOOOOOO # inputs_1b = model_inputs outputs_1b = model.generate(**inputs_1b, **generate_kwargs, max_new_tokens=2) inputs_2b = {**model_inputs} # The last output token isn't cached, so it needs to be included in the new input inputs_2b["input_ids"] = torch.cat((outputs_1b.sequences[:, -1:], model_inputs["input_ids"]), dim=1) inputs_2b["attention_mask"] = torch.nn.functional.pad( inputs_1b["attention_mask"], (0, outputs_1b.sequences.shape[1]), mode="constant", value=1, ) inputs_2b["past_key_values"] = outputs_1b.past_key_values cache_length_1b = outputs_1b.past_key_values[0][0].shape[-2] inputs_2b["cache_position"] = torch.arange( cache_length_1b, cache_length_1b + inputs_2b["input_ids"].shape[1], dtype=torch.int64, device=model.device, ) outputs_2b = model.generate(**inputs_2b, **generate_kwargs, max_new_tokens=2) incremental_outputs = outputs_2b # The two sets of generated text and past kv should be equal to each other self.assertTrue(has_similar_generate_outputs(traditional_outputs, incremental_outputs)) for layer_idx in range(len(traditional_outputs.past_key_values)): for kv_idx in range(len(traditional_outputs.past_key_values[layer_idx])): self.assertTrue( torch.allclose( traditional_outputs.past_key_values[layer_idx][kv_idx], incremental_outputs.past_key_values[layer_idx][kv_idx], ) ) @require_torch class TokenHealingTestCase(unittest.TestCase): @parameterized.expand( [ ("url", 'The link is <a href="http:', 'The link is <a href="http://'), # aggressive_healing: "http" shouldn't be replaced with "https" ("aggressive_healing", 'The link is <a href="http', 'The link is <a href="http'), ("trailing_whitespace", "I read a book about ", "I read a book about"), ("nothing_to_heal", "I read a book about", "I read a book about"), ("single_token", 
"I", "I"), ("empty_prompt", "", ""), ] ) def test_prompts(self, name, input, expected): model_name_or_path = "distilbert/distilgpt2" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True) completion_model = AutoModelForCausalLM.from_pretrained( model_name_or_path, device_map="auto", trust_remote_code=False, revision="main", use_cache=True, ) """ tokenizer.pad_token value can be empty but it is required in the latter codes so assigned it here with eos_token """ tokenizer.pad_token = tokenizer.eos_token input_ids = tokenizer(input, return_tensors="pt").input_ids.to(completion_model.device) healed_ids = completion_model.heal_tokens(input_ids, tokenizer=tokenizer) predicted = tokenizer.decode(healed_ids[0], skip_special_tokens=True) self.assertEqual(predicted, expected) def test_generate_from_inputs_embeds_with_bos_token_id_is_none(self): article = "Today a dragon flew over Paris." model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) inputs_embeds = model.get_input_embeddings()(input_ids) model.generate(inputs_embeds=inputs_embeds, max_length=20, bos_token_id=None) # bos_token_id is required when no input ids nor inputs_embeds is passed with self.assertRaises(ValueError): model.generate(max_length=20, bos_token_id=None) class TestAssistedCandidateGeneratorDifferentTokenizers(unittest.TestCase): def test_no_intersection(self): prompt = np.array([[1, 2, 3]]) prompt_plus_new_tokens = np.array([[4, 5, 6]]) result = AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag(prompt, prompt_plus_new_tokens) self.assertEqual(result, (None, None, None)) def test_complete_overlap(self): prompt = np.array([[1, 2, 3]]) prompt_plus_new_tokens = np.array([[1, 2, 3, 4, 5]]) discrep_length, new_tokens_only, discrep_only = 
AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag(
            prompt, prompt_plus_new_tokens
        )
        # Exact-suffix overlap: no discrepancy, only the two genuinely new tokens remain.
        self.assertEqual(discrep_length, 0)
        np.testing.assert_array_equal(new_tokens_only, np.array([[4, 5]]))
        np.testing.assert_array_equal(discrep_only, np.array([[]]))

    def test_partial_overlap(self):
        """Only the suffix [2, 3] of the prompt overlaps the start of the new sequence."""
        prompt = np.array([[1, 2, 3]])
        prompt_plus_new_tokens = np.array([[2, 3, 4, 5]])
        discrep_length, new_tokens_only, discrep_only = AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag(
            prompt, prompt_plus_new_tokens
        )
        # Partial overlap still yields no discrepancy; [4, 5] are the new tokens.
        self.assertEqual(discrep_length, 0)
        np.testing.assert_array_equal(new_tokens_only, np.array([[4, 5]]))
        np.testing.assert_array_equal(discrep_only, np.array([[]]))

    def test_no_new_tokens(self):
        """Identical sequences: nothing new and no discrepancy is reported."""
        prompt = np.array([[1, 2, 3]])
        prompt_plus_new_tokens = np.array([[1, 2, 3]])
        discrep_length, new_tokens_only, discrep_only = AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag(
            prompt, prompt_plus_new_tokens
        )
        self.assertEqual(discrep_length, 0)
        np.testing.assert_array_equal(new_tokens_only, np.array([[]]))
        np.testing.assert_array_equal(discrep_only, np.array([[]]))


class TestAssistedCandidateGeneratorUpdateStrategy(unittest.TestCase):
    """
    Tests for `AssistedCandidateGenerator.update_candidate_strategy`: checks how the
    match/probability history and the assistant confidence threshold are updated after a
    speculative-decoding step, both with sklearn available and with it patched out.
    """

    def setUp(self):
        checkpoint = "EleutherAI/pythia-160m-deduped"
        self.assistant_model = AutoModelForCausalLM.from_pretrained(checkpoint)
        # Starting confidence threshold; several tests assert how (or whether) it changes.
        self.assistant_model.generation_config.assistant_confidence_threshold = 0.4
        self.model_kwargs = {}
        self.input_ids = torch.randint(1, 10, (1, 9))
        self.candidate_generator = AssistedCandidateGenerator(
            input_ids=self.input_ids,
            assistant_model=self.assistant_model,
            generation_config=self.assistant_model.generation_config,
            model_kwargs=self.model_kwargs,
        )
        # Pre-seeded per-position candidate probabilities (one per input token).
        self.candidate_generator.probs = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
        self.original_probs = self.candidate_generator.probs
        self.original_threshold = self.assistant_model.generation_config.assistant_confidence_threshold

    def assert_no_sklearn(self):
        # When sklearn is unavailable, the strategy update must be a no-op:
        # matches, probs and the confidence threshold all keep their original values.
        with patch("transformers.utils.import_utils._sklearn_available", False):
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, self.original_matches)
            self.assertEqual(self.candidate_generator.probs, self.original_probs)
            self.assertEqual(
                self.assistant_model.generation_config.assistant_confidence_threshold, self.original_threshold
            )

    @parameterized.expand([(is_sklearn_available(),), (False,)])
    def test_update_candidate_strategy_no_matches_short(self, sklearn_available):
        # Empty history, zero matches: a single miss is recorded and the threshold is unchanged.
        self.original_matches = []
        self.candidate_generator.matches = self.original_matches
        self.num_matches = 0

        if sklearn_available:
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, [0])
            self.assertEqual(self.candidate_generator.probs, [0.9])
            self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.4)
        else:
            self.assert_no_sklearn()

    @parameterized.expand([(is_sklearn_available(),), (False,)])
    def test_update_candidate_strategy_with_mix_matches_3(self, sklearn_available):
        # Mixed history + 3 new matches: 3 hits then a miss are appended, threshold drops to 0.2.
        self.original_matches = [1, 0, 1, 0, 1]
        self.candidate_generator.matches = self.original_matches
        self.num_matches = 3

        if sklearn_available:
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, [1, 0, 1, 0, 1, 1, 1, 1, 0])
            self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
            self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.2)
        else:
            self.assert_no_sklearn()

    @parameterized.expand([(is_sklearn_available(),), (False,)])
    def test_update_candidate_strategy_with_matches_4(self, sklearn_available):
        # All candidates matched (no trailing miss): threshold stays at its initial value.
        self.original_matches = [1, 1, 1, 1, 1]
        self.candidate_generator.matches = self.original_matches
        self.num_matches = 4

        if sklearn_available:
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 1, 1, 1])
            self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
            self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.4)
        else:
            self.assert_no_sklearn()

    @parameterized.expand([(is_sklearn_available(),), (False,)])
    def test_update_candidate_strategy_with_matches_3(self, sklearn_available):
        self.original_matches = [1, 1, 1, 1, 1]
        self.candidate_generator.matches = self.original_matches
        self.num_matches = 3

        if sklearn_available:
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 1, 1, 0])
            self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
            self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.2)
        else:
            self.assert_no_sklearn()

    @parameterized.expand([(is_sklearn_available(),), (False,)])
    def test_update_candidate_strategy_with_matches_2(self, sklearn_available):
        # Fewer matches: history and probs are truncated accordingly, threshold becomes 0.3.
        self.original_matches = [1, 1, 1, 1, 1]
        self.candidate_generator.matches = self.original_matches
        self.num_matches = 2

        if sklearn_available:
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 1, 0])
            self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2])
            self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.3)
        else:
            self.assert_no_sklearn()

    @parameterized.expand([(is_sklearn_available(),), (False,)])
    def test_update_candidate_strategy_with_matches_1(self, sklearn_available):
        self.original_matches = [1, 1, 1, 1, 1]
        self.candidate_generator.matches = self.original_matches
        self.num_matches = 1

        if sklearn_available:
            self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
            self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 0])
            self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3])
            self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.4)
        else:
            self.assert_no_sklearn()


def has_similar_generate_outputs(output_1, output_2, atol=1e-5, rtol=1e-5) -> bool:
    """
    Returns a boolean indicating whether a pair of generate outputs are similar. Two `generate` call outputs are
    considered similar in the following situations:
    1. The sequences are the same
    2. The sequences are different, but the scores up to (and including) the first mismatch are nearly identical

    Args:
        output_1 (`GenerateOutput`):
            The first `generate` call output.
        output_2 (`GenerateOutput`):
            The second `generate` call output.
        atol (`float`, *optional*, defaults to 1e-5):
            The absolute tolerance for the scores.
        rtol (`float`, *optional*, defaults to 1e-5):
            The relative tolerance for the scores.

    Returns:
        A boolean indicating whether the two generate outputs are similar.
    """
    # scores doesn't include data regarding decoder input tokens
    decoder_input_length = output_1.sequences.shape[1] - len(output_1.scores)
    output_matches = output_1.sequences == output_2.sequences
    has_matching_outputs = output_matches.all()
    has_matching_scores = None
    if not has_matching_outputs:
        # Sequences diverged somewhere: per batch row, find the first mismatching position
        # and only compare the score distributions at that position.
        for batch_idx in range(output_1.sequences.shape[0]):
            batch_matches = output_matches[batch_idx]
            if batch_matches.all():
                continue
            first_mismatch_idx = batch_matches.int().argmin()  # gets the index of the first False
            first_mismatch_idx -= decoder_input_length  # shift into `scores` indexing
            output_1_first_mismatch_scores = output_1.scores[first_mismatch_idx][batch_idx]
            output_2_first_mismatch_scores = output_2.scores[first_mismatch_idx][batch_idx]
            # NOTE(review): `rtol=atol, atol=rtol` looks swapped; harmless while both
            # defaults are 1e-5, but confirm the intent before changing either default.
            has_matching_scores = torch.allclose(
                output_1_first_mismatch_scores, output_2_first_mismatch_scores, rtol=atol, atol=rtol
            )
            if not has_matching_scores:
                break
    return has_matching_outputs or has_matching_scores
transformers/tests/generation/test_utils.py/0
{ "file_path": "transformers/tests/generation/test_utils.py", "repo_id": "transformers", "token_count": 118895 }
577
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers.image_utils import ChannelDimension, PILImageResampling
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin


if is_vision_available():
    from PIL import Image

    from transformers import AriaImageProcessor


if is_torch_available():
    import torch


class AriaImageProcessingTester:
    """Holds the image-processor configuration used by the test class below and
    builds synthetic image inputs (PIL / numpy / torch) of random resolutions."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_images=1,
        min_resolution=30,
        max_resolution=40,
        size=None,
        max_image_size=980,
        min_image_size=336,
        split_resolutions=None,
        split_image=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_convert_rgb=True,
        resample=PILImageResampling.BICUBIC,
    ):
        self.size = size if size is not None else {"longest_edge": max_resolution}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_images = num_images
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.resample = resample
        self.max_image_size = max_image_size
        self.min_image_size = min_image_size
        self.split_resolutions = split_resolutions if split_resolutions is not None else [[980, 980]]
        self.split_image = split_image
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        # kwargs dict used to instantiate AriaImageProcessor in the tests.
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "max_image_size": self.max_image_size,
            "min_image_size": self.min_image_size,
            "split_resolutions": self.split_resolutions,
            "split_image": self.split_image,
            "do_convert_rgb": self.do_convert_rgb,
            "do_normalize": self.do_normalize,
            "resample": self.resample,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to AriaImageProcessor,
        assuming do_resize is set to True. The expected size in that case the max image size.
        """
        return self.max_image_size, self.max_image_size

    def expected_output_image_shape(self, images):
        # Output is always (C, max_image_size, max_image_size) regardless of input resolution.
        height, width = self.get_expected_values(images, batched=True)
        return self.num_channels, height, width

    def prepare_image_inputs(
        self,
        batch_size=None,
        min_resolution=None,
        max_resolution=None,
        num_channels=None,
        num_images=None,
        size_divisor=None,
        equal_resolution=False,
        numpify=False,
        torchify=False,
    ):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.

        One can specify whether the images are of the same resolution or not.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        batch_size = batch_size if batch_size is not None else self.batch_size
        min_resolution = min_resolution if min_resolution is not None else self.min_resolution
        max_resolution = max_resolution if max_resolution is not None else self.max_resolution
        num_channels = num_channels if num_channels is not None else self.num_channels
        num_images = num_images if num_images is not None else self.num_images

        images_list = []
        for i in range(batch_size):
            images = []
            for j in range(num_images):
                if equal_resolution:
                    width = height = max_resolution
                else:
                    # To avoid getting image width/height 0
                    if size_divisor is not None:
                        # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
                        min_resolution = max(size_divisor, min_resolution)
                    width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
                images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
            images_list.append(images)

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]

        if torchify:
            images_list = [[torch.from_numpy(image) for image in images] for images in images_list]

        if numpify:
            # Numpy images are typically in channels last format
            images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]

        return images_list


@require_torch
@require_vision
class AriaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Test suite for `AriaImageProcessor` (properties plus PIL/numpy/torch call paths)."""

    image_processing_class = AriaImageProcessor if is_vision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = AriaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every configurable attribute it was built with.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
        self.assertTrue(hasattr(image_processing, "max_image_size"))
        self.assertTrue(hasattr(image_processing, "min_image_size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "split_image"))

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            # NOTE(review): the loop variable is unused — `self.image_processing_class`
            # (the slow processor) is instantiated on every iteration; confirm intent.
            image_processing = self.image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for sample_images in image_inputs:
                for image in sample_images:
                    self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_numpy_4_channels(self):
        # Aria always processes images as RGB, so it always returns images with 3 channels
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processor_dict = self.image_processor_dict
            image_processing = self.image_processing_class(**image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for sample_images in image_inputs:
                for image in sample_images:
                    self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = self.image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
            for images in image_inputs:
                for image in images:
                    self.assertIsInstance(image, Image.Image)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = self.image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
            for images in image_inputs:
                for image in images:
                    self.assertIsInstance(image, torch.Tensor)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            self.assertEqual(
                tuple(encoded_images.shape),
                (self.image_processor_tester.batch_size, *expected_output_image_shape),
            )

    def test_pad_for_patching(self):
        # Exercises the private `_pad_for_patching` helper with odd target sizes, using the
        # channel layout each processor variant expects (fast: its own data_format; slow: HWC).
        for image_processing_class in self.image_processor_list:
            if image_processing_class == self.fast_image_processing_class:
                numpify = False
                torchify = True
                input_data_format = image_processing_class.data_format
            else:
                numpify = True
                torchify = False
                input_data_format = ChannelDimension.LAST
            image_processing = image_processing_class(**self.image_processor_dict)
            # Create odd-sized images
            image_input = self.image_processor_tester.prepare_image_inputs(
                batch_size=1,
                max_resolution=400,
                num_images=1,
                equal_resolution=True,
                numpify=numpify,
                torchify=torchify,
            )[0][0]
            self.assertIn(image_input.shape, [(3, 400, 400), (400, 400, 3)])

            # Test odd-width
            image_shape = (400, 601)
            encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format)
            encoded_image_shape = (
                encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:]
            )
            self.assertEqual(encoded_image_shape, image_shape)

            # Test odd-height
            image_shape = (503, 400)
            encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format)
            encoded_image_shape = (
                encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:]
            )
            self.assertEqual(encoded_image_shape, image_shape)

    def test_get_num_patches_without_images(self):
        # Patch count can be queried from dimensions alone, without running actual images.
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={})
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=300, width=500, images_kwargs={"split_image": True}
            )
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=100, width=100, images_kwargs={"split_image": True, "max_image_size": 200}
            )
            self.assertEqual(num_patches, 19)
transformers/tests/models/aria/test_image_processing_aria.py/0
{ "file_path": "transformers/tests/models/aria/test_image_processing_aria.py", "repo_id": "transformers", "token_count": 6234 }
578
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import BeitImageProcessor

    if is_torchvision_available():
        from transformers import BeitImageProcessorFast


class BeitImageProcessingTester:
    """Holds the BEiT image-processor configuration used by the test class below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        # kwargs dict used to instantiate BeitImageProcessor(Fast) in the tests.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }

    def expected_output_image_shape(self, images):
        # After resize + center crop, outputs are (C, crop_h, crop_w).
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


def prepare_semantic_single_inputs():
    # One (image, segmentation map) pair from the ADE20k test fixtures.
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    example = ds[0]
    return example["image"], example["map"]


def prepare_semantic_batch_inputs():
    # Two (image, segmentation map) pairs from the ADE20k test fixtures.
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    return list(ds["image"][:2]), list(ds["map"][:2])


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Test suite for the slow and fast BEiT image processors, including
    segmentation-map handling and slow/fast equivalence checks."""

    image_processing_class = BeitImageProcessor if is_vision_available() else None
    fast_image_processing_class = BeitImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_center_crop"))
            self.assertTrue(hasattr(image_processing, "center_crop"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_reduce_labels"))

    def test_image_processor_from_dict_with_kwargs(self):
        # `from_dict` kwargs must override the serialized values (ints expand to square sizes).
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 20, "width": 20})
            self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
            self.assertEqual(image_processor.do_reduce_labels, False)

            image_processor = image_processing_class.from_dict(
                self.image_processor_dict, size=42, crop_size=84, do_reduce_labels=True
            )
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})
            self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
            self.assertEqual(image_processor.do_reduce_labels, True)

    def test_call_segmentation_maps(self):
        # Verifies pixel_values/labels shapes, dtype (long) and value range (0..255)
        # for single and batched inputs, both tensor and PIL.
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
            maps = []
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)
                maps.append(torch.zeros(image.shape[-2:]).long())

            # Test not batched input
            encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    1,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    1,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

            # Test batched
            encoding = image_processing(image_inputs, maps, return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

            # Test not batched input (PIL images)
            image, segmentation_map = prepare_semantic_single_inputs()
            encoding = image_processing(image, segmentation_map, return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    1,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    1,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

            # Test batched input (PIL images)
            images, segmentation_maps = prepare_semantic_batch_inputs()
            encoding = image_processing(images, segmentation_maps, return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    2,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    2,
                    self.image_processor_tester.crop_size["height"],
                    self.image_processor_tester.crop_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)

            # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
            image, map = prepare_semantic_single_inputs()
            encoding = image_processing(image, map, return_tensors="pt")
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 150)

            # With reduce_labels, background (0) wraps around to 255 and classes shift down by one.
            image_processing.do_reduce_labels = True
            encoding = image_processing(image, map, return_tensors="pt")
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_slow_fast_equivalence(self):
        # Slow and fast processors must produce equivalent pixel_values and labels
        # for a single image + segmentation map.
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image, dummy_map = prepare_semantic_single_inputs()

        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
        image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")

        self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values)
        self._assert_slow_fast_tensors_equivalence(
            image_encoding_slow.labels.float(), image_encoding_fast.labels.float()
        )

    def test_slow_fast_equivalence_batched(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )

        dummy_images, dummy_maps = prepare_semantic_batch_inputs()

        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")

        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
        self._assert_slow_fast_tensors_equivalence(encoding_slow.labels.float(), encoding_fast.labels.float())
transformers/tests/models/beit/test_image_processing_beit.py/0
{ "file_path": "transformers/tests/models/beit/test_image_processing_beit.py", "repo_id": "transformers", "token_count": 6176 }
579
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_vision_available():
    from transformers import BlipImageProcessor

    if is_torchvision_available():
        from transformers import BlipImageProcessorFast


class BlipImageProcessingTester:
    """Holds the BLIP image-processor configuration (CLIP-style mean/std defaults)
    used by the test classes below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        do_pad=False,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        # kwargs dict used to instantiate BlipImageProcessor(Fast) in the tests.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_pad": self.do_pad,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class BlipImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Property checks for the slow/fast BLIP image processors (3-channel inputs)."""

    image_processing_class = BlipImageProcessor if is_vision_available() else None
    fast_image_processing_class = BlipImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = BlipImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processor, "do_resize"))
            self.assertTrue(hasattr(image_processor, "size"))
            self.assertTrue(hasattr(image_processor, "do_normalize"))
            self.assertTrue(hasattr(image_processor, "image_mean"))
            self.assertTrue(hasattr(image_processor, "image_std"))
            self.assertTrue(hasattr(image_processor, "do_convert_rgb"))


@require_torch
@require_vision
class BlipImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
    """Same property checks for the 4-channel variant of the test suite.

    NOTE(review): setUp instantiates the default 3-channel tester — confirm whether a
    num_channels=4 tester was intended here.
    """

    image_processing_class = BlipImageProcessor if is_vision_available() else None
    fast_image_processing_class = BlipImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = BlipImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processor, "do_resize"))
            self.assertTrue(hasattr(image_processor, "size"))
            self.assertTrue(hasattr(image_processor, "do_normalize"))
            self.assertTrue(hasattr(image_processor, "image_mean"))
            self.assertTrue(hasattr(image_processor, "image_std"))
            self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
transformers/tests/models/blip/test_image_processing_blip.py/0
{ "file_path": "transformers/tests/models/blip/test_image_processing_blip.py", "repo_id": "transformers", "token_count": 2098 }
580
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ColPali model."""

import collections
import gc
import re
import unittest
from typing import ClassVar

import pytest
import torch
from datasets import load_dataset

from tests.test_configuration_common import ConfigTester
from tests.test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from transformers import (
    is_torch_available,
)
from transformers.models.colpali.configuration_colpali import ColPaliConfig
from transformers.models.colpali.modeling_colpali import ColPaliForRetrieval, ColPaliForRetrievalOutput
from transformers.models.colpali.processing_colpali import ColPaliProcessor
from transformers.testing_utils import (
    backend_empty_cache,
    require_torch,
    require_vision,
    slow,
    torch_device,
)


if is_torch_available():
    import torch

    from transformers.pytorch_utils import id_tensor_storage


class ColPaliForRetrievalModelTester:
    """Builds a tiny ColPali (PaliGemma-based) config and synthetic inputs for the
    `ColPaliForRetrieval` model tests below."""

    def __init__(
        self,
        parent,
        ignore_index=-100,
        image_token_index=0,
        projector_hidden_act="gelu",
        seq_length=25,
        vision_feature_select_strategy="default",
        vision_feature_layer=-1,
        projection_dim=32,
        text_config={
            "model_type": "gemma",
            "seq_length": 128,
            "is_training": True,
            "use_token_type_ids": False,
            "use_labels": True,
            "vocab_size": 99,
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "num_key_value_heads": 1,
            "head_dim": 8,
            "intermediate_size": 37,
            "hidden_activation": "gelu_pytorch_tanh",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "type_vocab_size": 16,
            "type_sequence_label_size": 2,
            "initializer_range": 0.02,
            "num_labels": 3,
            "num_choices": 4,
            "pad_token_id": 1,
        },
        is_training=False,
        vision_config={
            "use_labels": True,
            "image_size": 20,
            "patch_size": 5,
            "num_image_tokens": 4,
            "num_channels": 3,
            "is_training": True,
            "hidden_size": 32,
            "projection_dim": 32,
            "num_key_value_heads": 1,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "dropout": 0.1,
            "attention_dropout": 0.1,
            "initializer_range": 0.02,
        },
        use_cache=False,
        embedding_dim=128,
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        self.text_config = text_config
        self.vision_config = vision_config
        self.seq_length = seq_length
        self.projection_dim = projection_dim
        self.pad_token_id = text_config["pad_token_id"]

        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training

        self.batch_size = 3
        self.num_channels = vision_config["num_channels"]
        self.image_size = vision_config["image_size"]
        self.encoder_seq_length = seq_length
        self.use_cache = use_cache

        self.embedding_dim = embedding_dim
        # Wrapped VLM (PaliGemma) configuration passed through to ColPaliConfig.
        self.vlm_config = {
            "model_type": "paligemma",
            "text_config": self.text_config,
            "vision_config": self.vision_config,
            "ignore_index": self.ignore_index,
            "image_token_index": self.image_token_index,
            "projector_hidden_act": self.projector_hidden_act,
            "projection_dim": self.projection_dim,
            "vision_feature_select_strategy": self.vision_feature_select_strategy,
            "vision_feature_layer": self.vision_feature_layer,
        }

    def get_config(self):
        return ColPaliConfig(
            vlm_config=self.vlm_config,
            embedding_dim=self.embedding_dim,
        )

    def prepare_config_and_inputs(self):
        # Random pixel values shaped (batch, channels, H, W) for the tiny vision tower.
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.vlm_config.text_config.vocab_size - 1) + 1
        attention_mask = input_ids.ne(1).to(torch_device)
        # set the 16 first tokens to be image, and ensure that no other tokens are image tokens
        # do not change this unless you modified image size or patch size
        input_ids[input_ids == config.vlm_config.image_token_index] = self.pad_token_id
        input_ids[:, :16] = config.vlm_config.image_token_index
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": input_ids,
        }
        return config, inputs_dict


@require_torch
class ColPaliForRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Model tester for `ColPaliForRetrieval`.
""" all_model_classes = (ColPaliForRetrieval,) if is_torch_available() else () fx_compatible = False test_torchscript = False test_pruning = False test_resize_embeddings = True test_head_masking = False def setUp(self): self.model_tester = ColPaliForRetrievalModelTester(self) self.config_tester = ConfigTester(self, config_class=ColPaliConfig, has_text_modality=False) @slow @require_vision def test_colpali_forward_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) with torch.no_grad(): outputs = model(**inputs, return_dict=True) self.assertIsInstance(outputs, ColPaliForRetrievalOutput) # ColPali uses a VLM internally which has its state dict keys renames with `conversion_mapping` # This test is written assuming that `_tied_weights_keys` are not going to be renamed, thus we # overwrite it. NOTE: ColPali inference/save/load works without issues, it is the testcase # that makes general assumptions def test_tied_weights_keys(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.get_text_config().tie_word_embeddings = True for model_class in self.all_model_classes: model_tied = model_class(config) ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) # These are all the pointers of shared tensors. 
tied_params = [names for _, names in ptrs.items() if len(names) > 1] tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else [] # Detect we get a hit for each key for key in tied_weight_keys: key = key.replace(".language_model", "") # remove 'language_model' prefix is_tied_key = any(re.search(key, p) for group in tied_params for p in group) self.assertTrue(is_tied_key, f"{key} is not a tied weight key for {model_class}.") # Removed tied weights found from tied params -> there should only be one left after for key in tied_weight_keys: key = key.replace(".language_model", "") # remove 'language_model' prefix for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] tied_params = [group for group in tied_params if len(group) > 1] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="From PaliGemma: Some undefined behavior encountered with test versions of this model. Skip for now." 
) def test_model_parallelism(self): pass @unittest.skip( reason="PaliGemma's SigLip encoder uses the same initialization scheme as the Flax original implementation" ) def test_initialization(self): pass # TODO extend valid outputs to include this test @Molbap @unittest.skip(reason="PaliGemma has currently one output format.") def test_model_outputs_equivalence(self): pass @unittest.skip(reason="Pass because ColPali requires `attention_mask is not None`") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="Pass because ColPali requires `attention_mask is not None`") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @require_torch class ColPaliModelIntegrationTest(unittest.TestCase): model_name: ClassVar[str] = "vidore/colpali-v1.2-hf" def setUp(self): self.processor = ColPaliProcessor.from_pretrained(self.model_name) def tearDown(self): gc.collect() backend_empty_cache(torch_device) @slow def test_model_integration_test(self): """ Test if the model is able to retrieve the correct pages for a small and easy dataset. 
""" model = ColPaliForRetrieval.from_pretrained( self.model_name, dtype=torch.bfloat16, device_map=torch_device, ).eval() # Load the test dataset ds = load_dataset("hf-internal-testing/document-visual-retrieval-test", split="test") # Preprocess the examples batch_images = self.processor(images=ds["image"]).to(torch_device) batch_queries = self.processor(text=ds["query"]).to(torch_device) # Run inference with torch.inference_mode(): image_embeddings = model(**batch_images).embeddings query_embeddings = model(**batch_queries).embeddings # Compute retrieval scores scores = self.processor.score_retrieval( query_embeddings=query_embeddings, passage_embeddings=image_embeddings, ) # (num_queries, num_passages) assert scores.ndim == 2, f"Expected 2D tensor, got {scores.ndim}" assert scores.shape == (len(ds), len(ds)), f"Expected shape {(len(ds), len(ds))}, got {scores.shape}" # Check if the maximum scores per row are in the diagonal of the matrix score self.assertTrue((scores.argmax(axis=1) == torch.arange(len(ds), device=scores.device)).all()) # Further validation: fine-grained check, with a hardcoded score from the original implementation expected_scores = torch.tensor( [ [15.5625, 6.5938, 14.4375], [12.2500, 16.2500, 11.0000], [15.0625, 11.7500, 21.0000], ], dtype=scores.dtype, ) assert torch.allclose(scores, expected_scores, atol=1), f"Expected scores {expected_scores}, got {scores}"
transformers/tests/models/colpali/test_modeling_colpali.py/0
{ "file_path": "transformers/tests/models/colpali/test_modeling_colpali.py", "repo_id": "transformers", "token_count": 5848 }
581
# Copyright 2018 HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.models.cpm.tokenization_cpm import CpmTokenizer from transformers.testing_utils import custom_tokenizers @custom_tokenizers class CpmTokenizationTest(unittest.TestCase): # There is no `CpmModel` def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def test_pre_tokenization(self): tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate") text = "Hugging Face大法好,谁用谁知道。" normalized_text = "Hugging Face大法好,谁用谁知道。<unk>" bpe_tokens = "▁Hu gg ing ▁ ▂ ▁F ace ▁大法 ▁好 ▁ , ▁谁 ▁用 ▁谁 ▁知 道 ▁ 。".split() tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [13789, 13283, 1421, 8, 10, 1164, 13608, 16528, 63, 8, 9, 440, 108, 440, 121, 90, 8, 12, 0] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) reconstructed_text = tokenizer.decode(input_bpe_tokens) self.assertEqual(reconstructed_text, normalized_text)
transformers/tests/models/cpm/test_tokenization_cpm.py/0
{ "file_path": "transformers/tests/models/cpm/test_tokenization_cpm.py", "repo_id": "transformers", "token_count": 782 }
582
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DecisionTransformer model.""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel class DecisionTransformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, is_training=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.act_dim = act_dim self.state_dim = state_dim self.hidden_size = hidden_size self.is_training = is_training def prepare_config_and_inputs(self): states = floats_tensor((self.batch_size, self.seq_length, self.state_dim)) actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim)) rewards = floats_tensor((self.batch_size, self.seq_length, 1)) returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1)) timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000) attention_mask = random_attention_mask((self.batch_size, self.seq_length)) config = self.get_config() return ( 
config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def get_config(self): return DecisionTransformerConfig( batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, ) def create_and_check_model( self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ): model = DecisionTransformerModel(config=config) model.to(torch_device) model.eval() result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask) self.parent.assertEqual(result.state_preds.shape, states.shape) self.parent.assertEqual(result.action_preds.shape, actions.shape) self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) = config_and_inputs inputs_dict = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class DecisionTransformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DecisionTransformerModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids test_generate_without_input_ids = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features test_pruning = False test_resize_embeddings = False test_head_masking = False test_attention_outputs = False test_hidden_states_output = 
False test_inputs_embeds = False test_gradient_checkpointing = False test_torchscript = False def setUp(self): self.model_tester = DecisionTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "edbeeching/decision-transformer-gym-hopper-medium" model = DecisionTransformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="Model does not have input embeddings") def test_model_get_set_embeddings(self): pass @require_torch class DecisionTransformerModelIntegrationTest(unittest.TestCase): @slow def test_autoregressive_prediction(self): """ An integration test that performs autoregressive prediction of state, action and return from a sequence of state, actions and returns. Test is performed over two timesteps. 
""" NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform TARGET_RETURN = 10 # defined by the RL environment, may be normalized model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert") model = model.to(torch_device) config = model.config torch.manual_seed(0) state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32) # env.reset() expected_outputs = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device ) returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1) states = state actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32) rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32) timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1) for step in range(NUM_STEPS): actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1) rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1) attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device) with torch.no_grad(): _, action_pred, _ = model( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) self.assertEqual(action_pred.shape, actions.shape) torch.testing.assert_close(action_pred[0, -1], expected_outputs[step], rtol=1e-4, atol=1e-4) state, reward, _, _ = ( # env.step(action) torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32), 1.0, False, {}, ) actions[-1] = action_pred[0, -1] states = torch.cat([states, state], dim=1) pred_return = returns_to_go[0, -1] - reward returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1) timesteps = torch.cat( [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step 
+ 1)], dim=1 )
transformers/tests/models/decision_transformer/test_modeling_decision_transformer.py/0
{ "file_path": "transformers/tests/models/decision_transformer/test_modeling_decision_transformer.py", "repo_id": "transformers", "token_count": 4114 }
583
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DPT model.""" import unittest from transformers import Dinov2Config, DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device from transformers.utils.import_utils import get_torch_major_and_minor_version from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DPTForDepthEstimation from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size 
self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size # DPT's sequence length self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( backbone_config=self.get_backbone_config(), backbone=None, neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict 
@require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (DPTForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = {"depth-estimation": DPTForDepthEstimation} if is_torch_available() else {} test_pruning = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True test_torch_exportable_strictly = get_torch_major_and_minor_version() != "2.7" def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not 
model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "Intel/dpt-large" model = DPTForDepthEstimation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class 
DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation_dinov2(self): image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti") model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 576, 736)) self.assertEqual(predicted_depth.shape, expected_shape) expectations = Expectations( { (None, None): [[6.0336, 7.1502, 7.4130], [6.8977, 7.2383, 7.2268], [7.9180, 8.0525, 8.0134]], ("cuda", 8): [[6.0350, 7.1518, 7.4144], [6.8992, 7.2396, 7.2280], [7.9194, 8.0538, 8.0145]], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4) def test_inference_depth_estimation_beit(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-base-384").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expectations = Expectations( { (None, None): [ [2669.7061, 2663.7144, 2674.9399], [2633.9326, 2650.9092, 2665.4270], [2621.8271, 2632.0129, 2637.2290], ], ("cuda", 8): [ [2669.4292, 2663.4121, 2674.6233], [2633.7400, 2650.7026, 2665.2085], [2621.6572, 2631.8452, 2637.0525], ], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], 
expected_slice, rtol=2e-4, atol=2e-4) def test_inference_depth_estimation_swinv2(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 256, 256)) self.assertEqual(predicted_depth.shape, expected_shape) expectations = Expectations( { (None, None): [ [1032.7719, 1025.1886, 1030.2661], [1023.7619, 1021.0075, 1024.9121], [1022.5667, 1018.8522, 1021.4145], ], ("cuda", 8): [ [1032.7170, 1025.0629, 1030.1941], [1023.7309, 1020.9786, 1024.8594], [1022.5233, 1018.8235, 1021.3312], ], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py/0
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py", "repo_id": "transformers", "token_count": 6064 }
584
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Encodec model.""" import copy import inspect import os import tempfile import unittest import numpy as np from datasets import Audio, load_dataset from parameterized import parameterized from transformers import AutoProcessor, EncodecConfig from transformers.testing_utils import ( is_torch_available, require_torch, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EncodecFeatureExtractor, EncodecModel def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} decoder_dict = {"decoder_input_ids": decoder_input_ids} if decoder_input_ids is not None else {} return {**encoder_dict, **decoder_dict} @require_torch class EncodecModelTester: def __init__( self, parent, # `batch_size` needs to be an even number if the model has some outputs with batch dim != 0. 
        batch_size=12,
        num_channels=2,
        is_training=False,
        intermediate_size=40,
        hidden_size=32,
        num_filters=8,
        num_residual_layers=1,
        upsampling_ratios=[8, 4],  # NOTE(review): mutable default; harmless here since it is never mutated
        num_lstm_layers=1,
        codebook_size=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.intermediate_size = intermediate_size
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.num_lstm_layers = num_lstm_layers
        self.codebook_size = codebook_size

    def prepare_config_and_inputs(self):
        # Random float waveform shaped (batch, channels, samples).
        input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
        config = self.get_config()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def prepare_config_and_inputs_for_model_class(self, model_class):
        config, inputs_dict = self.prepare_config_and_inputs()
        # Random int32 codebook indices plus a single "no scale" entry for the decoder path.
        inputs_dict["audio_codes"] = ids_tensor([1, self.batch_size, 1, self.num_channels], self.codebook_size).type(
            torch.int32
        )
        inputs_dict["audio_scales"] = [None]

        return config, inputs_dict

    def prepare_config_and_inputs_for_normalization(self):
        # Same random waveform as above, but routed through the feature extractor with
        # `config.normalize = True` so the model sees normalized, padded inputs.
        input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
        config = self.get_config()
        config.normalize = True

        processor = EncodecFeatureExtractor(feature_size=config.audio_channels, sampling_rate=config.sampling_rate)
        input_values = input_values.tolist()
        inputs_dict = processor(
            input_values, sampling_rate=config.sampling_rate, padding=True, return_tensors="pt"
        ).to(torch_device)

        return config, inputs_dict

    def get_config(self):
        # `chunk_in_sec=None` disables chunked (streaming) encoding for the unit tests.
        return EncodecConfig(
            audio_channels=self.num_channels,
            chunk_in_sec=None,
            hidden_size=self.hidden_size,
            num_filters=self.num_filters,
            num_residual_layers=self.num_residual_layers,
            upsampling_ratios=self.upsampling_ratios,
            num_lstm_layers=self.num_lstm_layers,
            codebook_size=self.codebook_size,
        )

    def create_and_check_model_forward(self, config, inputs_dict):
        # A full forward pass should reconstruct audio with the same
        # (batch, channels, samples) shape as the input.
        model = EncodecModel(config=config).to(torch_device).eval()

        result = model(**inputs_dict)
        self.parent.assertEqual(
            result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size)
        )


@require_torch
class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (EncodecModel,) if is_torch_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_resize_embeddings = False
    pipeline_model_mapping = {"feature-extraction": EncodecModel} if is_torch_available() else {}

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # model does not have attention and does not support returning hidden states
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if "output_attentions" in inputs_dict:
            inputs_dict.pop("output_attentions")
        if "output_hidden_states" in inputs_dict:
            inputs_dict.pop("output_hidden_states")
        return inputs_dict

    def setUp(self):
        self.model_tester = EncodecModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EncodecConfig, hidden_size=37, common_properties=[], has_text_modality=False
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    def test_forward_signature(self):
        # The leading positional parameters of `forward` must stay stable, since
        # downstream code relies on this ordering.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values", "padding_mask",
"bandwidth"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics") def test_inputs_embeds(self): pass @unittest.skip(reason="The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics") def test_model_get_set_embeddings(self): pass @unittest.skip( reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic" ) def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip( reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic" ) def test_torchscript_output_attentions(self): pass @unittest.skip( reason="The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic" ) def test_torchscript_output_hidden_state(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) 
                loaded_model.eval()

                model_state_dict = model.state_dict()
                loaded_model_state_dict = loaded_model.state_dict()

                # TorchScript serializes non-persistent buffers; collect the extra keys so
                # the state-dict comparison below only covers shared entries.
                non_persistent_buffers = {}
                for key in loaded_model_state_dict:
                    if key not in model_state_dict:
                        non_persistent_buffers[key] = loaded_model_state_dict[key]

                loaded_model_state_dict = {
                    key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
                }

                self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

                # Each non-persistent buffer from the loaded model must match some buffer of
                # the original model; pop matches so each original buffer is used only once.
                model_buffers = list(model.buffers())
                for non_persistent_buffer in non_persistent_buffers.values():
                    found_buffer = False
                    for i, model_buffer in enumerate(model_buffers):
                        if torch.equal(non_persistent_buffer, model_buffer):
                            found_buffer = True
                            break

                    self.assertTrue(found_buffer)
                    model_buffers.pop(i)

                # NOTE(review): this second pass is an exact duplicate of the loop above — it
                # re-runs the same assertions on a fresh buffer list and could be removed.
                model_buffers = list(model.buffers())
                for non_persistent_buffer in non_persistent_buffers.values():
                    found_buffer = False
                    for i, model_buffer in enumerate(model_buffers):
                        if torch.equal(non_persistent_buffer, model_buffer):
                            found_buffer = True
                            break

                    self.assertTrue(found_buffer)
                    model_buffers.pop(i)

                # Finally, every shared parameter/buffer must be bitwise-equal after the
                # save/load round trip.
                models_equal = True
                for layer_name, p1 in model_state_dict.items():
                    if layer_name in loaded_model_state_dict:
                        p2 = loaded_model_state_dict[layer_name]
                        if p1.data.ne(p2.data).sum() > 0:
                            models_equal = False

                self.assertTrue(models_equal)

                # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @unittest.skip( reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic" ) def test_attention_outputs(self): pass def test_feed_forward_chunking(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() # original_config.norm_type = "time_group_norm" for model_class in self.all_model_classes: torch.manual_seed(0) config = copy.deepcopy(original_config) config.chunk_length_s = None config.overlap = None config.sampling_rate = 20 model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) inputs["input_values"] = inputs["input_values"].repeat(1, 1, 10) hidden_states_no_chunk = model(**inputs)[1] torch.manual_seed(0) config.chunk_length_s = 2 config.overlap = 0 config.sampling_rate = 20 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**inputs)[1] torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-1, atol=1e-2) @unittest.skip( reason="The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic" ) def test_hidden_states_output(self): pass def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): # outputs are not tensors but list (since each sequence don't have the same frame_length) out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = 
                    model(**self._prepare_for_class(inputs_dict, model_class))[0]

            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    def test_model_outputs_equivalence(self):
        # Tuple output (`return_dict=False`) and dict output (`return_dict=True`) must match.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # Zero out NaNs (t != t is only true for NaN) so comparisons are not poisoned
            # by NaN != NaN; note this mutates the tensor in place.
            t[t != t] = 0
            return t

        def assert_nested_tensors_close(a, b):
            # Recursively compare nested tuples/lists of tensors for near-equality.
            if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
                assert len(a) == len(b), f"Length mismatch: {len(a)} vs {len(b)}"
                for i, (x, y) in enumerate(zip(a, b)):
                    assert_nested_tensors_close(x, y)
            elif torch.is_tensor(a) and torch.is_tensor(b):
                a_clean = set_nan_tensor_to_zero(a)
                b_clean = set_nan_tensor_to_zero(b)
                assert torch.allclose(a_clean, b_clean, atol=1e-5), (
                    "Tuple and dict output are not equal. Difference:"
                    f" Max diff: {torch.max(torch.abs(a_clean - b_clean))}. "
                    f"Tuple has nan: {torch.isnan(a).any()} and inf: {torch.isinf(a)}. "
                    f"Dict has nan: {torch.isnan(b).any()} and inf: {torch.isinf(b)}."
                )
            else:
                raise ValueError(f"Mismatch between {a} vs {b}")

        # NOTE(review): mutable default `additional_kwargs={}`; safe here since it is never mutated.
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)

                self.assertTrue(isinstance(tuple_output, tuple))
                self.assertTrue(isinstance(dict_output, dict))

                # cast dict_output.values() to list as it is a odict_values object
                assert_nested_tensors_close(tuple_output, list(dict_output.values()))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

    def test_initialization(self):
        # With init std zeroed out, conv weights must stay within their uniform-init range
        # and every other (non-LSTM) parameter must be exactly 0 or 1 on average.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                uniform_init_parms = ["conv"]
                ignore_init = ["lstm"]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    elif not any(x in name for x in ignore_init):
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def test_identity_shortcut(self):
        # Forward pass must also work with additive identity shortcuts instead of conv shortcuts.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        config.use_conv_shortcut = False
        self.model_tester.create_and_check_model_forward(config, inputs_dict)

    def test_model_forward_with_normalization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_normalization()
        self.model_tester.create_and_check_model_forward(config, inputs_dict)


def normalize(arr):
    # L2-normalize a numpy array so waveforms can be compared irrespective of overall scale.
    norm = np.linalg.norm(arr)
    normalized_arr = arr / norm
    return normalized_arr


def compute_rmse(arr1, arr2):
    # RMSE between two audio tensors: squeeze to numpy, trim both to the shorter length
    # (despite the name, `max_length` is the minimum of the two lengths), then L2-normalize
    # each side before taking the root-mean-square difference.
    arr1_np = arr1.cpu().numpy().squeeze()
    arr2_np = arr2.cpu().numpy().squeeze()
    max_length = min(arr1.shape[-1], arr2.shape[-1])
    arr1_np = arr1_np[..., :max_length]
    arr2_np = arr2_np[..., :max_length]
    arr1_normalized = normalize(arr1_np)
    arr2_normalized = normalize(arr2_np)
    return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean())


"""
Integration tests for the Encodec model.

Code for expected output can be found below:
- test_integration: https://gist.github.com/ebezzam/2a34e249e729881130d1f5a42229d31f#file-test_encodec-py
- test_batch: https://gist.github.com/ebezzam/2a34e249e729881130d1f5a42229d31f#file-test_encodec_batch-py
"""

# fmt: off
# first key is model_id from hub, second key is bandwidth

# -- test_integration
EXPECTED_ENCODER_CODES = {
    "facebook/encodec_24khz": {
        "1.5": torch.tensor([[[  62,  835,  835,  835,  835,  835,  835,  835,  408,  408],
                              [1007, 1007, 1007,  544,  424,  424, 1007,  424,  302,  424]]]),
        "3.0": torch.tensor(
            [
                [
                    [62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
                    [1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
                    [786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
                    [741, 741, 741, 993, 741, 1018, 993, 919, 741, 741],
                ],
            ]
        ),
        "6.0": torch.tensor(
            [
                [
                    [62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
                    [1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
                    [786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
                    [741, 741, 741, 993, 741, 1018, 993, 919, 741, 741],
                    [528, 446, 198, 190, 446, 622, 646, 448, 646, 448],
                    [1011, 140, 185, 986, 683, 986, 435, 41, 140, 939],
                    [896, 772, 562, 772, 485, 528, 896, 853, 562, 772],
                    [899, 975, 468, 468, 468, 701, 1013, 828, 518, 899],
                ],
            ]
        ),
        "12.0": torch.tensor(
            [
                [
                    [62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
                    [1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
                    [786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
[741, 741, 741, 993, 741, 1018, 993, 919, 741, 741], [528, 446, 198, 190, 446, 622, 646, 448, 646, 448], [1011, 140, 185, 986, 683, 986, 435, 41, 140, 939], [896, 772, 562, 772, 485, 528, 896, 853, 562, 772], [899, 975, 468, 468, 468, 701, 1013, 828, 518, 899], [827, 807, 938, 320, 699, 470, 909, 628, 301, 827], [963, 801, 630, 477, 717, 354, 205, 359, 874, 744], [1000, 1000, 388, 1000, 408, 740, 568, 364, 709, 843], [413, 835, 382, 840, 742, 1019, 375, 962, 835, 742], [971, 410, 998, 485, 798, 410, 351, 485, 485, 920], [848, 694, 662, 784, 848, 427, 1022, 848, 920, 694], [420, 911, 889, 911, 993, 776, 948, 477, 911, 911], [587, 755, 834, 962, 860, 425, 982, 982, 425, 461], ], ] ), "24.0": torch.tensor( [ [ [62, 835, 835, 835, 835, 835, 835, 835, 408, 408], [1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424], [786, 678, 821, 786, 36, 36, 786, 212, 937, 937], [741, 741, 741, 993, 741, 1018, 993, 919, 741, 741], [528, 446, 198, 190, 446, 622, 646, 448, 646, 448], [1011, 140, 185, 986, 683, 986, 435, 41, 140, 939], [896, 772, 562, 772, 485, 528, 896, 853, 562, 772], [899, 975, 468, 468, 468, 701, 1013, 828, 518, 899], [827, 807, 938, 320, 699, 470, 909, 628, 301, 827], [963, 801, 630, 477, 717, 354, 205, 359, 874, 744], [1000, 1000, 388, 1000, 408, 740, 568, 364, 709, 843], [413, 835, 382, 840, 742, 1019, 375, 962, 835, 742], [971, 410, 998, 485, 798, 410, 351, 485, 485, 920], [848, 694, 662, 784, 848, 427, 1022, 848, 920, 694], [420, 911, 889, 911, 993, 776, 948, 477, 911, 911], [587, 755, 834, 962, 860, 425, 982, 982, 425, 461], [270, 160, 26, 131, 597, 506, 670, 637, 248, 160], [ 15, 215, 134, 69, 215, 155, 1012, 1009, 260, 417], [580, 561, 686, 896, 497, 637, 580, 245, 896, 264], [511, 239, 560, 691, 571, 627, 571, 571, 258, 619], [591, 942, 591, 251, 250, 250, 857, 486, 295, 295], [565, 546, 654, 301, 301, 623, 639, 568, 565, 282], [539, 317, 639, 539, 651, 539, 538, 640, 615, 615], [637, 556, 637, 582, 640, 515, 515, 632, 254, 613], [305, 643, 500, 550, 522, 
500, 550, 561, 522, 305], [954, 456, 584, 755, 505, 782, 661, 671, 497, 505], [577, 464, 637, 647, 552, 552, 624, 647, 624, 647], [728, 748, 931, 608, 538, 1015, 294, 294, 666, 538], [602, 535, 666, 665, 655, 979, 574, 535, 571, 781], [321, 620, 557, 566, 511, 910, 672, 623, 853, 674], [621, 556, 947, 474, 610, 752, 1002, 597, 474, 474], [605, 948, 657, 588, 485, 633, 459, 968, 939, 325], ], ] ), }, "facebook/encodec_48khz": { "3.0": torch.tensor([[[214, 214, 214, 214, 214, 118, 214, 214, 214, 214], [989, 989, 611, 77, 77, 989, 976, 976, 976, 77]]]), "6.0": torch.tensor([[[ 214, 214, 214, 214, 214, 118, 214, 214, 214, 214], [ 989, 989, 611, 77, 77, 989, 976, 976, 976, 77], [ 977, 1009, 538, 925, 925, 977, 1022, 1022, 1022, 925], [ 376, 1012, 1023, 725, 725, 1023, 376, 962, 376, 847]]]), "12.0": torch.tensor([[[ 214, 214, 214, 214, 214, 118, 214, 214, 214, 214], [ 989, 989, 611, 77, 77, 989, 976, 976, 976, 77], [ 977, 1009, 538, 925, 925, 977, 1022, 1022, 1022, 925], [ 376, 1012, 1023, 725, 725, 1023, 376, 962, 376, 847], [ 979, 1012, 323, 695, 1018, 1023, 979, 1023, 979, 650], [ 945, 762, 528, 865, 824, 945, 945, 945, 957, 957], [ 904, 973, 1014, 681, 582, 1014, 1014, 1014, 1014, 681], [ 229, 392, 796, 392, 977, 1017, 250, 1017, 250, 1017]]]), "24.0": torch.tensor([[[ 214, 214, 214, 214, 214, 118, 214, 214, 214, 214], [ 989, 989, 611, 77, 77, 989, 976, 976, 976, 77], [ 977, 1009, 538, 925, 925, 977, 1022, 1022, 1022, 925], [ 376, 1012, 1023, 725, 725, 1023, 376, 962, 376, 847], [ 979, 1012, 323, 695, 1018, 1023, 979, 1023, 979, 650], [ 945, 762, 528, 865, 824, 945, 945, 945, 957, 957], [ 904, 973, 1014, 681, 582, 1014, 1014, 1014, 1014, 681], [ 229, 392, 796, 392, 977, 1017, 250, 1017, 250, 1017], [ 902, 436, 935, 1011, 1023, 1023, 1023, 154, 1023, 392], [ 982, 878, 961, 832, 629, 431, 919, 629, 919, 792], [ 727, 727, 401, 727, 979, 587, 727, 487, 413, 201], [ 928, 924, 965, 934, 840, 480, 924, 920, 924, 486], [ 10, 625, 712, 552, 712, 259, 394, 131, 726, 516], [ 
882, 1022, 32, 524, 267, 861, 974, 882, 108, 521], [ 304, 841, 306, 415, 69, 376, 928, 510, 381, 104], [ 0, 0, 0, 484, 83, 0, 307, 262, 0, 0]]]) } } EXPECTED_ENCODER_SCALES = { "facebook/encodec_24khz": { "1.5": None, "3.0": None, "6.0": None, "12.0": None, "24.0": None }, "facebook/encodec_48khz": { "3.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02, 4.483359e-02, 1.000000e-08]), "6.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02, 4.483359e-02, 1.000000e-08]), "12.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02, 4.483359e-02, 1.000000e-08]), "24.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02, 4.483359e-02, 1.000000e-08]) } } EXPECTED_DECODER_OUTPUTS = { "facebook/encodec_24khz": { "1.5": torch.tensor( [[ 0.0003, -0.0002, -0.0000, -0.0004, 0.0004, 0.0003, -0.0000, 0.0001, 0.0005, 0.0001, -0.0015, -0.0007, -0.0002, -0.0018, -0.0003, 0.0013, 0.0011, 0.0008, 0.0008, 0.0008, 0.0008, 0.0002, -0.0003, -0.0004, -0.0006, -0.0009, -0.0010, -0.0012, -0.0011, -0.0006, -0.0006, -0.0005, 0.0000, 0.0001, 0.0003, 0.0002, -0.0001, -0.0002, -0.0008, -0.0012, -0.0011, -0.0012, -0.0013, -0.0003, 0.0002, 0.0006, 0.0006, 0.0006, 0.0009, 0.0010]] ), "3.0": torch.tensor( [[ 0.0003, -0.0002, -0.0000, -0.0004, 0.0004, 0.0003, -0.0000, 0.0001, 0.0006, 0.0002, -0.0015, -0.0008, -0.0002, -0.0018, -0.0003, 0.0013, 0.0011, 0.0008, 0.0008, 0.0008, 0.0008, 0.0002, -0.0003, -0.0004, -0.0005, -0.0008, -0.0010, -0.0012, -0.0011, -0.0006, -0.0006, -0.0005, -0.0000, 0.0001, 0.0003, 0.0002, -0.0001, -0.0002, -0.0008, -0.0013, -0.0011, -0.0013, -0.0014, -0.0003, 0.0002, 0.0006, 0.0006, 0.0006, 0.0009, 0.0010]] ), "6.0": torch.tensor( [[ 0.0004, -0.0001, 0.0001, -0.0003, 0.0004, 0.0003, 0.0000, 0.0001, 0.0007, 0.0002, -0.0013, -0.0007, -0.0002, -0.0015, -0.0001, 0.0014, 0.0014, 0.0011, 0.0010, 0.0010, 0.0009, 0.0004, 0.0000, 0.0000, 0.0000, -0.0000, 
-0.0001, -0.0004, -0.0004, -0.0001, -0.0002, -0.0002, 0.0002, 0.0005, 0.0009, 0.0010, 0.0008, 0.0007, 0.0002, -0.0003, -0.0004, -0.0008, -0.0008, 0.0000, 0.0006, 0.0010, 0.0012, 0.0012, 0.0013, 0.0014]] ), "12.0": torch.tensor( [[ 0.0004, -0.0001, 0.0001, -0.0004, 0.0003, 0.0002, -0.0000, 0.0001, 0.0006, 0.0002, -0.0013, -0.0006, -0.0001, -0.0014, 0.0001, 0.0018, 0.0018, 0.0014, 0.0012, 0.0013, 0.0011, 0.0006, 0.0000, 0.0000, -0.0000, -0.0001, -0.0001, -0.0004, -0.0004, -0.0000, -0.0000, -0.0000, 0.0005, 0.0007, 0.0011, 0.0011, 0.0009, 0.0007, 0.0002, -0.0003, -0.0004, -0.0007, -0.0007, 0.0002, 0.0009, 0.0013, 0.0015, 0.0014, 0.0015, 0.0016]] ), "24.0": torch.tensor( [[ 0.0005, 0.0001, 0.0004, -0.0001, 0.0003, 0.0002, 0.0000, 0.0001, 0.0007, 0.0005, -0.0011, -0.0005, -0.0001, -0.0018, -0.0000, 0.0021, 0.0019, 0.0013, 0.0011, 0.0012, 0.0012, 0.0006, -0.0000, -0.0001, -0.0000, -0.0000, -0.0001, -0.0004, -0.0004, -0.0000, -0.0001, -0.0002, 0.0003, 0.0004, 0.0008, 0.0007, 0.0006, 0.0007, 0.0001, -0.0004, -0.0003, -0.0006, -0.0008, 0.0004, 0.0011, 0.0015, 0.0016, 0.0015, 0.0016, 0.0018]] ) }, "facebook/encodec_48khz": { "3.0": torch.tensor( [ [0.0034, 0.0028, 0.0037, 0.0041, 0.0029, 0.0022, 0.0021, 0.0020, 0.0021, 0.0023, 0.0021, 0.0018, 0.0019, 0.0020, 0.0020, 0.0020, 0.0021, 0.0023, 0.0025, 0.0022, 0.0017, 0.0015, 0.0017, 0.0020, 0.0024, 0.0031, 0.0039, 0.0045, 0.0046, 0.0042, 0.0034, 0.0027, 0.0023, 0.0022, 0.0023, 0.0024, 0.0022, 0.0023, 0.0024, 0.0027, 0.0027, 0.0027, 0.0025, 0.0024, 0.0024, 0.0026, 0.0028, 0.0027, 0.0024, 0.0022], [ -0.0031, -0.0027, -0.0018, -0.0017, -0.0024, -0.0029, -0.0030, -0.0026, -0.0021, -0.0018, -0.0018, -0.0019, -0.0017, -0.0014, -0.0012, -0.0010, -0.0008, -0.0004, -0.0001, -0.0004, -0.0012, -0.0015, -0.0014, -0.0013, -0.0011, -0.0005, 0.0002, 0.0007, 0.0008, 0.0004, -0.0003, -0.0010, -0.0012, -0.0011, -0.0009, -0.0009, -0.0009, -0.0008, -0.0006, -0.0005, -0.0005, -0.0005, -0.0006, -0.0008, -0.0008, -0.0006, -0.0005, -0.0007, -0.0010, 
-0.0012], ] ), "6.0": torch.tensor( [ [0.0052, 0.0049, 0.0057, 0.0058, 0.0048, 0.0043, 0.0042, 0.0041, 0.0041, 0.0042, 0.0040, 0.0038, 0.0038, 0.0038, 0.0037, 0.0037, 0.0037, 0.0037, 0.0038, 0.0037, 0.0035, 0.0034, 0.0036, 0.0039, 0.0043, 0.0047, 0.0053, 0.0057, 0.0057, 0.0055, 0.0050, 0.0046, 0.0043, 0.0041, 0.0042, 0.0042, 0.0041, 0.0041, 0.0042, 0.0043, 0.0043, 0.0043, 0.0041, 0.0040, 0.0040, 0.0041, 0.0042, 0.0042, 0.0040, 0.0039], [ 0.0001, 0.0006, 0.0013, 0.0011, 0.0005, 0.0001, -0.0001, 0.0001, 0.0003, 0.0005, 0.0005, 0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.0010, 0.0013, 0.0015, 0.0014, 0.0010, 0.0008, 0.0010, 0.0012, 0.0015, 0.0019, 0.0023, 0.0026, 0.0026, 0.0024, 0.0020, 0.0016, 0.0013, 0.0013, 0.0014, 0.0015, 0.0015, 0.0016, 0.0017, 0.0017, 0.0017, 0.0016, 0.0015, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0010, 0.0009], ] ), "12.0": torch.tensor( [ [0.0014, 0.0012, 0.0021, 0.0024, 0.0017, 0.0013, 0.0012, 0.0011, 0.0011, 0.0012, 0.0011, 0.0010, 0.0009, 0.0009, 0.0008, 0.0008, 0.0009, 0.0010, 0.0012, 0.0012, 0.0009, 0.0008, 0.0010, 0.0013, 0.0017, 0.0024, 0.0031, 0.0036, 0.0036, 0.0033, 0.0028, 0.0023, 0.0020, 0.0020, 0.0022, 0.0022, 0.0022, 0.0022, 0.0023, 0.0024, 0.0024, 0.0023, 0.0021, 0.0021, 0.0021, 0.0023, 0.0024, 0.0024, 0.0022, 0.0021], [ -0.0034, -0.0029, -0.0020, -0.0020, -0.0024, -0.0027, -0.0030, -0.0030, -0.0028, -0.0025, -0.0025, -0.0025, -0.0025, -0.0025, -0.0023, -0.0022, -0.0020, -0.0017, -0.0013, -0.0014, -0.0017, -0.0019, -0.0018, -0.0015, -0.0011, -0.0006, 0.0000, 0.0005, 0.0005, 0.0002, -0.0003, -0.0008, -0.0010, -0.0009, -0.0007, -0.0006, -0.0006, -0.0005, -0.0005, -0.0005, -0.0005, -0.0007, -0.0008, -0.0009, -0.0009, -0.0008, -0.0007, -0.0008, -0.0010, -0.0011], ] ), "24.0": torch.tensor( [ [ 0.0010, 0.0008, 0.0018, 0.0021, 0.0014, 0.0011, 0.0009, 0.0007, 0.0006, 0.0006, 0.0005, 0.0003, 0.0003, 0.0002, 0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, -0.0002, -0.0004, -0.0003, 0.0000, 0.0005, 0.0011, 0.0018, 0.0022, 0.0022, 0.0018, 
0.0012, 0.0007, 0.0004, 0.0003, 0.0004, 0.0006, 0.0006, 0.0007, 0.0007, 0.0009, 0.0008, 0.0007, 0.0005, 0.0004, 0.0004, 0.0006, 0.0007, 0.0007, 0.0005, 0.0004], [-0.0039, -0.0035, -0.0027, -0.0026, -0.0028, -0.0031, -0.0035, -0.0035, -0.0034, -0.0033, -0.0032, -0.0032, -0.0031, -0.0031, -0.0029, -0.0028, -0.0026, -0.0024, -0.0021, -0.0021, -0.0024, -0.0025, -0.0024, -0.0021, -0.0017, -0.0011, -0.0006, -0.0002, -0.0002, -0.0004, -0.0009, -0.0013, -0.0015, -0.0015, -0.0014, -0.0013, -0.0012, -0.0011, -0.0010, -0.0010, -0.0011, -0.0012, -0.0014, -0.0015, -0.0015, -0.0014, -0.0013, -0.0014, -0.0016, -0.0017], ] ) } } EXPECTED_CODEC_ERROR = { "facebook/encodec_24khz": { "1.5": 0.0022229827009141445, "3.0": 0.001862662611529231, "6.0": 0.0015231302240863442, "12.0": 0.0013, "24.0": 0.0012, }, "facebook/encodec_48khz": { "3.0": 0.000840399123262614, "6.0": 0.0006692984024994075, "12.0": 0.0005328940460458398, "24.0": 0.0004473362350836396, } } # -- test_batch EXPECTED_ENCODER_CODES_BATCH = { "facebook/encodec_24khz": { "1.5": torch.tensor( [ [ [62, 106, 475, 475, 404, 404, 475, 404, 404, 475, 475, 404, 475, 475, 475, 835, 475, 475, 835, 835, 106, 106, 738, 106, 738, 106, 408, 408, 738, 408, 408, 408, 738, 408, 408, 408, 408, 738, 408, 1017, 604, 64, 303, 394, 5, 570, 991, 570, 969, 814], [424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007, 729, 1007, 961, 1007, 1007, 961, 969, 1007, 1007, 424, 518, 1007, 544, 1007, 518, 913, 424, 424, 544, 424, 518, 518, 518, 302, 424, 424, 424, 544, 424, 114, 200, 787, 931, 343, 434, 315, 487, 872, 769, 463], ], [ [835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 408, 835, 738, 408, 408, 408, 408, 408, 408, 738, 408, 408, 408, 408, 408, 408, 408, 408, 738, 408, 408, 408, 408, 408, 408, 408, 408, 408, 339, 834, 819, 875, 957, 670, 811, 670, 237, 53], [857, 857, 544, 518, 937, 518, 913, 913, 518, 913, 518, 913, 518, 518, 544, 424, 424, 518, 424, 424, 424, 544, 424, 424, 424, 518, 424, 518, 518, 937, 544, 424, 518, 302, 518, 
424, 424, 518, 424, 424, 913, 857, 841, 363, 463, 78, 176, 645, 255, 571], ], ] ), "3.0": torch.tensor( [ [ [62, 106, 475, 475, 404, 404, 475, 404, 404, 475], [424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007], [212, 832, 212, 36, 36, 36, 767, 653, 982, 1016], [956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019], ], [ [835, 835, 835, 835, 835, 835, 835, 835, 835, 835], [857, 857, 544, 518, 937, 518, 913, 913, 518, 913], [705, 989, 934, 989, 678, 934, 934, 786, 934, 786], [366, 1018, 398, 398, 398, 398, 673, 741, 398, 741], ], ] ), "6.0": torch.tensor( [ [ [62, 106, 475, 475, 404, 404, 475, 404, 404, 475], [424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007], [212, 832, 212, 36, 36, 36, 767, 653, 982, 1016], [956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019], [712, 862, 712, 448, 528, 646, 446, 373, 694, 373], [939, 881, 939, 19, 334, 881, 1005, 763, 632, 781], [853, 464, 772, 782, 782, 983, 890, 874, 983, 782], [899, 475, 173, 701, 701, 947, 468, 1019, 882, 518], ], [ [835, 835, 835, 835, 835, 835, 835, 835, 835, 835], [857, 857, 544, 518, 937, 518, 913, 913, 518, 913], [705, 989, 934, 989, 678, 934, 934, 786, 934, 786], [366, 1018, 398, 398, 398, 398, 673, 741, 398, 741], [373, 373, 375, 373, 373, 222, 862, 373, 190, 373], [293, 949, 435, 435, 435, 293, 949, 881, 632, 986], [800, 528, 528, 853, 782, 485, 772, 900, 528, 853], [916, 237, 828, 701, 518, 835, 948, 315, 948, 315], ], ] ), "12.0": torch.tensor( [ [ [62, 106, 475, 475, 404, 404, 475, 404, 404, 475], [424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007], [212, 832, 212, 36, 36, 36, 767, 653, 982, 1016], [956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019], [712, 862, 712, 448, 528, 646, 446, 373, 694, 373], [939, 881, 939, 19, 334, 881, 1005, 763, 632, 781], [853, 464, 772, 782, 782, 983, 890, 874, 983, 782], [899, 475, 173, 701, 701, 947, 468, 1019, 882, 518], [817, 470, 588, 675, 675, 588, 960, 927, 909, 466], [953, 776, 717, 630, 359, 717, 861, 630, 861, 359], [623, 740, 1000, 388, 
420, 388, 740, 818, 958, 743], [413, 835, 742, 249, 892, 352, 190, 498, 866, 890], [817, 351, 804, 751, 938, 535, 434, 879, 351, 971], [792, 495, 935, 848, 792, 795, 942, 935, 723, 531], [622, 681, 477, 713, 752, 871, 713, 514, 993, 777], [928, 799, 962, 1005, 860, 439, 312, 922, 982, 922], ], [ [835, 835, 835, 835, 835, 835, 835, 835, 835, 835], [857, 857, 544, 518, 937, 518, 913, 913, 518, 913], [705, 989, 934, 989, 678, 934, 934, 786, 934, 786], [366, 1018, 398, 398, 398, 398, 673, 741, 398, 741], [373, 373, 375, 373, 373, 222, 862, 373, 190, 373], [293, 949, 435, 435, 435, 293, 949, 881, 632, 986], [800, 528, 528, 853, 782, 485, 772, 900, 528, 853], [916, 237, 828, 701, 518, 835, 948, 315, 948, 315], [420, 628, 918, 628, 628, 628, 248, 628, 909, 811], [736, 717, 994, 974, 477, 874, 963, 979, 355, 979], [1002, 1002, 894, 875, 388, 709, 534, 408, 881, 709], [735, 828, 763, 742, 640, 835, 828, 375, 840, 375], [898, 938, 556, 658, 410, 951, 486, 658, 877, 877], [ 0, 797, 428, 694, 428, 920, 1022, 1022, 809, 797], [622, 421, 422, 776, 911, 911, 958, 421, 776, 421], [1005, 312, 922, 755, 834, 461, 461, 702, 597, 974], ], ] ), "24.0": torch.tensor( [ [ [62, 106, 475, 475, 404, 404, 475, 404, 404, 475], [424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007], [212, 832, 212, 36, 36, 36, 767, 653, 982, 1016], [956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019], [712, 862, 712, 448, 528, 646, 446, 373, 694, 373], [939, 881, 939, 19, 334, 881, 1005, 763, 632, 781], [853, 464, 772, 782, 782, 983, 890, 874, 983, 782], [899, 475, 173, 701, 701, 947, 468, 1019, 882, 518], [817, 470, 588, 675, 675, 588, 960, 927, 909, 466], [953, 776, 717, 630, 359, 717, 861, 630, 861, 359], [623, 740, 1000, 388, 420, 388, 740, 818, 958, 743], [413, 835, 742, 249, 892, 352, 190, 498, 866, 890], [817, 351, 804, 751, 938, 535, 434, 879, 351, 971], [792, 495, 935, 848, 792, 795, 942, 935, 723, 531], [622, 681, 477, 713, 752, 871, 713, 514, 993, 777], [928, 799, 962, 1005, 860, 439, 312, 922, 
982, 922], [939, 637, 861, 506, 861, 61, 475, 264, 1019, 260], [166, 215, 69, 69, 890, 69, 284, 828, 396, 180], [561, 896, 841, 144, 580, 659, 886, 514, 686, 451], [691, 691, 239, 735, 62, 287, 383, 972, 550, 505], [451, 811, 238, 251, 250, 841, 734, 329, 551, 846], [313, 601, 494, 763, 811, 565, 748, 441, 601, 480], [653, 242, 630, 572, 701, 973, 632, 374, 561, 521], [984, 987, 419, 454, 386, 507, 532, 636, 515, 671], [647, 550, 515, 292, 876, 1011, 719, 549, 691, 911], [683, 536, 656, 603, 698, 867, 987, 857, 886, 491], [444, 937, 826, 555, 585, 710, 466, 852, 655, 591], [658, 952, 903, 508, 739, 596, 420, 721, 464, 306], [665, 334, 765, 532, 618, 278, 836, 838, 517, 597], [613, 674, 596, 904, 987, 977, 938, 615, 672, 776], [689, 386, 749, 658, 250, 869, 957, 806, 750, 659], [652, 509, 910, 826, 566, 622, 951, 696, 900, 895], ], [ [835, 835, 835, 835, 835, 835, 835, 835, 835, 835], [857, 857, 544, 518, 937, 518, 913, 913, 518, 913], [705, 989, 934, 989, 678, 934, 934, 786, 934, 786], [366, 1018, 398, 398, 398, 398, 673, 741, 398, 741], [373, 373, 375, 373, 373, 222, 862, 373, 190, 373], [293, 949, 435, 435, 435, 293, 949, 881, 632, 986], [800, 528, 528, 853, 782, 485, 772, 900, 528, 853], [916, 237, 828, 701, 518, 835, 948, 315, 948, 315], [420, 628, 918, 628, 628, 628, 248, 628, 909, 811], [736, 717, 994, 974, 477, 874, 963, 979, 355, 979], [1002, 1002, 894, 875, 388, 709, 534, 408, 881, 709], [735, 828, 763, 742, 640, 835, 828, 375, 840, 375], [898, 938, 556, 658, 410, 951, 486, 658, 877, 877], [ 0, 797, 428, 694, 428, 920, 1022, 1022, 809, 797], [622, 421, 422, 776, 911, 911, 958, 421, 776, 421], [1005, 312, 922, 755, 834, 461, 461, 702, 597, 974], [248, 248, 637, 248, 977, 506, 546, 270, 670, 506], [547, 447, 15, 134, 1009, 215, 134, 396, 260, 160], [635, 497, 686, 765, 264, 497, 244, 675, 624, 656], [864, 571, 616, 511, 588, 781, 525, 258, 674, 503], [449, 757, 857, 451, 658, 486, 299, 299, 251, 596], [809, 628, 255, 568, 623, 301, 639, 546, 617, 623], [551, 
497, 908, 539, 661, 710, 640, 539, 646, 315], [689, 507, 875, 515, 613, 637, 527, 515, 662, 637], [983, 686, 456, 768, 601, 561, 768, 653, 500, 688], [493, 566, 664, 782, 683, 683, 721, 603, 323, 497], [1015, 552, 411, 423, 607, 646, 687, 1018, 689, 607], [516, 293, 471, 294, 293, 294, 608, 538, 803, 717], [974, 994, 952, 637, 637, 927, 535, 571, 602, 535], [776, 789, 476, 944, 652, 959, 589, 679, 321, 623], [776, 931, 720, 1009, 676, 731, 386, 676, 701, 676], [684, 543, 716, 392, 661, 517, 792, 588, 922, 676], ], ] ) }, "facebook/encodec_48khz": { "3.0": torch.tensor([[[790, 790, 790, 214, 214, 214, 799, 214, 214, 214], [989, 989, 77, 546, 989, 546, 989, 160, 546, 989]], [[214, 214, 214, 214, 214, 214, 214, 214, 214, 214], [289, 289, 989, 764, 289, 289, 882, 882, 882, 882]]]), "6.0": torch.tensor([[[ 790, 790, 790, 214, 214, 214, 799, 214, 214, 214], [ 989, 989, 77, 546, 989, 546, 989, 160, 546, 989], [ 977, 977, 977, 977, 538, 977, 977, 960, 977, 977], [ 376, 376, 962, 962, 607, 962, 963, 896, 962, 376]], [[ 214, 214, 214, 214, 214, 214, 214, 214, 214, 214], [ 289, 289, 989, 764, 289, 289, 882, 882, 882, 882], [1022, 1022, 471, 925, 821, 821, 267, 925, 925, 267], [ 979, 992, 914, 921, 0, 0, 1023, 963, 963, 1023]]]), "12.0": torch.tensor([[[ 790, 790, 790, 214, 214, 214, 799, 214, 214, 214], [ 989, 989, 77, 546, 989, 546, 989, 160, 546, 989], [ 977, 977, 977, 977, 538, 977, 977, 960, 977, 977], [ 376, 376, 962, 962, 607, 962, 963, 896, 962, 376], [ 979, 979, 979, 1012, 979, 1012, 921, 0, 1002, 695], [ 824, 1018, 762, 957, 824, 762, 762, 1007, 957, 336], [ 681, 973, 973, 452, 211, 681, 802, 679, 547, 884], [ 950, 1017, 1016, 1017, 986, 1017, 229, 607, 1017, 689]], [[ 214, 214, 214, 214, 214, 214, 214, 214, 214, 214], [ 289, 289, 989, 764, 289, 289, 882, 882, 882, 882], [1022, 1022, 471, 925, 821, 821, 267, 925, 925, 267], [ 979, 992, 914, 921, 0, 0, 1023, 963, 963, 1023], [ 403, 940, 976, 1018, 677, 1002, 979, 677, 677, 677], [1018, 794, 762, 444, 485, 485, 974, 
548, 548, 1018], [ 679, 243, 679, 1005, 1005, 973, 1014, 1005, 1005, 1014], [ 810, 13, 1017, 537, 522, 702, 202, 1017, 1017, 15]]]), "24.0": torch.tensor( [ [ [790, 790, 790, 214, 214, 214, 799, 214, 214, 214], [989, 989, 77, 546, 989, 546, 989, 160, 546, 989], [977, 977, 977, 977, 538, 977, 977, 960, 977, 977], [376, 376, 962, 962, 607, 962, 963, 896, 962, 376], [979, 979, 979, 1012, 979, 1012, 921, 0, 1002, 695], [824, 1018, 762, 957, 824, 762, 762, 1007, 957, 336], [681, 973, 973, 452, 211, 681, 802, 679, 547, 884], [950, 1017, 1016, 1017, 986, 1017, 229, 607, 1017, 689], [1004, 1011, 669, 1023, 1023, 1023, 905, 297, 810, 970], [982, 681, 982, 629, 662, 919, 878, 476, 629, 982], [727, 727, 959, 959, 979, 959, 530, 959, 337, 961], [924, 456, 924, 486, 924, 959, 102, 924, 805, 924], [649, 542, 993, 993, 949, 787, 56, 886, 949, 405], [864, 1022, 1022, 1022, 460, 753, 805, 309, 1022, 32], [953, 0, 0, 180, 352, 10, 581, 516, 322, 452], [300, 0, 1020, 307, 0, 543, 924, 627, 258, 262], ], [ [214, 214, 214, 214, 214, 214, 214, 214, 214, 214], [289, 289, 989, 764, 289, 289, 882, 882, 882, 882], [1022, 1022, 471, 925, 821, 821, 267, 925, 925, 267], [979, 992, 914, 921, 0, 0, 1023, 963, 963, 1023], [403, 940, 976, 1018, 677, 1002, 979, 677, 677, 677], [1018, 794, 762, 444, 485, 485, 974, 548, 548, 1018], [679, 243, 679, 1005, 1005, 973, 1014, 1005, 1005, 1014], [810, 13, 1017, 537, 522, 702, 202, 1017, 1017, 15], [728, 252, 970, 984, 971, 950, 673, 902, 1011, 810], [332, 1014, 476, 854, 1014, 861, 332, 411, 411, 408], [959, 727, 611, 979, 611, 727, 999, 497, 821, 0], [995, 698, 924, 688, 102, 510, 924, 970, 344, 961], [ 81, 516, 847, 924, 10, 240, 1005, 726, 993, 378], [467, 496, 484, 496, 456, 1022, 337, 600, 456, 1022], [789, 65, 937, 976, 159, 953, 343, 764, 179, 159], [ 10, 790, 483, 10, 1020, 352, 848, 333, 83, 848], ], ] ) } } EXPECTED_ENCODER_SCALES_BATCH = { "facebook/encodec_24khz": { "1.5": None, "3.0": None, "6.0": None, "12.0": None, "24.0": None }, 
"facebook/encodec_48khz": { "3.0": torch.tensor([[[1.027247e-01], [7.877284e-02]], [[1.014922e-01], [8.696266e-02]], [[6.308002e-02], [7.748771e-02]], [[6.899278e-02], [1.045912e-01]], [[6.440169e-02], [8.843135e-02]], [[4.139878e-02], [1.000000e-08]], [[5.848629e-02], [1.000000e-08]], [[2.329416e-04], [1.000000e-08]], [[1.000000e-08], [1.000000e-08]]]), "6.0": torch.tensor([[[1.027247e-01], [7.877284e-02]], [[1.014922e-01], [8.696266e-02]], [[6.308002e-02], [7.748771e-02]], [[6.899278e-02], [1.045912e-01]], [[6.440169e-02], [8.843135e-02]], [[4.139878e-02], [1.000000e-08]], [[5.848629e-02], [1.000000e-08]], [[2.329416e-04], [1.000000e-08]], [[1.000000e-08], [1.000000e-08]]]), "12.0": torch.tensor([[[1.027247e-01], [7.877284e-02]], [[1.014922e-01], [8.696266e-02]], [[6.308002e-02], [7.748771e-02]], [[6.899278e-02], [1.045912e-01]], [[6.440169e-02], [8.843135e-02]], [[4.139878e-02], [1.000000e-08]], [[5.848629e-02], [1.000000e-08]], [[2.329416e-04], [1.000000e-08]], [[1.000000e-08], [1.000000e-08]]]), "24.0": torch.tensor([[[1.027247e-01], [7.877284e-02]], [[1.014922e-01], [8.696266e-02]], [[6.308002e-02], [7.748771e-02]], [[6.899278e-02], [1.045912e-01]], [[6.440169e-02], [8.843135e-02]], [[4.139878e-02], [1.000000e-08]], [[5.848629e-02], [1.000000e-08]], [[2.329416e-04], [1.000000e-08]], [[1.000000e-08], [1.000000e-08]]]) } } EXPECTED_DECODER_OUTPUTS_BATCH = { "facebook/encodec_24khz": { "1.5": torch.tensor( [ [[ 0.0010, 0.0004, 0.0005, 0.0002, 0.0005, -0.0001, -0.0003, -0.0001, 0.0003, 0.0001, -0.0014, -0.0009, -0.0007, -0.0023, -0.0009, 0.0008, 0.0007, 0.0003, 0.0001, 0.0001, 0.0003, -0.0001, -0.0003, -0.0004, -0.0005, -0.0007, -0.0009, -0.0011, -0.0010, -0.0006, -0.0007, -0.0007, -0.0005, -0.0005, -0.0003, -0.0002, -0.0002, -0.0001, -0.0005, -0.0008, -0.0005, -0.0007, -0.0009, -0.0002, 0.0003, 0.0005, 0.0004, 0.0001, 0.0003, 0.0004]], [[ -0.0001, -0.0000, 0.0003, 0.0001, 0.0005, 0.0001, -0.0006, -0.0002, 0.0002, 0.0002, -0.0031, -0.0004, 0.0006, -0.0066, 
-0.0032, 0.0044, 0.0025, -0.0019, -0.0017, 0.0001, 0.0019, -0.0010, -0.0014, -0.0009, -0.0007, -0.0009, -0.0019, -0.0024, -0.0019, -0.0001, -0.0017, -0.0022, -0.0004, 0.0005, -0.0014, -0.0023, 0.0002, 0.0015, -0.0022, -0.0033, 0.0024, 0.0009, -0.0041, 0.0000, 0.0030, 0.0020, -0.0015, -0.0018, 0.0014, 0.0007]], ] ), "3.0": torch.tensor( [ [[ 0.0013, 0.0007, 0.0009, 0.0005, 0.0006, 0.0002, -0.0001, 0.0000, 0.0005, 0.0003, -0.0012, -0.0006, -0.0003, -0.0019, -0.0003, 0.0015, 0.0013, 0.0009, 0.0008, 0.0007, 0.0008, 0.0004, 0.0001, -0.0000, -0.0001, -0.0002, -0.0003, -0.0004, -0.0004, 0.0001, -0.0000, -0.0000, 0.0003, 0.0003, 0.0005, 0.0005, 0.0004, 0.0005, 0.0001, -0.0003, -0.0002, -0.0004, -0.0006, 0.0003, 0.0009, 0.0012, 0.0013, 0.0012, 0.0014, 0.0015]], [[ 0.0000, -0.0003, 0.0005, 0.0004, 0.0011, 0.0013, 0.0002, 0.0005, 0.0002, 0.0006, -0.0025, -0.0005, 0.0004, -0.0069, -0.0027, 0.0038, 0.0013, -0.0015, -0.0005, 0.0003, 0.0014, -0.0006, -0.0002, -0.0010, -0.0008, -0.0001, -0.0006, -0.0012, -0.0016, 0.0010, 0.0001, -0.0010, -0.0002, 0.0013, -0.0002, -0.0017, 0.0005, 0.0019, -0.0019, -0.0035, 0.0022, -0.0001, -0.0040, 0.0012, 0.0015, 0.0012, 0.0001, -0.0010, 0.0005, 0.0004]], ] ), "6.0": torch.tensor( [ [[ 0.0010, 0.0005, 0.0007, 0.0001, 0.0003, -0.0000, -0.0002, -0.0001, 0.0003, 0.0001, -0.0014, -0.0007, -0.0004, -0.0019, -0.0004, 0.0013, 0.0012, 0.0008, 0.0007, 0.0007, 0.0008, 0.0003, 0.0001, 0.0001, -0.0000, -0.0001, -0.0001, -0.0002, -0.0001, 0.0002, 0.0002, 0.0001, 0.0005, 0.0005, 0.0008, 0.0008, 0.0007, 0.0008, 0.0004, 0.0001, 0.0002, -0.0001, -0.0002, 0.0006, 0.0012, 0.0015, 0.0016, 0.0014, 0.0016, 0.0017]], [[ -0.0005, -0.0001, 0.0003, 0.0001, 0.0010, 0.0012, 0.0002, 0.0004, 0.0012, 0.0003, -0.0023, -0.0003, -0.0005, -0.0063, -0.0026, 0.0040, 0.0024, -0.0018, -0.0005, 0.0016, 0.0004, -0.0008, 0.0009, 0.0002, -0.0015, -0.0003, 0.0004, -0.0011, -0.0013, 0.0012, 0.0001, -0.0019, 0.0007, 0.0021, -0.0009, -0.0016, 0.0015, 0.0013, -0.0022, -0.0015, 0.0016, -0.0014, 
-0.0033, 0.0017, 0.0025, -0.0004, -0.0005, 0.0010, 0.0005, 0.0001]], ] ), "12.0": torch.tensor( [ [[ 0.0003, 0.0002, 0.0004, -0.0004, -0.0003, -0.0007, -0.0008, -0.0006, -0.0001, -0.0002, -0.0016, -0.0009, -0.0004, -0.0021, -0.0003, 0.0015, 0.0016, 0.0012, 0.0011, 0.0010, 0.0010, 0.0005, 0.0002, 0.0001, 0.0000, -0.0001, -0.0002, -0.0004, -0.0004, 0.0000, -0.0000, -0.0002, 0.0001, 0.0001, 0.0004, 0.0003, 0.0002, 0.0004, -0.0001, -0.0005, -0.0004, -0.0006, -0.0007, 0.0003, 0.0009, 0.0013, 0.0015, 0.0015, 0.0017, 0.0018]], [[ -0.0008, -0.0003, 0.0003, -0.0001, 0.0008, 0.0013, 0.0004, 0.0008, 0.0015, 0.0006, -0.0021, -0.0001, -0.0003, -0.0062, -0.0022, 0.0043, 0.0028, -0.0013, -0.0002, 0.0017, 0.0010, -0.0001, 0.0008, 0.0001, -0.0010, 0.0003, 0.0008, -0.0006, -0.0007, 0.0012, 0.0003, -0.0013, 0.0007, 0.0019, -0.0002, -0.0013, 0.0011, 0.0016, -0.0016, -0.0017, 0.0014, -0.0006, -0.0029, 0.0011, 0.0028, 0.0006, -0.0004, 0.0005, 0.0008, 0.0003]], ] ), "24.0": torch.tensor( [ [[ 0.0009, 0.0004, 0.0007, 0.0002, 0.0004, -0.0001, -0.0003, -0.0002, 0.0002, 0.0001, -0.0015, -0.0009, -0.0006, -0.0024, -0.0005, 0.0016, 0.0014, 0.0010, 0.0009, 0.0008, 0.0008, 0.0004, 0.0001, 0.0000, -0.0001, -0.0002, -0.0003, -0.0006, -0.0006, -0.0003, -0.0005, -0.0006, -0.0003, -0.0004, -0.0001, -0.0002, -0.0003, -0.0001, -0.0006, -0.0011, -0.0008, -0.0010, -0.0012, -0.0000, 0.0007, 0.0011, 0.0012, 0.0011, 0.0013, 0.0014]], [[ -0.0009, -0.0004, 0.0001, -0.0003, 0.0007, 0.0012, 0.0003, 0.0006, 0.0017, 0.0008, -0.0020, 0.0001, -0.0002, -0.0064, -0.0023, 0.0047, 0.0029, -0.0016, -0.0004, 0.0019, 0.0010, -0.0002, 0.0007, -0.0001, -0.0013, 0.0005, 0.0012, -0.0007, -0.0008, 0.0013, -0.0001, -0.0022, 0.0004, 0.0020, -0.0004, -0.0014, 0.0017, 0.0020, -0.0018, -0.0016, 0.0015, -0.0015, -0.0036, 0.0014, 0.0030, 0.0004, 0.0002, 0.0015, 0.0011, 0.0007]], ] ) }, "facebook/encodec_48khz": { "3.0": torch.tensor([[[ 0.005083, 0.004669, 0.005723, 0.005600, 0.004231, 0.003830, 0.003684, 0.003349, 0.003032, 
0.003055, 0.002768, 0.002370, 0.002384, 0.002450, 0.002391, 0.002363, 0.002357, 0.002435, 0.002568, 0.002463, 0.002137, 0.002092, 0.002440, 0.002772, 0.003035, 0.003473, 0.003963, 0.004288, 0.004315, 0.004087, 0.003618, 0.003166, 0.002874, 0.002775, 0.002820, 0.002758, 0.002565, 0.002498, 0.002583, 0.002671, 0.002656, 0.002613, 0.002433, 0.002236, 0.002215, 0.002302, 0.002287, 0.002113, 0.001909, 0.001767], [-0.003928, -0.002733, -0.001330, -0.001914, -0.002927, -0.003272, -0.003677, -0.003615, -0.003341, -0.002907, -0.002764, -0.002742, -0.002593, -0.002308, -0.002024, -0.001856, -0.001672, -0.001256, -0.000929, -0.001217, -0.001864, -0.002118, -0.002025, -0.001932, -0.001816, -0.001572, -0.001214, -0.000885, -0.000829, -0.000976, -0.001417, -0.001874, -0.002030, -0.001952, -0.001858, -0.001863, -0.001895, -0.001843, -0.001801, -0.001792, -0.001812, -0.001865, -0.002008, -0.002120, -0.002132, -0.002093, -0.002170, -0.002370, -0.002587, -0.002749]], [[ 0.004229, 0.003422, 0.005044, 0.006059, 0.005242, 0.004623, 0.004231, 0.004050, 0.004314, 0.004701, 0.004559, 0.004105, 0.003874, 0.003713, 0.003355, 0.003055, 0.003235, 0.003927, 0.004500, 0.004195, 0.003328, 0.002804, 0.002628, 0.002456, 0.002693, 0.003883, 0.005604, 0.006791, 0.006702, 0.005427, 0.003622, 0.002328, 0.002173, 0.002871, 0.003505, 0.003410, 0.002851, 0.002511, 0.002534, 0.002685, 0.002714, 0.002538, 0.002110, 0.001697, 0.001786, 0.002415, 0.002940, 0.002856, 0.002348, 0.001883], [-0.003444, -0.002916, -0.000590, 0.000157, -0.000702, -0.001472, -0.002032, -0.001891, -0.001283, -0.000670, -0.000590, -0.000875, -0.001090, -0.001095, -0.001172, -0.001287, -0.000907, 0.000111, 0.000858, 0.000471, -0.000532, -0.001127, -0.001463, -0.001853, -0.001762, -0.000666, 0.000964, 0.002054, 0.001914, 0.000743, -0.000876, -0.001990, -0.001951, -0.001042, -0.000229, -0.000171, -0.000558, -0.000752, -0.000704, -0.000609, -0.000594, -0.000723, -0.001085, -0.001455, -0.001374, -0.000795, -0.000350, -0.000480, -0.000993, 
-0.001432]]]), "6.0": torch.tensor([[[ 5.892794e-03, 5.767163e-03, 7.065284e-03, 7.068626e-03, 5.825328e-03, 5.601424e-03, 5.582351e-03, 5.209565e-03, 4.829186e-03, 4.809568e-03, 4.663883e-03, 4.402087e-03, 4.337528e-03, 4.311915e-03, 4.236566e-03, 4.209972e-03, 4.179818e-03, 4.196202e-03, 4.309553e-03, 4.267083e-03, 4.052189e-03, 4.068719e-03, 4.381632e-03, 4.692366e-03, 4.998885e-03, 5.466312e-03, 5.895300e-03, 6.115717e-03, 6.055626e-03, 5.773376e-03, 5.316667e-03, 4.826934e-03, 4.450697e-03, 4.315911e-03, 4.310716e-03, 4.202125e-03, 4.008702e-03, 3.957694e-03, 4.017603e-03, 4.060654e-03, 4.036821e-03, 3.923071e-03, 3.659022e-03, 3.427053e-03, 3.387271e-03, 3.462438e-03, 3.434755e-03, 3.247944e-03, 3.009581e-03, 2.800536e-03], [-1.867314e-03, -6.082351e-04, 9.374358e-04, 5.555808e-04, -3.020080e-04, -5.281629e-04, -9.364292e-04, -1.057594e-03, -9.703087e-04, -6.292185e-04, -4.193477e-04, -3.605868e-04, -2.948678e-04, -1.198237e-04, 4.924605e-05, 1.602105e-04, 3.162385e-04, 6.700790e-04, 9.868707e-04, 8.484383e-04, 4.327767e-04, 3.108105e-04, 4.244343e-04, 5.422112e-04, 7.239584e-04, 1.008546e-03, 1.265120e-03, 1.447669e-03, 1.436084e-03, 1.271058e-03, 8.684017e-04, 4.149990e-04, 2.143449e-04, 2.508474e-04, 3.018488e-04, 2.782424e-04, 2.369677e-04, 3.040710e-04, 3.242530e-04, 2.599912e-04, 2.211208e-04, 1.311762e-04, -9.807519e-05, -2.752687e-04, -3.114068e-04, -2.832832e-04, -3.900219e-04, -6.142824e-04, -8.507833e-04, -1.055882e-03]], [[ 3.971702e-04, -2.164055e-04, 1.562327e-03, 2.695718e-03, 2.374928e-03, 2.145125e-03, 1.870762e-03, 1.852614e-03, 2.074345e-03, 2.312302e-03, 2.222824e-03, 1.876336e-03, 1.609606e-03, 1.420574e-03, 1.193270e-03, 9.592943e-04, 1.132237e-03, 1.776782e-03, 2.258269e-03, 1.945908e-03, 9.930646e-04, 1.733529e-04, -2.533881e-04, -3.138177e-04, 3.226010e-04, 1.859203e-03, 3.879325e-03, 5.267750e-03, 5.101699e-03, 3.609465e-03, 1.653315e-03, 2.709297e-04, -3.190451e-05, 5.129501e-04, 1.224789e-03, 1.397457e-03, 1.110794e-03, 
8.736057e-04, 8.860155e-04, 1.055910e-03, 1.100855e-03, 8.834896e-04, 3.825913e-04, -3.267327e-05, 6.586456e-05, 7.147206e-04, 1.394876e-03, 1.535393e-03, 1.192172e-03, 7.061819e-04], [-6.897163e-03, -6.407891e-03, -4.015491e-03, -3.082125e-03, -3.434983e-03, -3.885052e-03, -4.456392e-03, -4.296550e-03, -3.861045e-03, -3.553474e-03, -3.547473e-03, -3.800863e-03, -4.123025e-03, -4.237277e-03, -4.244958e-03, -4.263899e-03, -3.808572e-03, -2.811858e-03, -2.147519e-03, -2.516703e-03, -3.550721e-03, -4.353373e-03, -4.846224e-03, -4.960613e-03, -4.273535e-03, -2.714785e-03, -7.043980e-04, 6.689885e-04, 5.069164e-04, -9.122533e-04, -2.816979e-03, -4.124952e-03, -4.235019e-03, -3.491365e-03, -2.676077e-03, -2.381226e-03, -2.492559e-03, -2.634424e-03, -2.632524e-03, -2.528266e-03, -2.536691e-03, -2.746170e-03, -3.187869e-03, -3.553530e-03, -3.462211e-03, -2.862707e-03, -2.273719e-03, -2.201617e-03, -2.565818e-03, -3.044683e-03]]]), "12.0": torch.tensor([[[ 2.237194e-03, 2.508208e-03, 3.986347e-03, 4.020395e-03, 2.889890e-03, 2.733388e-03, 2.684146e-03, 2.251372e-03, 1.787451e-03, 1.720550e-03, 1.689184e-03, 1.495478e-03, 1.321027e-03, 1.185375e-03, 1.098422e-03, 1.055453e-03, 9.591801e-04, 9.328910e-04, 1.026154e-03, 1.031992e-03, 9.155220e-04, 9.732856e-04, 1.282264e-03, 1.624059e-03, 1.920021e-03, 2.333685e-03, 2.730524e-03, 2.919153e-03, 2.856711e-03, 2.632692e-03, 2.256703e-03, 1.901129e-03, 1.684760e-03, 1.638201e-03, 1.644909e-03, 1.569378e-03, 1.448412e-03, 1.478291e-03, 1.580583e-03, 1.633777e-03, 1.597190e-03, 1.475462e-03, 1.242885e-03, 1.065243e-03, 1.052842e-03, 1.103825e-03, 1.059115e-03, 9.251673e-04, 7.235570e-04, 5.053390e-04], [-4.534880e-03, -3.111026e-03, -1.486247e-03, -1.739966e-03, -2.399862e-03, -2.583335e-03, -3.157276e-03, -3.517166e-03, -3.598212e-03, -3.303007e-03, -3.037215e-03, -2.982930e-03, -3.026671e-03, -2.958387e-03, -2.836909e-03, -2.775315e-03, -2.719575e-03, -2.431532e-03, -2.090512e-03, -2.095603e-03, -2.366266e-03, -2.404480e-03, 
-2.235661e-03, -2.063206e-03, -1.888533e-03, -1.640449e-03, -1.407782e-03, -1.250053e-03, -1.275359e-03, -1.373277e-03, -1.601508e-03, -1.838720e-03, -1.876643e-03, -1.736149e-03, -1.622051e-03, -1.578928e-03, -1.564748e-03, -1.455850e-03, -1.391748e-03, -1.418254e-03, -1.462577e-03, -1.554713e-03, -1.730076e-03, -1.829485e-03, -1.816249e-03, -1.772218e-03, -1.855736e-03, -2.013720e-03, -2.196174e-03, -2.378810e-03]], [[ 8.993230e-04, 6.808847e-04, 2.595528e-03, 3.586462e-03, 3.023965e-03, 2.479527e-03, 1.868662e-03, 1.565682e-03, 1.563900e-03, 1.666364e-03, 1.715061e-03, 1.609638e-03, 1.294764e-03, 8.647116e-04, 5.122397e-04, 2.899101e-04, 3.817413e-04, 8.303743e-04, 1.253686e-03, 1.179640e-03, 6.591807e-04, 1.167982e-04, -3.405492e-04, -5.258832e-04, -4.165239e-05, 1.393227e-03, 3.473584e-03, 4.953051e-03, 4.779391e-03, 3.182305e-03, 1.140233e-03, -2.133392e-04, -4.233644e-04, 2.426380e-04, 1.126914e-03, 1.557022e-03, 1.490265e-03, 1.264647e-03, 1.170405e-03, 1.237709e-03, 1.112253e-03, 6.990263e-04, 1.700171e-04, -1.761244e-04, 1.852706e-05, 8.140961e-04, 1.621285e-03, 1.813497e-03, 1.394625e-03, 7.860070e-04], [-4.677887e-03, -3.966209e-03, -1.634288e-03, -8.592710e-04, -1.395248e-03, -2.189968e-03, -3.198638e-03, -3.410639e-03, -3.241918e-03, -3.051681e-03, -2.845973e-03, -2.786646e-03, -3.078280e-03, -3.367662e-03, -3.450923e-03, -3.427895e-03, -3.058358e-03, -2.258006e-03, -1.607386e-03, -1.647450e-03, -2.164357e-03, -2.647080e-03, -3.110953e-03, -3.304542e-03, -2.798792e-03, -1.407999e-03, 5.630683e-04, 1.961336e-03, 1.813856e-03, 3.529640e-04, -1.526076e-03, -2.695498e-03, -2.702039e-03, -1.889018e-03, -9.337939e-04, -3.885011e-04, -2.970786e-04, -4.415356e-04, -5.492531e-04, -5.430978e-04, -7.051138e-04, -1.102020e-03, -1.577104e-03, -1.846151e-03, -1.623901e-03, -8.853760e-04, -1.772702e-04, -4.866864e-05, -4.633263e-04, -1.017192e-03]]]), "24.0": torch.tensor( [ [ [0.0004, 0.0008, 0.0024, 0.0024, 0.0013, 0.0013, 0.0013, 0.0009, 0.0005, 0.0005, 0.0006, 
0.0005, 0.0005, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003, 0.0004, 0.0003, 0.0004, 0.0008, 0.0012, 0.0015, 0.0018, 0.0021, 0.0022, 0.0021, 0.0019, 0.0016, 0.0014, 0.0012, 0.0011, 0.0012, 0.0012, 0.0012, 0.0012, 0.0013, 0.0014, 0.0014, 0.0013, 0.0011, 0.0009, 0.0009, 0.0010, 0.0010, 0.0010, 0.0009, 0.0007], [ -0.0055, -0.0040, -0.0024, -0.0026, -0.0031, -0.0031, -0.0036, -0.0039, -0.0039, -0.0035, -0.0031, -0.0029, -0.0028, -0.0027, -0.0026, -0.0024, -0.0023, -0.0020, -0.0017, -0.0016, -0.0017, -0.0017, -0.0015, -0.0012, -0.0010, -0.0008, -0.0006, -0.0004, -0.0004, -0.0005, -0.0006, -0.0007, -0.0006, -0.0004, -0.0002, -0.0001, 0.0001, 0.0002, 0.0003, 0.0004, 0.0004, 0.0003, 0.0001, 0.0001, 0.0000, 0.0001, 0.0000, -0.0001, -0.0002, -0.0004], ], [ [-0.0024, -0.0029, -0.0009, 0.0002, -0.0002, -0.0007, -0.0012, -0.0013, -0.0012, -0.0011, -0.0011, -0.0012, -0.0016, -0.0021, -0.0024, -0.0026, -0.0024, -0.0018, -0.0013, -0.0015, -0.0022, -0.0029, -0.0035, -0.0038, -0.0031, -0.0015, 0.0008, 0.0025, 0.0023, 0.0006, -0.0016, -0.0030, -0.0032, -0.0024, -0.0015, -0.0010, -0.0009, -0.0011, -0.0010, -0.0009, -0.0010, -0.0014, -0.0020, -0.0023, -0.0020, -0.0011, -0.0001, 0.0001, -0.0003, -0.0009], [-0.0086, -0.0081, -0.0059, -0.0050, -0.0053, -0.0061, -0.0071, -0.0071, -0.0069, -0.0067, -0.0066, -0.0066, -0.0070, -0.0073, -0.0074, -0.0073, -0.0069, -0.0060, -0.0053, -0.0055, -0.0061, -0.0067, -0.0072, -0.0074, -0.0067, -0.0052, -0.0031, -0.0015, -0.0016, -0.0029, -0.0048, -0.0059, -0.0059, -0.0051, -0.0041, -0.0036, -0.0034, -0.0034, -0.0034, -0.0033, -0.0035, -0.0039, -0.0043, -0.0046, -0.0043, -0.0035, -0.0027, -0.0025, -0.0029, -0.0034], ], ] ) } } # ---- error over whole batch EXPECTED_CODEC_ERROR_BATCH = { "facebook/encodec_24khz": { "1.5": 0.0011174238752573729, "3.0": 0.0009308119188062847, "6.0": 0.0008, "12.0": 0.0006830253987573087, "24.0": 0.000642190920189023, }, "facebook/encodec_48khz": { "3.0": 0.00039895583176985383, "6.0": 0.0003249854489695281, "12.0": 
0.0002540576097089797,
        "24.0": 0.00021899679268244654,
    }
}
# fmt: on


@slow
@require_torch
class EncodecIntegrationTest(unittest.TestCase):
    """Integration tests for EncodecModel: encode/decode real LibriSpeech audio on each
    checkpoint/bandwidth combination and compare codes, scales, decoded waveforms and
    overall codec error against precomputed golden values."""

    @parameterized.expand(
        [
            # one test case per (checkpoint, bandwidth) pair, e.g. "encodec_24khz_1p5"
            (f"{os.path.basename(model_id)}_{bandwidth.replace('.', 'p')}", model_id, bandwidth)
            for model_id, v in EXPECTED_ENCODER_CODES.items()
            for bandwidth in v
        ]
    )
    def test_integration(self, name, model_id, bandwidth):
        """Single-sample round trip: encode one LibriSpeech sample, check encoder codes and
        scales, decode, and check decoded audio, codec error, and forward/enc-dec parity."""
        # load model
        model = EncodecModel.from_pretrained(model_id).to(torch_device)
        processor = AutoProcessor.from_pretrained(model_id)

        # load audio (resampled to the checkpoint's expected sampling rate)
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        audio_array = librispeech_dummy[0]["audio"]["array"]

        # multi-channel checkpoints (e.g. 48kHz stereo) get the mono signal duplicated per channel
        if model.config.audio_channels > 1:
            audio_array = np.array([audio_array] * model.config.audio_channels)
        inputs = processor(
            raw_audio=audio_array,
            sampling_rate=processor.sampling_rate,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)

        model = model.eval()
        with torch.no_grad():
            # Compare encoder outputs with expected values
            encoded_frames = model.encode(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
            # concatenate per-frame codebook indices along time; compare only the golden prefix
            codes = torch.cat([encoded[0] for encoded in encoded_frames["audio_codes"]], dim=-1).unsqueeze(0)
            torch.testing.assert_close(
                codes[..., : EXPECTED_ENCODER_CODES[model_id][bandwidth].shape[-1]],
                EXPECTED_ENCODER_CODES[model_id][bandwidth].to(torch_device),
                rtol=1e-4,
                atol=1e-4,
            )
            # scales are None for the 24kHz (non-normalized) checkpoint
            if EXPECTED_ENCODER_SCALES[model_id][bandwidth] is not None:
                # NOTE(review): unlike every other comparison, the expected scales are not
                # moved to torch_device here, and torch.tensor(...) places `scales` on CPU —
                # confirm this is intentional for GPU runs.
                scales = torch.tensor([encoded[0].squeeze() for encoded in encoded_frames["audio_scales"]])
                torch.testing.assert_close(scales, EXPECTED_ENCODER_SCALES[model_id][bandwidth], rtol=1e-4, atol=1e-4)

            # Compare decoder outputs with expected values
            decoded_frames = model.decode(
                encoded_frames["audio_codes"],
                encoded_frames["audio_scales"],
                inputs["padding_mask"],
                last_frame_pad_length=encoded_frames["last_frame_pad_length"],
            )
            torch.testing.assert_close(
                decoded_frames["audio_values"][0][..., : EXPECTED_DECODER_OUTPUTS[model_id][bandwidth].shape[-1]],
                EXPECTED_DECODER_OUTPUTS[model_id][bandwidth].to(torch_device),
                rtol=1e-4,
                atol=1e-4,
            )

            # Compare codec error with expected values
            codec_error = compute_rmse(decoded_frames["audio_values"], inputs["input_values"])
            torch.testing.assert_close(codec_error, EXPECTED_CODEC_ERROR[model_id][bandwidth], rtol=1e-4, atol=1e-4)

            # make sure forward and enc-dec give same result
            full_enc = model(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
            torch.testing.assert_close(
                full_enc["audio_values"],
                decoded_frames["audio_values"],
                rtol=1e-4,
                atol=1e-4,
            )

    @parameterized.expand(
        [
            # one test case per (checkpoint, bandwidth) pair, e.g. "encodec_48khz_3p0"
            (f"{os.path.basename(model_id)}_{bandwidth.replace('.', 'p')}", model_id, bandwidth)
            for model_id, v in EXPECTED_ENCODER_CODES_BATCH.items()
            for bandwidth in v
        ]
    )
    def test_batch(self, name, model_id, bandwidth):
        """Batched round trip over the last two LibriSpeech samples (padded batch):
        same checks as test_integration but against the *_BATCH golden values."""
        # load model
        model = EncodecModel.from_pretrained(model_id).to(torch_device)
        processor = AutoProcessor.from_pretrained(model_id)

        # load audio
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))

        n_channels = model.config.audio_channels
        if n_channels == 1:
            audio_samples = [audio_sample["array"] for audio_sample in librispeech_dummy[-2:]["audio"]]
        else:
            audio_samples = []
            for _sample in librispeech_dummy[-2:]["audio"]:
                # concatenate mono channels to target number of channels
                audio_array = np.concatenate([_sample["array"][np.newaxis]] * n_channels, axis=0)
                audio_samples.append(audio_array)
        inputs = processor(
            raw_audio=audio_samples,
            sampling_rate=processor.sampling_rate,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)

        # apply model
        model = model.eval()
        with torch.no_grad():
            # Compare encoder outputs with expected values
            encoded_frames = model.encode(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
            # reorder frame-major codes to (batch, codebooks, frames*time) before comparing
            codes = encoded_frames["audio_codes"].permute(1, 2, 0, 3)
            codes = codes.reshape(codes.size(0), codes.size(1), -1)
            torch.testing.assert_close(
                codes[..., : EXPECTED_ENCODER_CODES_BATCH[model_id][bandwidth].shape[-1]],
                EXPECTED_ENCODER_CODES_BATCH[model_id][bandwidth].to(torch_device),
                rtol=1e-4,
                atol=1e-4,
            )
            # scales are None for the 24kHz (non-normalized) checkpoint
            if EXPECTED_ENCODER_SCALES_BATCH[model_id][bandwidth] is not None:
                scales = torch.stack(encoded_frames["audio_scales"])
                torch.testing.assert_close(
                    scales, EXPECTED_ENCODER_SCALES_BATCH[model_id][bandwidth].to(torch_device), rtol=1e-4, atol=1e-4
                )

            # Compare decoder outputs with expected values
            decoded_frames = model.decode(
                encoded_frames["audio_codes"],
                encoded_frames["audio_scales"],
                inputs["padding_mask"],
                last_frame_pad_length=encoded_frames["last_frame_pad_length"],
            )
            torch.testing.assert_close(
                decoded_frames["audio_values"][..., : EXPECTED_DECODER_OUTPUTS_BATCH[model_id][bandwidth].shape[-1]],
                EXPECTED_DECODER_OUTPUTS_BATCH[model_id][bandwidth].to(torch_device),
                rtol=1e-4,
                atol=1e-4,
            )

            # Compare codec error with expected values
            codec_error = compute_rmse(decoded_frames["audio_values"], inputs["input_values"])
            torch.testing.assert_close(
                codec_error, EXPECTED_CODEC_ERROR_BATCH[model_id][bandwidth], rtol=1e-4, atol=1e-4
            )

            # make sure forward and enc-dec give same result
            input_values_dec = model(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
            torch.testing.assert_close(
                input_values_dec["audio_values"], decoded_frames["audio_values"], rtol=1e-4, atol=1e-4
            )
transformers/tests/models/encodec/test_modeling_encodec.py/0
{ "file_path": "transformers/tests/models/encodec/test_modeling_encodec.py", "repo_id": "transformers", "token_count": 45000 }
585
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the FlauBERT tokenizer."""

import json
import os
import unittest

from transformers import FlaubertTokenizer
from transformers.models.flaubert.tokenization_flaubert import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class FlaubertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """BPE tokenization tests for FlauBERT, sharing the common tokenizer test suite."""

    from_pretrained_id = "flaubert/flaubert_base_cased"
    tokenizer_class = FlaubertTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        """Write a tiny BPE vocab/merges pair into the temp dir used by the mixin."""
        super().setUpClass()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "i</w>", "lo", "low", "ne", "new", "er</w>", "low</w>", "lowest</w>", "new</w>", "newer</w>", "wider</w>", "<unk>"]  # fmt: skip
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["n e 300", "ne w 301", "e r</w> 302", ""]

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(cls.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    # Copied from transformers.tests.models.xlm.test_tokenization_xlm.XLMTokenizationTest.test_full_tokenizer
    def test_full_tokenizer(self):
        """Tokenizing "lower newer" with the toy vocab applies the expected BPE merges."""
        tokenizer = self.get_tokenizer()

        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er</w>", "new", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 18, 17, 18, 24]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    # Copied from transformers.tests.models.xlm.test_tokenization_xlm.XLMTokenizationTest.test_sequence_builders
    def test_sequence_builders(self):
        """Special tokens: single sequence is <s> x </s>; a pair is <s> x </s> y </s>."""
        tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # (removed two duplicated leftover debug print(encoded_sentence) calls)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
transformers/tests/models/flaubert/test_tokenization_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_tokenization_flaubert.py", "repo_id": "transformers", "token_count": 1361 }
586
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the GLM-4.1V video processor."""

import unittest

import numpy as np

from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs


# NOTE(review): PIL is imported under the torch-availability guard, not the
# vision guard — presumably intentional, but verify against sibling test files.
if is_torch_available():
    from PIL import Image

if is_vision_available():
    if is_torchvision_available():
        from transformers import Glm4vVideoProcessor
        from transformers.models.glm4v.video_processing_glm4v import smart_resize


class Glm4vVideoProcessingTester:
    """Builds processor kwargs, synthetic video inputs, metadata, and the
    expected patchified output shape for the GLM-4.1V video processor tests."""

    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        temporal_patch_size=2,
        patch_size=14,
        merge_size=2,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
    ):
        # `size` uses pixel budgets (shortest/longest edge) rather than fixed H/W.
        size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.temporal_patch_size = temporal_patch_size
        self.patch_size = patch_size
        self.merge_size = merge_size

    def prepare_video_processor_dict(self):
        """Return the kwargs used to instantiate the video processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_sample_frames": True,
        }

    def prepare_video_metadata(self, videos):
        """Build one metadata dict (fps/duration/total_frames) per input video."""
        video_metadata = []
        for video in videos:
            # Frame count depends on the container type of each video input.
            if isinstance(video, list):
                num_frames = len(video)
            elif hasattr(video, "shape"):
                if len(video.shape) == 4:  # (T, H, W, C)
                    num_frames = video.shape[0]
                else:
                    num_frames = 1
            else:
                num_frames = self.num_frames
            metadata = {
                "fps": 2,
                "duration": num_frames / 2,
                "total_frames": num_frames,
            }
            video_metadata.append(metadata)
        return video_metadata

    def expected_output_video_shape(self, videos):
        """Compute the expected flattened patch-sequence shape [seq_len, hidden_dim]
        produced by the processor for the given videos."""
        grid_t = self.num_frames // self.temporal_patch_size
        hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
        seq_len = 0
        for video in videos:
            # Normalize each input (PIL list / array / nested list) to something
            # with a `.shape` so H and W can be read off.
            if isinstance(video, list) and isinstance(video[0], Image.Image):
                video = np.stack([np.array(frame) for frame in video])
            elif hasattr(video, "shape"):
                pass
            else:
                video = np.array(video)
            if hasattr(video, "shape") and len(video.shape) >= 3:
                if len(video.shape) == 4:
                    t, height, width = video.shape[:3]
                elif len(video.shape) == 3:
                    height, width = video.shape[:2]
                    t = 1
                else:
                    t, height, width = self.num_frames, self.min_resolution, self.min_resolution
            else:
                t, height, width = self.num_frames, self.min_resolution, self.min_resolution
            # smart_resize snaps H/W to multiples of patch_size * merge_size
            # within the configured pixel budget.
            resized_height, resized_width = smart_resize(
                t,
                height,
                width,
                factor=self.patch_size * self.merge_size,
                min_pixels=self.size["shortest_edge"],
                max_pixels=self.size["longest_edge"],
            )
            grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
            seq_len += grid_t * grid_h * grid_w
        return [seq_len, hidden_dim]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Create a batch of synthetic videos via the shared test helper."""
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
        return videos


@require_torch
@require_vision
class Glm4vVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Shape/IO tests for `Glm4vVideoProcessor` across PIL, numpy, and torch inputs."""

    fast_video_processing_class = Glm4vVideoProcessor if is_torchvision_available() else None
    input_name = "pixel_values_videos"

    def setUp(self):
        super().setUp()
        self.video_processor_tester = Glm4vVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` honors defaults and kwarg overrides for `size`."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10})

        video_processor = self.fast_video_processing_class.from_dict(
            self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42}
        )
        self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42})

    def test_call_pil(self):
        """Single and batched PIL videos produce the expected patch-sequence shape."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pil"
            )
            for video in video_inputs:
                self.assertIsInstance(video[0], Image.Image)

            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Not batched.
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            # Batched.
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_numpy(self):
        """Same shape checks as `test_call_pil`, with numpy array inputs."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_pytorch(self):
        """Same shape checks as `test_call_pil`, with torch tensor inputs."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pt"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    @unittest.skip("Skip for now, the test needs adjustment for GLM-4.1V")
    def test_call_numpy_4_channels(self):
        for video_processing_class in self.video_processor_list:
            # Test that can process videos which have an arbitrary number of channels
            # Initialize video_processing
            video_processor = video_processing_class(**self.video_processor_dict)

            # create random numpy tensors
            self.video_processor_tester.num_channels = 4
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )

            # Test not batched input
            encoded_videos = video_processor(
                video_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=0,
                image_std=1,
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            # Test batched
            encoded_videos = video_processor(
                video_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=0,
                image_std=1,
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_nested_input(self):
        """Tests that the processor can work with nested list where each video is a list of arrays"""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            # Each video becomes a plain list of per-frame arrays.
            video_inputs_nested = [list(video) for video in video_inputs]
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)

            # Test not batched input
            encoded_videos = video_processing(
                video_inputs_nested[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            # Test batched
            encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_sample_frames(self):
        """Frame sampling requires metadata; calling without it must raise."""
        for video_processing_class in self.video_processor_list:
            video_processor_dict = self.video_processor_dict.copy()
            video_processing = video_processing_class(**video_processor_dict)

            # Temporarily override the tester's geometry; restored at the end.
            prev_num_frames = self.video_processor_tester.num_frames
            self.video_processor_tester.num_frames = 8
            prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None)
            prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None)
            self.video_processor_tester.min_resolution = 56
            self.video_processor_tester.max_resolution = 112

            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False,
                return_tensors="torch",
            )

            # NOTE(review): this key is "total_num_frames" while
            # prepare_video_metadata uses "total_frames" — confirm against the
            # processor's expected metadata schema.
            metadata = [[{"total_num_frames": 8, "fps": 4}]]
            batched_metadata = metadata * len(video_inputs)

            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[
                self.input_name
            ]
            encoded_videos_batched = video_processing(
                video_inputs, return_tensors="pt", video_metadata=batched_metadata
            )[self.input_name]
            self.assertIsNotNone(encoded_videos)
            self.assertIsNotNone(encoded_videos_batched)
            # Output is a flattened 2D patch sequence.
            self.assertEqual(len(encoded_videos.shape), 2)
            self.assertEqual(len(encoded_videos_batched.shape), 2)

            # Sampling without metadata is an error.
            with self.assertRaises(ValueError):
                video_processing(video_inputs[0], return_tensors="pt")[self.input_name]

            # Restore tester state so later tests see the original geometry.
            self.video_processor_tester.num_frames = prev_num_frames
            if prev_min_resolution is not None:
                self.video_processor_tester.min_resolution = prev_min_resolution
            if prev_max_resolution is not None:
                self.video_processor_tester.max_resolution = prev_max_resolution
transformers/tests/models/glm4v/test_video_processing_glm4v.py/0
{ "file_path": "transformers/tests/models/glm4v/test_video_processing_glm4v.py", "repo_id": "transformers", "token_count": 6745 }
587
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GPT Neo model."""

import unittest

from transformers import GPTNeoConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPT2Tokenizer,
        GPTNeoForCausalLM,
        GPTNeoForQuestionAnswering,
        GPTNeoForSequenceClassification,
        GPTNeoForTokenClassification,
        GPTNeoModel,
    )


class GPTNeoModelTester:
    """Builds small configs/inputs and `create_and_check_*` helpers shared by the
    GPT Neo model tests below."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        # NOTE(review): mutable default shared across instances — safe only
        # because it is never mutated here.
        attention_types=[[["global", "local"], 1]],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        window_size=7,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.window_size = window_size
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        # bos/eos/pad all map to the last vocab id in this tiny test config.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
        self.attention_types = attention_types

    def get_large_model_config(self):
        return GPTNeoConfig.from_pretrained("gpt-neo-125M")

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return GPTNeoConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            max_position_embeddings=self.max_position_embeddings,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            window_size=self.window_size,
            attention_types=self.attention_types,
        )

    def get_pipeline_config(self):
        # Pipelines need a slightly larger vocab than the default test config.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Forward pass with/without optional inputs; checks output shape."""
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # past_key_values is not implemented
        # self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_gpt_neo_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """One-token continuation with a KV cache must match the uncached forward."""
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)

        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_gpt_neo_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Cached continuation must match the full pass when half the prefix is masked."""
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_gpt_neo_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Multi-token (3) continuation with a KV cache must match the uncached forward."""
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True)

        output, past = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past
        )["last_hidden_state"]
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """CausalLM head returns scalar loss and (batch, seq, vocab) logits."""
        model = GPTNeoForCausalLM(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_gpt_neo_for_question_answering(
        self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
    ):
        """QA head produces per-token start/end logits."""
        config.num_labels = self.num_labels
        model = GPTNeoForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_gpt_neo_for_sequence_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
    ):
        """Sequence-classification head produces (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = GPTNeoForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_gpt_neo_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
    ):
        """Token-classification head produces (batch, seq, num_labels) logits."""
        config.num_labels = self.num_labels
        model = GPTNeoForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        """Loss backward works, optionally with gradient checkpointing enabled."""
        model = GPTNeoForCausalLM(config)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        model.to(torch_device)

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` output to the common-test inputs dict."""
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class GPTNeoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + GPT Neo-specific model tests driven by `GPTNeoModelTester`."""

    all_model_classes = (
        (
            GPTNeoModel,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoModel,
            "question-answering": GPTNeoForQuestionAnswering,
            "text-classification": GPTNeoForSequenceClassification,
            "text-generation": GPTNeoForCausalLM,
            "token-classification": GPTNeoForTokenClassification,
            "zero-shot": GPTNeoForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_missing_keys = False
    test_pruning = False
    test_model_parallel = False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        return inputs_dict

    def setUp(self):
        self.model_tester = GPTNeoModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_gpt_neo_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model(*config_and_inputs)

    def test_gpt_neo_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model_past(*config_and_inputs)

    def test_gpt_neo_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model_attention_mask_past(*config_and_inputs)

    def test_gpt_neo_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model_past_large_inputs(*config_and_inputs)

    def test_gpt_neo_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_gpt_neo_question_answering_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_question_answering(*config_and_inputs)

    def test_gpt_neo_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_sequence_classification(*config_and_inputs)

    def test_gpt_neo_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_token_classification(*config_and_inputs)

    def test_gpt_neo_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def _get_hidden_states(self):
        # Fixed (1, 8, 4) hidden states used by test_local_attn_probs.
        return torch.tensor(
            [
                [
                    [0.4983, -0.7584, -1.6944, 0.5440],
                    [2.6918, 0.4206, 0.4176, 0.2055],
                    [-0.0071, -0.0405, -1.4920, -0.3630],
                    [1.0492, 0.1599, -1.7648, 0.2419],
                    [-1.8348, 2.0514, -0.1946, 0.3203],
                    [0.7672, -1.1600, -1.7118, -0.9056],
                    [0.2986, 0.5372, 0.7729, -0.1927],
                    [0.0285, 0.2629, -1.1156, -1.1992],
                ]
            ],
            dtype=torch.float32,
            device=torch_device,
        )

    def test_local_attn_probs(self):
        """Local attention must zero out masked tokens and tokens outside the window."""
        model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
        layer = model.h[1].attn.attention.to(torch_device)
        hidden_states = self._get_hidden_states()
        hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)

        batch_size, seq_length, _ = hidden_states.shape
        mask_tokens = 2
        attention_mask = torch.ones(batch_size, seq_length, device=torch_device, dtype=torch.long)
        attention_mask[:, -mask_tokens:] = 0  # dont attend last mask_tokens

        attention_mask = attention_mask.view(batch_size, -1)
        attention_mask = attention_mask[:, None, None, :]
        attention_mask = (1.0 - attention_mask) * -10000.0

        attn_probs = layer(hidden_states, attention_mask=attention_mask, output_attentions=True)[-1]

        # the last 2 tokens are masked, and should have 0 attn_probs
        self.assertTrue(torch.all(attn_probs[:, :, -mask_tokens:, -mask_tokens:] == 0))

        # in local attention each token can only attend to the previous window_size tokens (including itself)
        # here window_size is 4, so a token at index 5 can only attend to indices [2, 3, 4, 5]
        # and the attn_probs should be 0 for token [0, 1]
        self.assertTrue(torch.all(attn_probs[:, :, 5, 2:6] != 0))
        self.assertTrue(torch.all(attn_probs[:, :, 5, :2] == 0))


@require_torch
class GPTNeoModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests against the pretrained EleutherAI/gpt-neo-1.3B checkpoint."""

    @cached_property
    def model(self):
        return GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B").to(torch_device)

    @cached_property
    def tokenizer(self):
        return GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

    @slow
    def test_lm_generate_gpt_neo(self):
        """Greedy generation matches pinned ids, with and without checkpointing."""
        for checkpointing in [True, False]:
            model = self.model
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device)  # The dog
            # The dog-eared copy of the book, which is a collection of essays by the late author,
            expected_output_ids = [464, 3290, 12, 3380, 4866, 286, 262, 1492, 11, 543, 318, 257, 4947, 286, 27126, 416, 262, 2739, 1772, 11]  # fmt: skip
            output_ids = model.generate(input_ids, do_sample=False)
            self.assertListEqual(output_ids[0].tolist(), expected_output_ids)

    @slow
    def test_gpt_neo_sample(self):
        """Sampling with a fixed seed reproduces a pinned continuation."""
        model = self.model
        tokenizer = self.tokenizer

        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = "Today is a nice day and if you don’t get the memo here is what you can"
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        """Left-padded batched generation matches per-sentence generation."""
        model = self.model
        tokenizer = self.tokenizer

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I am",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        # Shorten the padded run by the number of pad tokens so lengths line up.
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a kitty. She is a very sweet and loving",
            "Today, I am going to talk about the best way to get a job in the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        model_name = "EleutherAI/gpt-neo-1.3B"
        model = GPTNeoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
transformers/tests/models/gpt_neo/test_modeling_gpt_neo.py/0
{ "file_path": "transformers/tests/models/gpt_neo/test_modeling_gpt_neo.py", "repo_id": "transformers", "token_count": 10940 }
588
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the Granite Speech processor."""

import shutil
import tempfile
import unittest

import numpy as np
import pytest
import torch
from parameterized import parameterized

from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers.testing_utils import (
    require_torch,
    require_torch_accelerator,
    require_torchaudio,
    torch_device,
)
from transformers.utils import is_torchaudio_available


if is_torchaudio_available():
    from transformers import GraniteSpeechFeatureExtractor, GraniteSpeechProcessor


@require_torch
@require_torchaudio
class GraniteSpeechProcessorTest(unittest.TestCase):
    """Save/load, input-validation, and audio-token-filling tests for
    `GraniteSpeechProcessor`."""

    def setUp(self):
        # Persist the pretrained processor to a temp dir so save/load can be tested.
        self.tmpdirname = tempfile.mkdtemp()

        self.checkpoint = "ibm-granite/granite-speech-3.3-8b"
        processor = GraniteSpeechProcessor.from_pretrained(self.checkpoint)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_audio_processor(self, **kwargs):
        return GraniteSpeechFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """Ensure we can save / reload a processor correctly."""
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()

        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )
        processor.save_pretrained(self.tmpdirname)
        processor = GraniteSpeechProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, GPT2TokenizerFast)
        self.assertEqual(processor.audio_processor.to_json_string(), audio_processor.to_json_string())
        self.assertIsInstance(processor.audio_processor, GraniteSpeechFeatureExtractor)

    def test_requires_text(self):
        """Ensure we require text"""
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()

        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )
        with pytest.raises(TypeError):
            processor(text=None)

    def test_bad_text_fails(self):
        """Ensure we gracefully fail if text is the wrong type."""
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()

        processor = GraniteSpeechProcessor(tokenizer=tokenizer, audio_processor=audio_processor)
        with pytest.raises(TypeError):
            processor(text=424, audio=None)

    def test_bad_nested_text_fails(self):
        """Ensure we gracefully fail if text is the wrong nested type."""
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()

        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )
        with pytest.raises(TypeError):
            processor(text=[424], audio=None)

    def test_bad_audio_fails(self):
        """Ensure we gracefully fail if audio is the wrong type."""
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()

        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )
        with pytest.raises(TypeError):
            processor(text=None, audio="foo")

    def test_nested_bad_audio_fails(self):
        """Ensure we gracefully fail if audio is the wrong nested type."""
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()

        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )
        with pytest.raises(TypeError):
            processor(text=None, audio=["foo"])

    # NOTE(review): `num_expected_features` is unused in this test (the computed
    # feature count is used instead) — verify whether it should be asserted.
    @parameterized.expand(
        [
            ([1, 269920], [171], torch.rand),
            ([1, 269920], [171], np.random.rand),
        ]
    )
    def test_audio_token_filling_same_len_feature_tensors(self, vec_dims, num_expected_features, random_func):
        """Ensure audio token filling is handled correctly when we have
        one or more audio inputs whose features are all the same length
        stacked into a tensor / numpy array.

        NOTE: Currently we enforce that each sample can only have one audio.
        """
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()
        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )

        audio = random_func(*vec_dims) - 0.5
        audio_tokens = processor.audio_token * vec_dims[0]
        inputs = processor(text=f"{audio_tokens} Can you compare this audio?", audio=audio, return_tensors="pt")

        # Check the number of audio tokens
        audio_token_id = tokenizer.get_vocab()[processor.audio_token]

        # Make sure the number of audio tokens matches the number of features
        num_computed_features = processor.audio_processor._get_num_audio_features(
            [vec_dims[1] for _ in range(vec_dims[0])],
        )
        num_audio_tokens = int(torch.sum(inputs["input_ids"] == audio_token_id))
        assert list(inputs["input_features"].shape) == [vec_dims[0], 844, 160]
        assert sum(num_computed_features) == num_audio_tokens

    def test_audio_token_filling_varying_len_feature_list(self):
        """Ensure audio token filling is handled correctly when we have
        multiple varying len audio sequences passed as a list.
        """
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()
        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )

        vec_dims = [[1, 142100], [1, 269920]]
        num_expected_features = [90, 171]
        audio = [torch.rand(dims) - 0.5 for dims in vec_dims]

        inputs = processor(
            text=[
                f"{processor.audio_token} Can you describe this audio?",
                f"{processor.audio_token} How does it compare with this audio?",
            ],
            audio=audio,
            return_tensors="pt",
        )

        # Check the number of audio tokens
        audio_token_id = tokenizer.get_vocab()[processor.audio_token]

        # Make sure the number of audio tokens matches the number of features
        num_calculated_features = processor.audio_processor._get_num_audio_features(
            [dims[1] for dims in vec_dims],
        )
        num_audio_tokens = int(torch.sum(inputs["input_ids"] == audio_token_id))
        assert num_calculated_features == [90, 171]
        assert sum(num_expected_features) == num_audio_tokens

    @require_torch_accelerator
    def test_device_override(self):
        """Ensure that we regardless of the processing device, the tensors
        produced are on the CPU.
        """
        tokenizer = self.get_tokenizer()
        audio_processor = self.get_audio_processor()
        processor = GraniteSpeechProcessor(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
        )

        vec_dims = [1, 269920]
        wav = torch.rand(vec_dims) - 0.5
        inputs = processor(
            text=f"{processor.audio_token} Can you transcribe this audio?",
            audio=wav,
            return_tensors="pt",
            device=torch_device,
        )
        assert inputs["input_features"].device.type == "cpu"
transformers/tests/models/granite_speech/test_processing_granite_speech.py/0
{ "file_path": "transformers/tests/models/granite_speech/test_processing_granite_speech.py", "repo_id": "transformers", "token_count": 3355 }
589
# Copyright 2018 The Google AI Language Team Authors, Allegro.pl and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization tests for HerBERT, covering both the slow (Python) and fast (Rust) tokenizers."""


import json
import os
import unittest

from transformers import HerbertTokenizer, HerbertTokenizerFast
from transformers.models.herbert.tokenization_herbert import VOCAB_FILES_NAMES
from transformers.testing_utils import get_tests_dir, require_sacremoses, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_sacremoses
@require_tokenizers
class HerbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Exercises the HerBERT BPE tokenizer against a tiny hand-built vocab/merges fixture."""

    from_pretrained_id = "allegro/herbert-base-cased"
    tokenizer_class = HerbertTokenizer
    rust_tokenizer_class = HerbertTokenizerFast
    test_rust_tokenizer = True

    @classmethod
    def setUpClass(cls):
        """Build a minimal BPE vocab + merges on disk so tests don't need a hub download."""
        super().setUpClass()

        # Use a simpler test file without japanese/chinese characters
        with open(f"{get_tests_dir()}/fixtures/sample_text_no_unicode.txt", encoding="utf-8") as f_data:
            cls._data = f_data.read().replace("\n\n", "\n").strip()

        # Tiny BPE vocabulary; "</w>" marks an end-of-word subword unit.
        vocab = [
            "<s>",
            "</s>",
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            ",</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        # Merge rules in "left right rank" format; trailing "" terminates the file.
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(cls.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Input/expected-decode pair used by the common tokenizer test mixin.
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE tokenization of 'lower' should split into 'low' + 'er</w>' and map to fixture ids."""
        tokenizer = self.tokenizer_class(vocab_file=self.vocab_file, merges_file=self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        # Unknown token resolves to the <unk> id (23 in the fixture vocab).
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [16, 17, 23]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids, with and without special tokens."""
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "lower,newer"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Re-instantiate to also compare the default (special-tokens-added) encoding.
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_sequence_builders(self):
        """Special-token layout: [0] (BOS) + seq + [2] (SEP), pairs append the second seq + [2]."""
        tokenizer = self.tokenizer_class.from_pretrained("allegro/herbert-base-cased")

        text = tokenizer.encode("konstruowanie sekwencji", add_special_tokens=False)
        text_2 = tokenizer.encode("konstruowanie wielu sekwencji", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [2]
        assert encoded_pair == [0] + text + [2] + text_2 + [2]

    @unittest.skip(
        "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later"
    )
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    @unittest.skip(
        "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later"
    )
    def test_training_new_tokenizer(self):
        pass
transformers/tests/models/herbert/test_tokenization_herbert.py/0
{ "file_path": "transformers/tests/models/herbert/test_tokenization_herbert.py", "repo_id": "transformers", "token_count": 2228 }
590
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the IDEFICS processor (interleaved text + image prompts)."""

import shutil
import tempfile
import unittest

import numpy as np

from transformers import (
    AutoProcessor,
    IdeficsImageProcessor,
    IdeficsProcessor,
    LlamaTokenizerFast,
    PreTrainedTokenizerFast,
)
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


@require_torch
@require_vision
class IdeficsProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = IdeficsProcessor

    @classmethod
    def setUpClass(cls):
        """Save a tiny IDEFICS processor to a temp dir so tests can round-trip it from disk."""
        cls.tmpdirname = tempfile.mkdtemp()

        image_processor = IdeficsImageProcessor(return_tensors="pt")
        tokenizer = LlamaTokenizerFast.from_pretrained("HuggingFaceM4/tiny-random-idefics")

        processor = IdeficsProcessor(image_processor, tokenizer)

        processor.save_pretrained(cls.tmpdirname)

        # Keys every processed batch is expected to contain.
        cls.input_keys = ["pixel_values", "input_ids", "attention_mask", "image_attention_mask"]

    def get_tokenizer(self, **kwargs):
        # Reload from the saved checkpoint so kwargs overrides are exercised end-to-end.
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def prepare_prompts(self):
        """This function prepares a list of PIL images"""

        num_images = 2
        # Random RGB images in channel-first layout, converted to PIL via channel-last.
        images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for x in range(num_images)]
        images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in images]

        # Prompts cover the four supported shapes: text+1 image, text+2 images,
        # text only, and images only.
        prompts = [
            # text and 1 image
            [
                "User:",
                images[0],
                "Describe this image.\nAssistant:",
            ],
            # text and images
            [
                "User:",
                images[0],
                "Describe this image.\nAssistant: An image of two dogs.\n",
                "User:",
                images[1],
                "Describe this image.\nAssistant:",
            ],
            # only text
            [
                "User:",
                "Describe this image.\nAssistant: An image of two kittens.\n",
                "User:",
                "Describe this image.\nAssistant:",
            ],
            # only images
            [
                images[0],
                images[1],
            ],
        ]

        return prompts

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained must override the saved components."""
        with tempfile.TemporaryDirectory() as tmpdir:
            processor = IdeficsProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
            processor.save_pretrained(tmpdir)

            tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
            image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

            processor = IdeficsProcessor.from_pretrained(
                tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
            )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, IdeficsImageProcessor)

    def test_processor(self):
        """All prompt shapes must process into tensors for every expected output key."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor)

        prompts = self.prepare_prompts()

        # test that all prompts succeeded
        input_processor = processor(text=prompts, return_tensors="pt", padding="longest")
        for key in self.input_keys:
            assert torch.is_tensor(input_processor[key])

    def test_tokenizer_decode(self):
        """Processor.batch_decode must delegate to the tokenizer's batch_decode."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor, return_tensors="pt")

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_tokenizer_padding(self):
        """Right-padding: pad <unk> tokens are appended and masked out in attention_mask."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer(padding_side="right")

        processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor, return_tensors="pt")

        predicted_tokens = [
            "<s> Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk>",
            "<s> Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk>",
        ]
        predicted_attention_masks = [
            ([1] * 10) + ([0] * 9),
            ([1] * 10) + ([0] * 10),
        ]
        # Use the text-only prompt set; wrap each entry as its own batch item.
        prompts = [[prompt] for prompt in self.prepare_prompts()[2]]

        max_length = processor(text=prompts, padding="max_length", truncation=True, max_length=20, return_tensors="pt")
        longest = processor(text=prompts, padding="longest", truncation=True, max_length=30, return_tensors="pt")

        decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1])
        decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1])

        self.assertEqual(decoded_max_length, predicted_tokens[1])
        self.assertEqual(decoded_longest, predicted_tokens[0])

        self.assertListEqual(max_length["attention_mask"][-1].tolist(), predicted_attention_masks[1])
        self.assertListEqual(longest["attention_mask"][-1].tolist(), predicted_attention_masks[0])

    def test_tokenizer_left_padding(self):
        """Identical to test_tokenizer_padding, but with padding_side not explicitly set."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # Default side pads on the left: <unk> tokens come first, mask zeros lead.
        predicted_tokens = [
            "<unk><unk><unk><unk><unk><unk><unk><unk><unk><s> Describe this image.\nAssistant:",
            "<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk><s> Describe this image.\nAssistant:",
        ]
        predicted_attention_masks = [
            ([0] * 9) + ([1] * 10),
            ([0] * 10) + ([1] * 10),
        ]
        prompts = [[prompt] for prompt in self.prepare_prompts()[2]]
        max_length = processor(text=prompts, padding="max_length", truncation=True, max_length=20)
        longest = processor(text=prompts, padding="longest", truncation=True, max_length=30)

        decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1])
        decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1])

        self.assertEqual(decoded_max_length, predicted_tokens[1])
        self.assertEqual(decoded_longest, predicted_tokens[0])

        self.assertListEqual(max_length["attention_mask"][-1].tolist(), predicted_attention_masks[1])
        self.assertListEqual(longest["attention_mask"][-1].tolist(), predicted_attention_masks[0])
transformers/tests/models/idefics/test_processing_idefics.py/0
{ "file_path": "transformers/tests/models/idefics/test_processing_idefics.py", "repo_id": "transformers", "token_count": 3430 }
591
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Janus model."""

import tempfile
import unittest

import numpy as np

from transformers import AutoProcessor, AutoTokenizer, JanusProcessor

from ...test_processing_common import ProcessorTesterMixin


class JanusProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = JanusProcessor

    def setUp(self):
        """Download the tiny Janus processor once and re-save it locally with test-friendly settings."""
        # NOTE(review): tmpdirname is created per test but never removed in a tearDown —
        # presumably cleaned up by the OS temp policy; confirm whether the mixin handles it.
        self.tmpdirname = tempfile.mkdtemp()
        special_image_tokens = {
            "image_token": "<image_placeholder>",
            "boi_token": "<begin_of_image>",
            "eoi_token": "<end_of_image>",
        }

        processor = self.processor_class.from_pretrained(
            "deepseek-community/Janus-Pro-1B",
            extra_special_tokens=special_image_tokens,
        )
        # Set the processor to use the default system prompt to False as it's used based on input modality.
        # Hence set to False to avoid any issues in the test irrespective of inputs.
        processor.use_default_system_prompt = False
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_processor(self):
        return AutoProcessor.from_pretrained(self.tmpdirname)

    def test_chat_template_single(self):
        """
        Tests that the chat template matches the original implementation when applied to a single message.
        """
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")

        # Single image message
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                        {"type": "image"},
                    ],
                },
            ]
        ]

        correct_prompt = ["<|User|>: What is shown in this image?\n<image_placeholder>\n\n<|Assistant|>:"]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompt, correct_prompt)

        # Single image message with capitalization — role matching is case-insensitive.
        messages = [
            [
                {
                    "role": "User",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                        {"type": "image"},
                    ],
                },
            ]
        ]

        correct_prompt = ["<|User|>: What is shown in this image?\n<image_placeholder>\n\n<|Assistant|>:"]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompt, correct_prompt)

        # Single image message with uppercase
        messages = [
            [
                {
                    "role": "USER",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                        {"type": "image"},
                    ],
                },
            ]
        ]

        correct_prompt = ["<|User|>: What is shown in this image?\n<image_placeholder>\n\n<|Assistant|>:"]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompt, correct_prompt)

        """
        Warning: normally, the other models have a test comparing chat template+tokenization as two separate steps versus
        as a single step (i.e. processor.apply_chat_template(..., tokenize=True)). However, our processor has some extra steps
        other than simply applying prompt to tokenizer. These include prepending the default system prompts and, following the
        implementation from the Janus codebase, expanding the image token.
        """

        # Checking the output dict keys
        out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
        self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])

        # Now test the ability to return dict
        messages[0][0]["content"][1].update(
            {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}
        )
        out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
        self.assertTrue(self.images_input_name in out_dict)

        # should always have input_ids and attention_mask
        self.assertEqual(len(out_dict["input_ids"]), 1)
        self.assertEqual(len(out_dict["attention_mask"]), 1)
        self.assertEqual(len(out_dict[self.images_input_name]), 1)

        # Passing generation prompt explicitly — an empty assistant turn must render
        # the same as add_generation_prompt=True.
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                        {"type": "image"},
                    ],
                },
                {
                    "role": "assistant",
                    "content": [
                        {"type": "text", "text": ""},
                    ],
                },
            ]
        ]

        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=False)
        self.assertEqual(formatted_prompt, correct_prompt)

        # Single prompt with multiple images
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Compare this image"},
                        {"type": "image"},
                        {"type": "text", "text": "with this image"},
                        {"type": "image"},
                    ],
                },
            ]
        ]

        correct_prompt = [
            "<|User|>: Compare this image\n<image_placeholder>\nwith this image\n<image_placeholder>\n\n<|Assistant|>:"
        ]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompt, correct_prompt)

        # Multiple turns and multiple images
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Compare this image"},
                        {"type": "image"},
                        {"type": "text", "text": "with this image"},
                        {"type": "image"},
                    ],
                },
                {
                    "role": "assistant",
                    "content": [
                        {"type": "text", "text": "The first image is an equation, the second is a pie chart."},
                    ],
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {
                            "type": "text",
                            "text": "What about this third image? To which of the previous to is it more similar?",
                        },
                    ],
                },
            ]
        ]

        correct_prompt = [
            "<|User|>: Compare this image\n<image_placeholder>\nwith this image\n<image_placeholder>\n\n<|Assistant|>: The first image is an equation, the second is a pie chart.<|end▁of▁sentence|><|User|>: <image_placeholder>\nWhat about this third image? To which of the previous to is it more similar?\n\n<|Assistant|>:"
        ]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompt, correct_prompt)

    def test_chat_template_batched(self):
        """
        Tests that the chat template matches the original implementation when applied to a batch of messages.
        """
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")

        # Test 1: Simple single image per message batch
        batched_messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                        {"type": "image"},
                    ],
                },
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                        {"type": "image"},
                    ],
                },
            ],
        ]

        correct_prompts = [
            "<|User|>: What is shown in this image?\n<image_placeholder>\n\n<|Assistant|>:",
            "<|User|>: What is shown in this image?\n<image_placeholder>\n\n<|Assistant|>:",
        ]
        formatted_prompts = processor.apply_chat_template(batched_messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompts, correct_prompts)

        # Similarly to the single case, no test for chat template+tokenization as two separate steps versus as a single step

        # Checking the output dict keys
        out_dict = processor.apply_chat_template(
            batched_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            padding=True,
        )
        self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])

        # Verify image inputs are included in the output dict
        batched_messages[0][0]["content"][1].update(
            {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}
        )
        batched_messages[1][0]["content"][1].update(
            {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"}
        )
        out_dict = processor.apply_chat_template(
            batched_messages, add_generation_prompt=True, tokenize=True, return_dict=True, padding=True
        )
        self.assertTrue(self.images_input_name in out_dict)

        self.assertEqual(len(out_dict["input_ids"]), 2)  # Batch size for text
        self.assertEqual(len(out_dict["attention_mask"]), 2)  # Batch size for attention mask
        self.assertEqual(len(out_dict[self.images_input_name]), 2)  # Batch size for images

        # Test 2: Two images per message batch with different prompts
        batched_messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Compare this image"},
                        {"type": "image"},
                        {"type": "text", "text": "with this image"},
                        {"type": "image"},
                    ],
                },
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {"type": "text", "text": "Describe how the previous image compares to the following"},
                        {"type": "image"},
                    ],
                },
            ],
        ]

        correct_prompts = [
            "<|User|>: Compare this image\n<image_placeholder>\nwith this image\n<image_placeholder>\n\n<|Assistant|>:",
            "<|User|>: <image_placeholder>\nDescribe how the previous image compares to the following\n<image_placeholder>\n\n<|Assistant|>:",
        ]
        formatted_prompts = processor.apply_chat_template(batched_messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompts, correct_prompts)

        # Test 3: Multi-turn conversations with multiple images
        batched_messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Compare this image"},
                        {"type": "image"},
                        {"type": "text", "text": "with this image"},
                        {"type": "image"},
                    ],
                },
                {
                    "role": "assistant",
                    "content": [
                        {"type": "text", "text": "The first image is an equation, the second is a pie chart."},
                    ],
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {
                            "type": "text",
                            "text": "What about this third image? To which of the previous to is it more similar?",
                        },
                    ],
                },
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {"type": "text", "text": "Describe how the previous image compares to the following"},
                        {"type": "image"},
                    ],
                },
                {
                    "role": "assistant",
                    "content": [
                        {"type": "text", "text": "The first image is a formula, the second is a plot."},
                    ],
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Which of them is closer to the following?"},
                        {"type": "image"},
                    ],
                },
            ],
        ]

        correct_prompts = [
            "<|User|>: Compare this image\n<image_placeholder>\nwith this image\n<image_placeholder>\n\n<|Assistant|>: The first image is an equation, the second is a pie chart.<|end▁of▁sentence|><|User|>: <image_placeholder>\nWhat about this third image? To which of the previous to is it more similar?\n\n<|Assistant|>:",
            "<|User|>: <image_placeholder>\nDescribe how the previous image compares to the following\n<image_placeholder>\n\n<|Assistant|>: The first image is a formula, the second is a plot.<|end▁of▁sentence|><|User|>: Which of them is closer to the following?\n<image_placeholder>\n\n<|Assistant|>:",
        ]
        formatted_prompts = processor.apply_chat_template(batched_messages, add_generation_prompt=True)
        self.assertEqual(formatted_prompts, correct_prompts)

    def test_chat_template_accepts_processing_kwargs(self):
        """Tests that the chat template correctly handles additional processing arguments."""
        # Get processor and skip if it doesn't have a chat template
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")

        # Create a simple text message for testing
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                    ],
                },
            ]
        ]

        # Test 1: Padding to max_length
        # PS: we have to override the parent max_length of 50 to 80 because the output is already 51 tokens
        formatted_prompt_tokenized = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            padding="max_length",
            max_length=80,
        )
        self.assertEqual(len(formatted_prompt_tokenized[0]), 80)

        # Test 2: Truncation
        # Verify that the output is truncated to exactly 5 tokens
        formatted_prompt_tokenized = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            truncation=True,
            max_length=5,
        )
        self.assertEqual(len(formatted_prompt_tokenized[0]), 5)

        # Test 3: Image processing kwargs
        # Add an image and test image processing parameters
        messages[0][0]["content"].append(
            {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}
        )

        # Process with image rescaling and verify the pixel values are negative
        out_dict = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            do_rescale=True,
            rescale_factor=-1,
            return_tensors="np",
        )
        self.assertLessEqual(out_dict[self.images_input_name][0][0].mean(), 0)

    def test_processor_postprocess(self):
        """Round-trip: normalize an image through the processor, then postprocess back to pixel space."""
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)

        input_str = "lower newer"
        orig_image_input = self.prepare_image_inputs()
        # Convert PIL (H, W, C) to channel-first (C, H, W) as the processor consumes it here.
        orig_image = np.array(orig_image_input).transpose(2, 0, 1)

        inputs = processor(text=input_str, images=orig_image, do_resize=False, return_tensors="np")
        normalized_image_input = inputs.pixel_values
        unnormalized_images = processor.postprocess(normalized_image_input, return_tensors="np")["pixel_values"]

        # For an image where pixels go from 0 to 255 the diff can be 1 due to some numerical precision errors when scaling and unscaling
        self.assertTrue(np.abs(orig_image - unnormalized_images).max() >= 1)
transformers/tests/models/janus/test_processing_janus.py/0
{ "file_path": "transformers/tests/models/janus/test_processing_janus.py", "repo_id": "transformers", "token_count": 9117 }
592
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the LLaVA-OneVision processor (image + video + text)."""

import json
import shutil
import tempfile
import unittest

import torch

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_vision_available():
    from transformers import (
        AutoProcessor,
        LlavaOnevisionImageProcessor,
        LlavaOnevisionProcessor,
        Qwen2TokenizerFast,
    )

if is_torchvision_available():
    from transformers import LlavaOnevisionVideoProcessor


@require_vision
@require_torch
class LlavaOnevisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = LlavaOnevisionProcessor

    @classmethod
    def setUpClass(cls):
        """Assemble a processor from fresh components plus a Qwen2 tokenizer and save it to disk."""
        cls.tmpdirname = tempfile.mkdtemp()
        image_processor = LlavaOnevisionImageProcessor()
        video_processor = LlavaOnevisionVideoProcessor()
        tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
        # Register the multimodal placeholder tokens the chat template emits.
        tokenizer.add_special_tokens({"additional_special_tokens": ["<image>", "<video>"]})
        processor_kwargs = cls.prepare_processor_dict()
        processor = LlavaOnevisionProcessor(
            video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
        )
        processor.save_pretrained(cls.tmpdirname)
        cls.image_token = processor.image_token
        cls.video_token = processor.video_token

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_video_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    @staticmethod
    def prepare_processor_dict():
        # Jinja chat template: renders all images, then all videos, then text; assistant
        # text is wrapped in {% generation %} markers for loss-mask extraction.
        return {
            "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
            "num_image_tokens": 6,
            "vision_feature_select_strategy": "default"
        }  # fmt: skip

    # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
    def test_get_num_vision_tokens(self):
        "Tests general functionality of the helper used internally in vLLM"
        processor = self.get_processor()

        output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
        self.assertTrue("num_image_tokens" in output)
        self.assertEqual(len(output["num_image_tokens"]), 3)

        self.assertTrue("num_image_patches" in output)
        self.assertEqual(len(output["num_image_patches"]), 3)

    # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_chat_template_is_saved
    def test_chat_template_is_saved(self):
        """Chat templates are persisted as a separate file, not inside the processor JSON."""
        processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
        processor_dict_loaded = json.loads(processor_loaded.to_json_string())
        # chat templates aren't serialized to json in processors
        self.assertFalse("chat_template" in processor_dict_loaded)

        # they have to be saved as separate file and loaded back from that file
        # so we check if the same template is loaded
        processor_dict = self.prepare_processor_dict()
        self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))

    def test_image_token_filling(self):
        """The <image> placeholder must be expanded to exactly the number of patch tokens."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        # Force a known patch geometry so the expected token count is deterministic.
        processor.patch_size = 14
        processor.vision_feature_select_strategy = "default"
        processor.image_processor.crop_size = {"height": 336, "width": 336}
        processor.image_processor.size = {"shortest_edge": 336}
        processor.image_processor.image_grid_pinpoints = [[672, 336]]
        processor.num_image_tokens = (processor.image_processor.size["shortest_edge"] // processor.patch_size) ** 2

        # Important to check with non square image
        image = torch.randint(0, 2, (3, 503, 316))
        # Expected count for this geometry — presumably base patches + grid patches + newline
        # tokens; TODO confirm against the processor's expansion formula if it changes.
        expected_image_tokens = 1525
        image_token_index = processor.image_token_id

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
        inputs = processor(
            text=[processor.apply_chat_template(messages)],
            images=[image],
            return_tensors="pt",
        )
        image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
        self.assertEqual(expected_image_tokens, image_tokens)
transformers/tests/models/llava_onevision/test_processing_llava_onevision.py/0
{ "file_path": "transformers/tests/models/llava_onevision/test_processing_llava_onevision.py", "repo_id": "transformers", "token_count": 2368 }
593
# Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import MaskFormerImageProcessor from transformers.models.maskformer.image_processing_maskformer import binary_mask_to_rle from transformers.models.maskformer.modeling_maskformer import MaskFormerForInstanceSegmentationOutput if is_torchvision_available(): from transformers import MaskFormerImageProcessorFast if is_vision_available(): from PIL import Image class MaskFormerImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=True, ignore_index=255, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size self.do_normalize = do_normalize self.image_mean = 
image_mean self.image_std = image_std self.size_divisor = 0 # for the post_process_functions self.batch_size = 2 self.num_queries = 3 self.num_classes = 2 self.height = 3 self.width = 4 self.num_labels = num_labels self.do_reduce_labels = do_reduce_labels self.ignore_index = ignore_index def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "size_divisor": self.size_divisor, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to MaskFormerImageProcessor, assuming do_resize is set to True with a scalar size. """ if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def get_fake_maskformer_outputs(self): return MaskFormerForInstanceSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, 
self.num_queries, self.height, self.width)), ) def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs def prepare_semantic_single_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") example = ds[0] return example["image"], example["map"] # Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs def prepare_semantic_batch_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") return list(ds["image"][:2]), list(ds["map"][:2]) @require_torch @require_vision class MaskFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = MaskFormerImageProcessor if (is_vision_available() and is_torch_available()) else None fast_image_processing_class = ( MaskFormerImageProcessorFast if (is_vision_available() and is_torchvision_available()) else None ) def setUp(self): super().setUp() self.image_processor_tester = MaskFormerImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, 
"do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "ignore_index")) self.assertTrue(hasattr(image_processing, "num_labels")) def comm_get_image_processing_inputs( self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np" ): image_processing = self.image_processing_class(**self.image_processor_dict) # prepare image and target num_labels = self.image_processor_tester.num_labels annotations = None instance_id_to_semantic_id = None image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) if with_segmentation_maps: high = num_labels if is_instance_map: labels_expanded = list(range(num_labels)) * 2 instance_id_to_semantic_id = dict(enumerate(labels_expanded)) annotations = [ np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs ] if segmentation_type == "pil": annotations = [Image.fromarray(annotation) for annotation in annotations] inputs = image_processing( image_inputs, annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, ) return inputs def test_with_size_divisor(self): size_divisors = [8, 16, 32] weird_input_sizes = [(407, 802), (582, 1094)] for image_processing_class in self.image_processor_list: for size_divisor in size_divisors: image_processor_dict = {**self.image_processor_dict, **{"size_divisor": size_divisor}} image_processing = image_processing_class(**image_processor_dict) for weird_input_size in weird_input_sizes: inputs = image_processing([np.ones((3, *weird_input_size))], return_tensors="pt") pixel_values = inputs["pixel_values"] # check if divisible self.assertTrue((pixel_values.shape[-1] % size_divisor) == 0) self.assertTrue((pixel_values.shape[-2] % size_divisor) == 0) def test_call_with_segmentation_maps(self): def common(is_instance_map=False, segmentation_type=None): inputs = self.comm_get_image_processing_inputs( 
with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type ) mask_labels = inputs["mask_labels"] class_labels = inputs["class_labels"] pixel_values = inputs["pixel_values"] # check the batch_size for mask_label, class_label in zip(mask_labels, class_labels): self.assertEqual(mask_label.shape[0], class_label.shape[0]) # this ensure padding has happened self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:]) common() common(is_instance_map=True) common(is_instance_map=False, segmentation_type="pil") common(is_instance_map=True, segmentation_type="pil") def test_integration_instance_segmentation(self): # load 2 images and corresponding annotations from the hub repo_id = "nielsr/image-segmentation-toy-data" image1 = Image.open( hf_hub_download(repo_id=repo_id, filename="instance_segmentation_image_1.png", repo_type="dataset") ) image2 = Image.open( hf_hub_download(repo_id=repo_id, filename="instance_segmentation_image_2.png", repo_type="dataset") ) annotation1 = Image.open( hf_hub_download(repo_id=repo_id, filename="instance_segmentation_annotation_1.png", repo_type="dataset") ) annotation2 = Image.open( hf_hub_download(repo_id=repo_id, filename="instance_segmentation_annotation_2.png", repo_type="dataset") ) # get instance segmentations and instance-to-segmentation mappings def get_instance_segmentation_and_mapping(annotation): instance_seg = np.array(annotation)[:, :, 1] class_id_map = np.array(annotation)[:, :, 0] class_labels = np.unique(class_id_map) # create mapping between instance IDs and semantic category IDs inst2class = {} for label in class_labels: instance_ids = np.unique(instance_seg[class_id_map == label]) inst2class.update(dict.fromkeys(instance_ids, label)) return instance_seg, inst2class instance_seg1, inst2class1 = get_instance_segmentation_and_mapping(annotation1) instance_seg2, inst2class2 = get_instance_segmentation_and_mapping(annotation2) # create a image processor image_processing = 
MaskFormerImageProcessor(do_reduce_labels=True, ignore_index=255, size=(512, 512)) # prepare the images and annotations inputs = image_processing( [image1, image2], [instance_seg1, instance_seg2], instance_id_to_semantic_id=[inst2class1, inst2class2], return_tensors="pt", ) # verify the pixel values and pixel mask self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 512)) self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 512)) # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55])) torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55])) # verify the mask labels self.assertEqual(len(inputs["mask_labels"]), 2) self.assertEqual(inputs["mask_labels"][0].shape, (2, 512, 512)) self.assertEqual(inputs["mask_labels"][1].shape, (4, 512, 512)) self.assertEqual(inputs["mask_labels"][0].sum().item(), 41527.0) self.assertEqual(inputs["mask_labels"][1].sum().item(), 26259.0) def test_integration_semantic_segmentation(self): # load 2 images and corresponding semantic annotations from the hub repo_id = "nielsr/image-segmentation-toy-data" image1 = Image.open( hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_image_1.png", repo_type="dataset") ) image2 = Image.open( hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_image_2.png", repo_type="dataset") ) annotation1 = Image.open( hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_1.png", repo_type="dataset") ) annotation2 = Image.open( hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_2.png", repo_type="dataset") ) # create a image processor image_processing = MaskFormerImageProcessor(do_reduce_labels=True, ignore_index=255, size=(512, 512)) # prepare the images and annotations inputs = image_processing( [image1, image2], [annotation1, annotation2], return_tensors="pt", ) # verify the pixel values and pixel mask 
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 512)) self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 512)) # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60])) torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143])) # verify the mask labels self.assertEqual(len(inputs["mask_labels"]), 2) self.assertEqual(inputs["mask_labels"][0].shape, (3, 512, 512)) self.assertEqual(inputs["mask_labels"][1].shape, (8, 512, 512)) self.assertEqual(inputs["mask_labels"][0].sum().item(), 170200.0) self.assertEqual(inputs["mask_labels"][1].sum().item(), 257036.0) def test_integration_panoptic_segmentation(self): # load 2 images and corresponding panoptic annotations from the hub dataset = load_dataset("nielsr/ade20k-panoptic-demo") image1 = dataset["train"][0]["image"] image2 = dataset["train"][1]["image"] segments_info1 = dataset["train"][0]["segments_info"] segments_info2 = dataset["train"][1]["segments_info"] annotation1 = dataset["train"][0]["label"] annotation2 = dataset["train"][1]["label"] def rgb_to_id(color): if isinstance(color, np.ndarray) and len(color.shape) == 3: if color.dtype == np.uint8: color = color.astype(np.int32) return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) def create_panoptic_map(annotation, segments_info): annotation = np.array(annotation) # convert RGB to segment IDs per pixel # 0 is the "ignore" label, for which we don't need to make binary masks panoptic_map = rgb_to_id(annotation) # create mapping between segment IDs and semantic classes inst2class = {segment["id"]: segment["category_id"] for segment in segments_info} return panoptic_map, inst2class panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1) panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2) # 
create a image processor image_processing = MaskFormerImageProcessor(ignore_index=0, do_resize=False) # prepare the images and annotations pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)] inputs = image_processing.encode_inputs( pixel_values_list, [panoptic_map1, panoptic_map2], instance_id_to_semantic_id=[inst2class1, inst2class2], return_tensors="pt", ) # verify the pixel values and pixel mask self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711)) self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711)) # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip torch.testing.assert_close(inputs["class_labels"][0], torch.tensor(expected_class_labels)) expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels) # verify the mask labels self.assertEqual(len(inputs["mask_labels"]), 2) self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711)) self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711)) self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0) self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0) def test_binary_mask_to_rle(self): fake_binary_mask = np.zeros((20, 50)) fake_binary_mask[0, 20:] = 1 fake_binary_mask[1, :15] = 1 fake_binary_mask[5, :10] = 1 rle = 
binary_mask_to_rle(fake_binary_mask) self.assertEqual(len(rle), 4) self.assertEqual(rle[0], 21) self.assertEqual(rle[1], 45) def test_post_process_segmentation(self): for image_processing_class in self.image_processor_list: feature_extractor = image_processing_class(num_labels=self.image_processor_tester.num_classes) outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = feature_extractor.post_process_segmentation(outputs) self.assertEqual( segmentation.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_classes, self.image_processor_tester.height, self.image_processor_tester.width, ), ) target_size = (1, 4) segmentation = feature_extractor.post_process_segmentation(outputs, target_size=target_size) self.assertEqual( segmentation.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_classes, *target_size), ) def test_post_process_semantic_segmentation(self): for image_processing_class in self.image_processor_list: feature_extractor = image_processing_class(num_labels=self.image_processor_tester.num_classes) outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = feature_extractor.post_process_semantic_segmentation(outputs) self.assertEqual(len(segmentation), self.image_processor_tester.batch_size) self.assertEqual( segmentation[0].shape, ( self.image_processor_tester.height, self.image_processor_tester.width, ), ) target_sizes = [(1, 4) for i in range(self.image_processor_tester.batch_size)] segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes) self.assertEqual(segmentation[0].shape, target_sizes[0]) def test_post_process_instance_segmentation(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class(num_labels=self.image_processor_tester.num_classes) outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = 
image_processor.post_process_instance_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) segmentation = image_processor.post_process_instance_segmentation( outputs, threshold=0, return_binary_maps=True ) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual(len(el["segmentation"].shape), 3) self.assertEqual( el["segmentation"].shape[1:], (self.image_processor_tester.height, self.image_processor_tester.width), ) def test_post_process_panoptic_segmentation(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(num_labels=self.image_processor_tester.num_classes) outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = image_processing.post_process_panoptic_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) def test_post_process_label_fusing(self): for image_processing_class in self.image_processor_list: image_processor = self.image_processing_class(num_labels=self.image_processor_tester.num_classes) outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = image_processor.post_process_panoptic_segmentation( outputs, threshold=0, 
mask_threshold=0, overlap_mask_area_threshold=0 ) unfused_segments = [el["segments_info"] for el in segmentation] fused_segmentation = image_processor.post_process_panoptic_segmentation( outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0, label_ids_to_fuse={1} ) fused_segments = [el["segments_info"] for el in fused_segmentation] for el_unfused, el_fused in zip(unfused_segments, fused_segments): if len(el_unfused) == 0: self.assertEqual(len(el_unfused), len(el_fused)) continue # Get number of segments to be fused fuse_targets = [1 for el in el_unfused if el["label_id"] in {1}] num_to_fuse = 0 if len(fuse_targets) == 0 else sum(fuse_targets) - 1 # Expected number of segments after fusing expected_num_segments = max([el["id"] for el in el_unfused]) - num_to_fuse num_segments_fused = max([el["id"] for el in el_fused]) self.assertEqual(num_segments_fused, expected_num_segments) def test_slow_fast_equivalence(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image, dummy_map = prepare_semantic_single_inputs() image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt") image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values) for mask_label_slow, mask_label_fast in zip(image_encoding_slow.mask_labels, image_encoding_fast.mask_labels): self._assert_slow_fast_tensors_equivalence(mask_label_slow, 
mask_label_fast) for class_label_slow, class_label_fast in zip( image_encoding_slow.class_labels, image_encoding_fast.class_labels ): self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float()) def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images, dummy_maps = prepare_semantic_batch_inputs() image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) for mask_label_slow, mask_label_fast in zip(encoding_slow.mask_labels, encoding_fast.mask_labels): self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast) for class_label_slow, class_label_fast in zip(encoding_slow.class_labels, encoding_fast.class_labels): self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
transformers/tests/models/maskformer/test_image_processing_maskformer.py/0
{ "file_path": "transformers/tests/models/maskformer/test_image_processing_maskformer.py", "repo_id": "transformers", "token_count": 12610 }
594
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the MgpstrProcessor.""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class MgpstrProcessorTest(unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None @property def image_processor_dict(self): return self.prepare_image_processor_dict() def setUp(self): self.image_size = (3, 32, 128) self.tmpdirname = tempfile.mkdtemp() vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") image_processor_map = { "do_normalize": False, 
"do_resize": True, "image_processor_type": "ViTImageProcessor", "resample": 3, "size": {"height": 32, "width": 128}, } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) # We copy here rather than use the ProcessorTesterMixin as this processor has a `char_tokenizer` instead of a # tokenizer attribute, which means all the tests would need to be overridden. @require_vision def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def get_tokenizer(self, **kwargs): return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) processor.save_pretrained(self.tmpdirname) processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False) self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor.image_processor, ViTImageProcessor) def test_save_load_pretrained_additional_features(self): tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) 
processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = MgpstrProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, ViTImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc: self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "test" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "test" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"]) # test if it raises when no input is passed with pytest.raises(ValueError): 
processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.char_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) decode_strs = [seq.replace(" ", "") for seq in decoded_tok] self.assertListEqual(decode_strs, decoded_processor) def test_processor_batch_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor) char_input = torch.randn(1, 27, 38) bpe_input = torch.randn(1, 27, 50257) wp_input = torch.randn(1, 27, 30522) results = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
transformers/tests/models/mgp_str/test_processing_mgp_str.py/0
{ "file_path": "transformers/tests/models/mgp_str/test_processing_mgp_str.py", "repo_id": "transformers", "token_count": 3122 }
595
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin if is_vision_available(): from PIL import Image from transformers import MllamaImageProcessor if is_torch_available(): import torch class MllamaImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, num_images=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_convert_rgb=True, do_pad=True, max_image_tiles=4, ): size = size if size is not None else {"height": 224, "width": 224} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.max_image_tiles = max_image_tiles self.image_size = image_size self.num_images = num_images self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_convert_rgb = do_convert_rgb self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_convert_rgb": self.do_convert_rgb, "do_resize": 
self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, "max_image_tiles": self.max_image_tiles, } def prepare_image_inputs( self, batch_size=None, min_resolution=None, max_resolution=None, num_channels=None, num_images=None, size_divisor=None, equal_resolution=False, numpify=False, torchify=False, ): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. One can specify whether the images are of the same resolution or not. """ assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" batch_size = batch_size if batch_size is not None else self.batch_size min_resolution = min_resolution if min_resolution is not None else self.min_resolution max_resolution = max_resolution if max_resolution is not None else self.max_resolution num_channels = num_channels if num_channels is not None else self.num_channels num_images = num_images if num_images is not None else self.num_images images_list = [] for i in range(batch_size): images = [] for j in range(num_images): if equal_resolution: width = height = max_resolution else: # To avoid getting image width/height 0 if size_divisor is not None: # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` min_resolution = max(size_divisor, min_resolution) width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) images_list.append(images) if not numpify and not torchify: # PIL expects the channel dimension as last dimension images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list] if torchify: images_list = 
[[torch.from_numpy(image) for image in images] for images in images_list] return images_list def expected_output_image_shape(self, images): expected_output_image_shape = ( max(len(images) for images in images), self.max_image_tiles, self.num_channels, self.size["height"], self.size["width"], ) return expected_output_image_shape @require_torch @require_vision class MllamaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = MllamaImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = MllamaImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "max_image_tiles")) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for sample_images in image_inputs: for image in sample_images: self.assertIsInstance(image, np.ndarray) expected_output_image_shape = ( max(len(images) for images in image_inputs), self.image_processor_tester.max_image_tiles, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for images in image_inputs: for image in images: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_channels_last(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # a white 1x1 pixel RGB image image_inputs = [[np.full(shape=(1, 1, 3), fill_value=1.0, dtype=float)]] encoded_images = image_processing( image_inputs, return_tensors="pt", input_data_format="channels_last" ).pixel_values expected_output_image_shape 
= self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) def test_ambiguous_channel_pil_image(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = [[Image.new("RGB", (1, 1))], [Image.new("RGB", (100, 1))]] encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), (2, *expected_output_image_shape)) def test_resize_impractical_aspect_ratio(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # Ensure that no error is raised even if the aspect ratio is impractical image_inputs = [[Image.new("RGB", (9999999, 1))]] encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for images in image_inputs: for image in images: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) encoded_images = 
image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), ) def test_call_numpy_4_channels(self): self.skipTest("4 channels input is not supported yet") def test_image_correctly_tiled(self): def get_empty_tiles(pixel_values): # image has shape batch_size, max_num_images, max_image_tiles, num_channels, height, width # we want to get a binary mask of shape batch_size, max_num_images, max_image_tiles # of empty tiles, i.e. tiles that are completely zero return np.all(pixel_values == 0, axis=(3, 4, 5)) image_processor_dict = {**self.image_processor_dict, "size": {"height": 50, "width": 50}, "max_image_tiles": 4} image_processor = self.image_processing_class(**image_processor_dict) # image fits 2x2 tiles grid (width x height) image = Image.new("RGB", (80, 95)) inputs = image_processor(image, return_tensors="np") pixel_values = inputs.pixel_values empty_tiles = get_empty_tiles(pixel_values)[0, 0].tolist() self.assertEqual(empty_tiles, [False, False, False, False]) aspect_ratio_ids = inputs.aspect_ratio_ids[0, 0] self.assertEqual(aspect_ratio_ids, 6) aspect_ratio_mask = inputs.aspect_ratio_mask[0, 0].tolist() self.assertEqual(aspect_ratio_mask, [1, 1, 1, 1]) # image fits 3x1 grid (width x height) image = Image.new("RGB", (101, 50)) inputs = image_processor(image, return_tensors="np") pixel_values = inputs.pixel_values empty_tiles = get_empty_tiles(pixel_values)[0, 0].tolist() self.assertEqual(empty_tiles, [False, False, False, True]) aspect_ratio_ids = inputs.aspect_ratio_ids[0, 0] self.assertEqual(aspect_ratio_ids, 3) num_tiles = inputs.aspect_ratio_mask[0, 0].sum() self.assertEqual(num_tiles, 3) aspect_ratio_mask = inputs.aspect_ratio_mask[0, 0].tolist() self.assertEqual(aspect_ratio_mask, [1, 1, 1, 0]) # image fits 1x1 grid (width x height) image = Image.new("RGB", (20, 39)) inputs = image_processor(image, return_tensors="np") pixel_values = 
inputs.pixel_values empty_tiles = get_empty_tiles(pixel_values)[0, 0].tolist() self.assertEqual(empty_tiles, [False, True, True, True]) aspect_ratio_ids = inputs.aspect_ratio_ids[0, 0] self.assertEqual(aspect_ratio_ids, 1) aspect_ratio_mask = inputs.aspect_ratio_mask[0, 0].tolist() self.assertEqual(aspect_ratio_mask, [1, 0, 0, 0]) # image fits 2x1 grid (width x height) image = Image.new("RGB", (51, 20)) inputs = image_processor(image, return_tensors="np") pixel_values = inputs.pixel_values empty_tiles = get_empty_tiles(pixel_values)[0, 0].tolist() self.assertEqual(empty_tiles, [False, False, True, True]) aspect_ratio_ids = inputs.aspect_ratio_ids[0, 0] self.assertEqual(aspect_ratio_ids, 2) aspect_ratio_mask = inputs.aspect_ratio_mask[0, 0].tolist() self.assertEqual(aspect_ratio_mask, [1, 1, 0, 0]) # image is greater than 2x2 tiles grid (width x height) image = Image.new("RGB", (150, 150)) inputs = image_processor(image, return_tensors="np") pixel_values = inputs.pixel_values empty_tiles = get_empty_tiles(pixel_values)[0, 0].tolist() self.assertEqual(empty_tiles, [False, False, False, False]) aspect_ratio_ids = inputs.aspect_ratio_ids[0, 0] self.assertEqual(aspect_ratio_ids, 6) # (2 - 1) * 4 + 2 = 6 aspect_ratio_mask = inputs.aspect_ratio_mask[0, 0].tolist() self.assertEqual(aspect_ratio_mask, [1, 1, 1, 1]) # batch of images image1 = Image.new("RGB", (80, 95)) image2 = Image.new("RGB", (101, 50)) image3 = Image.new("RGB", (23, 49)) inputs = image_processor([[image1], [image2, image3]], return_tensors="np") pixel_values = inputs.pixel_values empty_tiles = get_empty_tiles(pixel_values).tolist() expected_empty_tiles = [ # sample 1 with 1 image 2x2 grid [ [False, False, False, False], [True, True, True, True], # padding ], # sample 2 [ [False, False, False, True], # 3x1 [False, True, True, True], # 1x1 ], ] self.assertEqual(empty_tiles, expected_empty_tiles) aspect_ratio_ids = inputs.aspect_ratio_ids.tolist() expected_aspect_ratio_ids = [[6, 0], [3, 1]] 
self.assertEqual(aspect_ratio_ids, expected_aspect_ratio_ids) aspect_ratio_mask = inputs.aspect_ratio_mask.tolist() expected_aspect_ratio_mask = [ [ [1, 1, 1, 1], [1, 0, 0, 0], ], [ [1, 1, 1, 0], [1, 0, 0, 0], ], ] self.assertEqual(aspect_ratio_mask, expected_aspect_ratio_mask)
transformers/tests/models/mllama/test_image_processing_mllama.py/0
{ "file_path": "transformers/tests/models/mllama/test_image_processing_mllama.py", "repo_id": "transformers", "token_count": 7357 }
596
# Copyright 2020 The HuggingFace Inc. team, Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import MPNetTokenizerFast from transformers.models.mpnet.tokenization_mpnet import VOCAB_FILES_NAMES, MPNetTokenizer from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class MPNetTokenizerTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "microsoft/mpnet-base" tokenizer_class = MPNetTokenizer rust_tokenizer_class = MPNetTokenizerFast test_rust_tokenizer = True space_between_special_tokens = True @classmethod def setUpClass(cls): super().setUpClass() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00e9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00e9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 
12, 10, 11]) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/mpnet-base") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [2] assert encoded_pair == [0] + text + [2] + [2] + text_2 + [2]
transformers/tests/models/mpnet/test_tokenization_mpnet.py/0
{ "file_path": "transformers/tests/models/mpnet/test_tokenization_mpnet.py", "repo_id": "transformers", "token_count": 1223 }
597
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from functools import lru_cache from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors, use_cache_if_possible @require_tokenizers class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "RUCAIBox/mvp" tokenizer_class = MvpTokenizer rust_tokenizer_class = MvpTokenizerFast test_rust_tokenizer = True from_pretrained_filter = filter_roberta_detectors # from_pretrained_kwargs = {'add_prefix_space': True} @classmethod def setUpClass(cls): super().setUpClass() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] cls.special_tokens_map = {"unk_token": "<unk>"} cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(cls.vocab_file, "w", encoding="utf-8") 
as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(cls.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, pretrained_name=None, **kwargs): kwargs.update(cls.special_tokens_map) pretrained_name = pretrained_name or cls.tmpdirname return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs) @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_rust_tokenizer(cls, pretrained_name=None, **kwargs): kwargs.update(cls.special_tokens_map) pretrained_name = pretrained_name or cls.tmpdirname return cls.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" @cached_property def default_tokenizer(self): return MvpTokenizer.from_pretrained("RUCAIBox/mvp") @cached_property def default_tokenizer_fast(self): return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp") @require_torch def test_prepare_batch(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(expected_src_tokens, result) # Test that special tokens are reset @require_torch def test_prepare_batch_empty_target_text(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, padding=True, return_tensors="pt") # check if input_ids are returned and no labels self.assertIn("input_ids", batch) 
self.assertIn("attention_mask", batch) self.assertNotIn("labels", batch) self.assertNotIn("decoder_attention_mask", batch) @require_torch def test_tokenizer_as_target_length(self): tgt_text = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt") self.assertEqual(32, targets["input_ids"].shape[1]) @require_torch def test_prepare_batch_not_longer_than_maxlen(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer( ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 1024)) @require_torch def test_special_tokens(self): src_text = ["A long paragraph for summarization."] tgt_text = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") input_ids = inputs["input_ids"] labels = inputs["labels"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @unittest.skip def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs) tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." 
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) # Rust correctly handles the space before the mask while python doesn't self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
transformers/tests/models/mvp/test_tokenization_mvp.py/0
{ "file_path": "transformers/tests/models/mvp/test_tokenization_mvp.py", "repo_id": "transformers", "token_count": 3876 }
598
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PaliGemma model.""" import copy import unittest import pytest from parameterized import parameterized from transformers import ( PaliGemmaConfig, PaliGemmaForConditionalGeneration, is_torch_available, ) from transformers.testing_utils import ( require_torch, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch class PaliGemma2VisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=25, vision_feature_select_strategy="default", vision_feature_layer=-1, projection_dim=32, text_config={ "model_type": "gemma2", "seq_length": 128, "is_training": True, # "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 1, "head_dim": 8, "intermediate_size": 37, "hidden_activation": "gelu_pytorch_tanh", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 1, }, is_training=True, 
vision_config={ "use_labels": True, "image_size": 20, "patch_size": 5, "num_image_tokens": 4, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_key_value_heads": 1, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, use_cache=False, ): self.parent = parent self.ignore_index = ignore_index # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.projection_dim = projection_dim self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = vision_config["num_channels"] self.image_size = vision_config["image_size"] self.encoder_seq_length = seq_length self.use_cache = use_cache def get_config(self): return PaliGemmaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, projection_dim=self.projection_dim, vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): 
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        # Sample ids in [1, vocab_size - 1] so the pad id (0-ish range) is avoided by construction.
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
        # set the 16 first tokens to be image, and ensure that no other tokens are image tokens
        # do not change this unless you modified image size or patch size
        input_ids[input_ids == config.image_token_index] = self.pad_token_id
        input_ids[:, :16] = config.image_token_index
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            # Labels mirror the inputs: the common tests only need a valid loss, not meaningful targets.
            "labels": input_ids,
            "token_type_ids": torch.zeros_like(input_ids),
        }
        return config, inputs_dict


@require_torch
class PaliGemma2ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `PaliGemmaForConditionalGeneration`.

    The PaliGemma2 suite reuses the `PaliGemmaForConditionalGeneration` class; the
    PaliGemma2-specific configuration comes from `PaliGemma2VisionText2TextModelTester`
    (see `setUp`). Many common tests are skipped below, each with the reason inline.
    """

    all_model_classes = (PaliGemmaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-text-to-text": PaliGemmaForConditionalGeneration}
    fx_compatible = False
    test_pruning = False
    test_torchscript = False
    test_head_masking = False
    _is_composite = True

    def setUp(self):
        self.model_tester = PaliGemma2VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PaliGemmaConfig, has_text_modality=False)

    # NOTE(review): kept byte-identical (including docstring typos) because the
    # `# Copied from` marker is enforced by the repo consistency check.
    # Copied from tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_mismatching_num_image_tokens
    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs through an error with explicit message saying what is wrong
        when number of images doesn't match number of image tokens in the text.
        Also we need to test multi-image cases when one prompr has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            curr_input_dict = copy.deepcopy(input_dict)  # in=place modifications further
            _ = model(**curr_input_dict)  # successful forward with no modifications

            # remove one image but leave the image token in text
            curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
            with self.assertRaises(ValueError):
                _ = model(**curr_input_dict)

            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = curr_input_dict["input_ids"][:1]
            pixel_values = curr_input_dict["pixel_values"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)

            # one image and two image tokens raise an error
            with self.assertRaises(ValueError):
                _ = model(input_ids=input_ids, pixel_values=pixel_values)

            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values)

    # --- Gradient-checkpointing tests: known gradient issue, see linked PR. ---

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # --- Offload / parallelism tests: flaky with the tiny test configuration. ---

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_cpu_offload(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_disk_offload_bin(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_disk_offload_safetensors(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_model_parallelism(self):
        pass

    @unittest.skip(
        reason="PaliGemma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
    )
    def test_initialization(self):
        pass

    # TODO extend valid outputs to include this test @Molbap
    @unittest.skip(reason="PaliGemma has currently one output format.")
    def test_model_outputs_equivalence(self):
        pass

    # TODO fix the loss = nan in the testing configuration chosen @Molbap
    @unittest.skip(reason="Edge case giving loss nan values in testing configuration.")
    def test_determinism(self):
        pass

    @unittest.skip(reason="PaliGemma does not use feedforward chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip(
        "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
    )
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Low memory will be removed soon so no need to fix it")
    def test_beam_search_low_memory(self):
        pass

    # --- Decoding-strategy tests: Gemma2's HybridCache is incompatible with these features. ---

    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    @unittest.skip("Gemma2 has HybridCache which is not compatible with assisted decoding")
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    # NOTE(review): takes `assistant_type` but has no @parameterized.expand — presumably
    # harmless since the test is skipped; confirm before un-skipping.
    @unittest.skip("Gemma2 has HybridCache which is not compatible with assisted decoding")
    def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type):
        pass

    @pytest.mark.generate
    @unittest.skip("Gemma2 has HybridCache which is not compatible with assisted decoding")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip("Gemma2 has HybridCache which is not compatible with dola decoding")
    def test_dola_decoding_sample(self):
        pass

    @unittest.skip("Gemma2 has HybridCache and doesn't support continue from past kv")
    def test_generate_continue_from_past_key_values(self):
        pass

    @unittest.skip("Gemma2 has HybridCache and doesn't support contrastive generation")
    def test_contrastive_generate(self):
        pass

    @unittest.skip("Gemma2 has HybridCache and doesn't support contrastive generation")
    def test_contrastive_generate_dict_outputs_use_cache(self):
        pass

    @unittest.skip("Gemma2 has HybridCache and doesn't support contrastive generation")
    def test_contrastive_generate_low_memory(self):
        pass

    @unittest.skip("Gemma2 has HybridCache and doesn't support StaticCache")
    def test_generate_with_static_cache(self):
        pass

    @unittest.skip("Paligemma position ids are 1 indexed")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Paligemma position ids are 1 indexed")
    def test_sdpa_padding_matches_padding_free_with_position_ids(self):
        pass
transformers/tests/models/paligemma2/test_modeling_paligemma2.py/0
{ "file_path": "transformers/tests/models/paligemma2/test_modeling_paligemma2.py", "repo_id": "transformers", "token_count": 5231 }
599
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch PerceptionLM model."""

import unittest

from huggingface_hub import hf_hub_download

from transformers import (
    AutoProcessor,
    PerceptionLMConfig,
    PerceptionLMForConditionalGeneration,
    PerceptionLMModel,
    is_torch_available,
)
from transformers.testing_utils import (
    cleanup,
    require_bitsandbytes,
    require_read_token,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


if is_torch_available():
    import torch


class PerceptionLMVisionText2TextModelTester:
    """Builds a tiny PerceptionLM config and matching dummy inputs for the common tests."""

    def __init__(
        self,
        parent,
        image_token_id=0,
        video_token_id=2,
        seq_length=7,
        tie_word_embeddings=True,
        projector_pooling_ratio=1,
        text_config={
            "model_type": "llama",
            "seq_length": 7,
            "is_training": True,
            "use_input_mask": True,
            "use_token_type_ids": False,
            "use_labels": True,
            "vocab_size": 99,
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "type_vocab_size": 16,
            "type_sequence_label_size": 2,
            "initializer_range": 0.02,
            "num_labels": 3,
            "num_choices": 4,
            "pad_token_id": 1,
        },
        is_training=True,
        vision_config={
            "architecture": "vit_pe_core_large_patch14_336",
            "model_args": {
                "embed_dim": 64,
                "img_size": (14, 14),
                "depth": 2,
                "global_pool": "",
                "use_post_transformer_norm": False,
                "init_values": 0.1,
                "ref_feat_shape": (1, 1),
            },
        },
    ):
        self.parent = parent
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.pad_token_id = text_config["pad_token_id"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training
        self.tie_word_embeddings = tie_word_embeddings
        self.batch_size = 3
        self.num_tiles = 1
        self.num_frames = 1
        self.num_channels = 3
        self.image_size = self.vision_config["model_args"]["img_size"][0]
        # One placeholder token per 14x14 patch; image and video use the same tiny grid here.
        self.num_image_tokens = (self.vision_config["model_args"]["img_size"][0] // 14) ** 2
        self.num_video_tokens = (self.vision_config["model_args"]["img_size"][0] // 14) ** 2
        self.seq_length = seq_length + self.num_image_tokens
        self.encoder_seq_length = self.seq_length

    def get_config(self):
        return PerceptionLMConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            vision_use_cls_token=True,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            tie_word_embeddings=self.tie_word_embeddings,
        )

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_tiles,
                self.num_channels,
                self.vision_config["model_args"]["img_size"][0],
                self.vision_config["model_args"]["img_size"][1],
            ]
        )
        pixel_values_videos = floats_tensor(
            [
                self.batch_size,
                self.num_frames,
                self.num_channels,
                self.vision_config["model_args"]["img_size"][0],
                self.vision_config["model_args"]["img_size"][1],
            ]
        )
        config = self.get_config()
        return config, pixel_values, pixel_values_videos

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_values_videos = self.prepare_config_and_inputs()
        # Sample ids in [2, vocab_size - 1] so neither special token id (0 and 2 default) is hit by chance...
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
        # ...then scrub any accidental image/video ids and place them at fixed positions instead.
        input_ids[input_ids == config.image_token_id] = self.pad_token_id
        input_ids[input_ids == config.video_token_id] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = config.image_token_id
        input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_id
        inputs_dict = {
            "pixel_values": pixel_values,
            "pixel_values_videos": pixel_values_videos,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class PerceptionLMForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `PerceptionLMForConditionalGeneration`.
    """

    all_model_classes = (
        (
            PerceptionLMModel,
            PerceptionLMForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_head_masking = False
    _is_composite = True

    def setUp(self):
        self.model_tester = PerceptionLMVisionText2TextModelTester(self)
        common_properties = [
            "image_token_id",
            "video_token_id",
        ]
        self.config_tester = ConfigTester(
            self,
            config_class=PerceptionLMConfig,
            has_text_modality=False,
            common_properties=common_properties,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["pixel_values_videos"]

            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    # while some other models require pixel_values to be present
    def test_inputs_embeds_matches_input_ids(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["pixel_values_videos"]

            inputs_embeds = model.get_input_embeddings()(input_ids)

            # The text-only path must be identical whether driven by ids or by their embeddings.
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)

    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs throw an error with an explicit message saying what is wrong
        when the number of images doesn't match the number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            if model_class == PerceptionLMModel:
                continue
            model = model_class(config).to(torch_device)
            _ = model(**input_dict)  # successful forward with no modifications

            # remove one image but leave the image token in text
            input_dict["pixel_values"] = input_dict["pixel_values"][-1:, ...]
            with self.assertRaises(ValueError):
                _ = model(**input_dict)

            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = input_dict["input_ids"][:1]
            pixel_values = input_dict["pixel_values"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)

            # one image and two image tokens raise an error
            with self.assertRaises(ValueError):
                _ = model(input_ids=input_ids, pixel_values=pixel_values)

            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values)

    # The training tests below restrict the class list to the generation head before
    # delegating to the common implementation.

    def test_training(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training()

    def test_training_gradient_checkpointing(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training_gradient_checkpointing()

    def test_training_gradient_checkpointing_use_reentrant(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training_gradient_checkpointing_use_reentrant()

    def test_training_gradient_checkpointing_use_reentrant_false(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training_gradient_checkpointing_use_reentrant_false()

    @unittest.skip(reason="Timm Eva (PE) weights cannot be fully constructed in _init_weights")
    def test_can_init_all_missing_weights(self):
        pass

    @unittest.skip(reason="Timm Eva (PE) weights cannot be fully constructed in _init_weights")
    def test_initialization(self):
        pass

    @unittest.skip(
        reason="PE/TIMM's attention implementation is self configured and won't raise ValueError on global attention implementation."
    )
    def test_flash_attn_2_can_dispatch_composite_models(self):
        pass

    @unittest.skip(
        "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
    )
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("ViT PE / TimmWrapperModel cannot be tested with meta device")
    def test_can_be_initialized_on_meta(self):
        pass

    @unittest.skip("ViT PE / TimmWrapperModel cannot be tested with meta device")
    def test_can_load_with_meta_device_context_manager(self):
        pass

    @unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
    def test_generate_from_inputs_embeds_0_greedy(self):
        pass

    @unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
    def test_generate_from_inputs_embeds_1_beam_search(self):
        pass

    @unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    ## Skip flash attention related tests below
    ## correct configuration:
    ## from_pretrained(model_id, attn_implementation={"text_config": "flash_attention_2", "vision_config": "eager"}
    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_eager_matches_fa2_generate(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_fp32_ln(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_from_config(self):
        pass

    @unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_eager_matches_sdpa_generate_with_dynamic_cache(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        pass

    @unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_eager_matches_sdpa_generate(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_inference_equivalence(self):
        pass

    @unittest.skip(
        "PerceptionLMForConditionalGeneration does not have language_model, vision_tower, multi_modal_projector."
    )
    def test_sdpa_can_dispatch_composite_models(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_generate_compilation_all_outputs(self):
        pass


TEST_MODEL_PATH = "facebook/Perception-LM-1B"


@require_torch
@require_bitsandbytes
@slow
@require_read_token
class PerceptionLMForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released 1B checkpoint (4-bit, gated repo)."""

    def setUp(self):
        self.processor = AutoProcessor.from_pretrained(TEST_MODEL_PATH)
        # Small fixture image/video hosted as datasets on the Hub.
        self.image_file = hf_hub_download(
            repo_id="shumingh/perception_lm_test_images",
            filename="14496_0.PNG",
            repo_type="dataset",
        )
        self.video_file = hf_hub_download(
            repo_id="shumingh/perception_lm_test_videos",
            filename="GUWR5TyiY-M_000012_000022.mp4",
            repo_type="dataset",
        )
        self.conversation1 = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": self.image_file},
                    {"type": "text", "text": "Describe the bar plot in the image."},
                ],
            }
        ]
        self.conversation2 = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "video",
                        "url": self.video_file,
                    },
                    {"type": "text", "text": "Can you describe the video in detail?"},
                ],
            }
        ]

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_small_model_integration_test(self):
        model = PerceptionLMForConditionalGeneration.from_pretrained(
            TEST_MODEL_PATH, load_in_4bit=True, cache_dir="./"
        )
        inputs = self.processor.apply_chat_template(
            [self.conversation1],
            num_frames=32,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            video_load_backend="decord",
            padding=True,
            padding_side="left",
        ).to(torch_device)
        generate_ids = model.generate(**inputs, max_new_tokens=18)
        # Strip the prompt so only newly generated tokens are compared.
        input_length = inputs["input_ids"].shape[1]
        generate_ids_without_inputs = generate_ids[:, input_length:]

        EXPECTED_DECODED_TEXT = "The bar plot displays the values of four categories: step, horror, mood, and lumber"  # fmt: skip
        self.assertEqual(
            self.processor.decode(generate_ids_without_inputs[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_batched(self):
        model = PerceptionLMForConditionalGeneration.from_pretrained(TEST_MODEL_PATH, load_in_4bit=True)
        processor = AutoProcessor.from_pretrained(TEST_MODEL_PATH)
        inputs = processor.apply_chat_template(
            [self.conversation1, self.conversation2],
            num_frames=32,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            video_load_backend="decord",
            padding=True,
            padding_side="left",
        ).to(torch_device)

        generate_ids = model.generate(**inputs, max_new_tokens=18)
        input_length = inputs["input_ids"].shape[1]
        generate_ids_without_inputs = generate_ids[:, input_length:]

        EXPECTED_DECODED_TEXT = ['The bar plot displays the values of four categories: step, horror, mood, and lumber', 'The video shows a group of people in green shirts and white shorts performing a jump rope routine']  # fmt: skip

        self.assertEqual(
            processor.batch_decode(generate_ids_without_inputs, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_generation_no_images(self):
        # model_id = "facebook/Perception-LM-1B"
        model = PerceptionLMForConditionalGeneration.from_pretrained(TEST_MODEL_PATH, load_in_4bit=True)
        processor = AutoProcessor.from_pretrained(TEST_MODEL_PATH)

        # Prepare inputs with no images
        inputs = processor(text="Hello, I am", return_tensors="pt").to(torch_device)

        # Make sure that `generate` works
        _ = model.generate(**inputs, max_new_tokens=20)
transformers/tests/models/perception_lm/test_modeling_perception_lm.py/0
{ "file_path": "transformers/tests/models/perception_lm/test_modeling_perception_lm.py", "repo_id": "transformers", "token_count": 8339 }
600
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
from functools import lru_cache

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for PhoBERT, driven by a tiny hand-written BPE vocab/merges pair."""

    from_pretrained_id = "vinai/phobert-base"
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        """Write a minimal vocab file and merges file into the mixin's temp directory."""
        super().setUpClass()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        cls.special_tokens_map = {"unk_token": "<unk>"}

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        # PhoBERT expects "<token> <id>" lines for the vocab and raw merge rules for merges.
        with open(cls.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(cls.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        """Load a (cached) tokenizer, defaulting to the tiny fixture written in setUpClass."""
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return PhobertTokenizer.from_pretrained(pretrained_name, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # Most characters are absent from the tiny vocab, hence the many <unk> pieces.
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization must split into the expected BPE pieces and map unknowns to id 3."""
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        # Ids are offset by the special tokens; 3 is the <unk> id for every out-of-vocab piece.
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
transformers/tests/models/phobert/test_tokenization_phobert.py/0
{ "file_path": "transformers/tests/models/phobert/test_tokenization_phobert.py", "repo_id": "transformers", "token_count": 1286 }
601
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import (
    check_json_file_has_correct_format,
    require_essentia,
    require_librosa,
    require_scipy,
    require_torch,
)
from transformers.utils.import_utils import (
    is_essentia_available,
    is_librosa_available,
    is_scipy_available,
    is_torch_available,
)

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


# Pop2Piano needs torch + essentia + scipy + librosa; every test is gated on all four.
requirements_available = (
    is_torch_available() and is_essentia_available() and is_scipy_available() and is_librosa_available()
)

if requirements_available:
    import torch

    from transformers import Pop2PianoFeatureExtractor


class Pop2PianoFeatureExtractionTester:
    """Holds the tiny feature-extractor configuration used by the common mixin tests."""

    def __init__(
        self,
        parent,
        n_bars=2,
        sample_rate=22050,
        use_mel=True,
        padding_value=0,
        vocab_size_special=4,
        vocab_size_note=128,
        vocab_size_velocity=2,
        vocab_size_time=100,
    ):
        self.parent = parent
        self.n_bars = n_bars
        self.sample_rate = sample_rate
        self.use_mel = use_mel
        self.padding_value = padding_value
        self.vocab_size_special = vocab_size_special
        self.vocab_size_note = vocab_size_note
        self.vocab_size_velocity = vocab_size_velocity
        self.vocab_size_time = vocab_size_time

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to instantiate `Pop2PianoFeatureExtractor` in tests."""
        return {
            "n_bars": self.n_bars,
            "sample_rate": self.sample_rate,
            "use_mel": self.use_mel,
            "padding_value": self.padding_value,
            "vocab_size_special": self.vocab_size_special,
            "vocab_size_note": self.vocab_size_note,
            "vocab_size_velocity": self.vocab_size_velocity,
            "vocab_size_time": self.vocab_size_time,
        }


@require_torch
@require_essentia
@require_librosa
@require_scipy
class Pop2PianoFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Pop2PianoFeatureExtractor if requirements_available else None

    def setUp(self):
        self.feat_extract_tester = Pop2PianoFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        """Round-trip through save_pretrained/from_pretrained must preserve the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.use_mel
        mel_2 = feat_extract_second.use_mel
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        """Round-trip through to_json_file/from_json_file must preserve the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.use_mel
        mel_2 = feat_extract_second.use_mel
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_input = np.zeros([1000000], dtype=np.float32)

        input_features = feature_extractor(speech_input, sampling_rate=16_000, return_tensors="np")
        self.assertTrue(input_features.input_features.ndim == 3)
        self.assertEqual(input_features.input_features.shape[-1], 512)

        self.assertTrue(input_features.beatsteps.ndim == 2)
        self.assertTrue(input_features.extrapolated_beatstep.ndim == 2)

    def test_integration(self):
        """Compare extracted features against reference values from the released checkpoint."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        speech_samples = ds.sort("id").select([0])["audio"]
        input_speech = [x["array"] for x in speech_samples][0]
        sampling_rate = [x["sampling_rate"] for x in speech_samples][0]
        feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")
        input_features = feature_extractor(
            input_speech, sampling_rate=sampling_rate, return_tensors="pt"
        ).input_features

        EXPECTED_INPUT_FEATURES = torch.tensor(
            [[-7.1493, -6.8701, -4.3214], [-5.9473, -5.7548, -3.8438], [-6.1324, -5.9018, -4.3778]]
        )
        torch.testing.assert_close(input_features[0, :3, :3], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)

    def test_attention_mask(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_input1 = np.zeros([1_000_000], dtype=np.float32)
        speech_input2 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)
        input_features = feature_extractor(
            [speech_input1, speech_input2],
            sampling_rate=[44_100, 16_000],
            return_tensors="np",
            return_attention_mask=True,
        )

        self.assertTrue(hasattr(input_features, "attention_mask"))

        # check shapes
        self.assertTrue(input_features["attention_mask"].ndim == 2)
        self.assertEqual(input_features["attention_mask_beatsteps"].shape[0], 2)
        self.assertEqual(input_features["attention_mask_extrapolated_beatstep"].shape[0], 2)

        # check if they are any values except 0 and 1
        self.assertTrue(np.max(input_features["attention_mask"]) == 1)
        self.assertTrue(np.max(input_features["attention_mask_beatsteps"]) == 1)
        self.assertTrue(np.max(input_features["attention_mask_extrapolated_beatstep"]) == 1)

        self.assertTrue(np.min(input_features["attention_mask"]) == 0)
        self.assertTrue(np.min(input_features["attention_mask_beatsteps"]) == 0)
        self.assertTrue(np.min(input_features["attention_mask_extrapolated_beatstep"]) == 0)

    def test_batch_feature(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_input1 = np.zeros([1_000_000], dtype=np.float32)
        speech_input2 = np.ones([2_000_000], dtype=np.float32)
        speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)

        input_features = feature_extractor(
            [speech_input1, speech_input2, speech_input3],
            sampling_rate=[44_100, 16_000, 48_000],
            return_attention_mask=True,
        )

        self.assertEqual(len(input_features["input_features"].shape), 3)
        # check shape
        self.assertEqual(input_features["beatsteps"].shape[0], 3)
        self.assertEqual(input_features["extrapolated_beatstep"].shape[0], 3)

    def test_batch_feature_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_input1 = np.zeros([1_000_000], dtype=np.float32)
        speech_input2 = np.ones([2_000_000], dtype=np.float32)
        speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)

        input_features = feature_extractor(
            [speech_input1, speech_input2, speech_input3],
            sampling_rate=[44_100, 16_000, 48_000],
            return_tensors="np",
            return_attention_mask=True,
        )

        # check np array or not
        self.assertEqual(type(input_features["input_features"]), np.ndarray)

        # check shape
        self.assertEqual(len(input_features["input_features"].shape), 3)

    def test_batch_feature_pt(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_input1 = np.zeros([1_000_000], dtype=np.float32)
        speech_input2 = np.ones([2_000_000], dtype=np.float32)
        speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)

        input_features = feature_extractor(
            [speech_input1, speech_input2, speech_input3],
            sampling_rate=[44_100, 16_000, 48_000],
            return_tensors="pt",
            return_attention_mask=True,
        )

        # check pt tensor or not
        self.assertEqual(type(input_features["input_features"]), torch.Tensor)

        # check shape
        self.assertEqual(len(input_features["input_features"].shape), 3)

    @unittest.skip(
        "Pop2PianoFeatureExtractor does not support padding externally (while processing audios in batches padding is automatically applied to max_length)"
    )
    def test_padding_accepts_tensors_pt(self):
        pass

    @unittest.skip(
        "Pop2PianoFeatureExtractor does not support padding externally (while processing audios in batches padding is automatically applied to max_length)"
    )
    def test_padding_accepts_tensors_tf(self):
        pass

    @unittest.skip(
        "Pop2PianoFeatureExtractor does not support padding externally (while processing audios in batches padding is automatically applied to max_length)"
    )
    def test_padding_from_list(self):
        pass

    @unittest.skip(
        "Pop2PianoFeatureExtractor does not support padding externally (while processing audios in batches padding is automatically applied to max_length)"
    )
    def test_padding_from_array(self):
        pass

    @unittest.skip(reason="Pop2PianoFeatureExtractor does not support truncation")
    def test_attention_mask_with_truncation(self):
        pass

    @unittest.skip(reason="Pop2PianoFeatureExtractor does not support truncation")
    def test_truncation_from_array(self):
        pass

    @unittest.skip(reason="Pop2PianoFeatureExtractor does not support truncation")
    def test_truncation_from_list(self):
        pass
{ "file_path": "transformers/tests/models/pop2piano/test_feature_extraction_pop2piano.py", "repo_id": "transformers", "token_count": 4560 }
602
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import shutil import tempfile import unittest import numpy as np import pytest from transformers import AutoProcessor, Qwen2TokenizerFast from transformers.testing_utils import require_av, require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import Qwen2VLProcessor if is_torchvision_available(): from transformers import Qwen2VLImageProcessorFast, Qwen2VLVideoProcessor if is_torch_available(): import torch @require_vision @require_torch @require_torchvision class Qwen2VLProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Qwen2VLProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() processor = Qwen2VLProcessor.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", patch_size=4, max_pixels=56 * 56, min_pixels=28 * 28 ) processor.save_pretrained(cls.tmpdirname) cls.image_token = processor.image_token def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def get_video_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, 
**kwargs).video_processor def get_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs) @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() video_processor = self.get_video_processor() processor = Qwen2VLProcessor( tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor ) processor.save_pretrained(self.tmpdirname) processor = Qwen2VLProcessor.from_pretrained(self.tmpdirname, use_fast=True) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor.tokenizer, Qwen2TokenizerFast) self.assertIsInstance(processor.image_processor, Qwen2VLImageProcessorFast) self.assertIsInstance(processor.video_processor, Qwen2VLVideoProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() video_processor = self.get_video_processor() processor = Qwen2VLProcessor( tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor ) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="pt") input_processor = processor(images=image_input, text="dummy", return_tensors="pt") 
for key in input_image_proc: self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() video_processor = self.get_video_processor() processor = Qwen2VLProcessor( tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor ) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values", "image_grid_thw"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() # test if it raises when no text is passed with pytest.raises(TypeError): processor(images=image_input) @require_torch @require_av def _test_apply_chat_template( self, modality: str, batch_size: int, return_tensors: str, input_name: str, processor_name: str, input_data: list[str], ): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") if processor_name not in self.processor_class.attributes: self.skipTest(f"{processor_name} attribute not present in {self.processor_class}") batch_messages = [ [ { "role": "user", "content": [{"type": "text", "text": "Describe this."}], }, ] ] * batch_size # Test that jinja can be applied formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), batch_size) # Test that tokenizing with template and directly with `self.tokenizer` gives same output formatted_prompt_tokenized = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors ) add_special_tokens = True if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): add_special_tokens = False tok_output = 
processor.tokenizer( formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens ) expected_output = tok_output.input_ids self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) # Test that kwargs passed to processor's `__call__` are actually used tokenized_prompt_100 = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, padding="max_length", truncation=True, return_tensors=return_tensors, max_length=100, ) self.assertEqual(len(tokenized_prompt_100[0]), 100) # Test that `return_dict=True` returns text related inputs in the dict out_dict_text = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, ) self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"])) self.assertEqual(len(out_dict_text["input_ids"]), batch_size) self.assertEqual(len(out_dict_text["attention_mask"]), batch_size) # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict for idx, url in enumerate(input_data[:batch_size]): batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}] out_dict = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, num_frames=2, # by default no more than 2 frames, otherwise too slow ) input_name = getattr(self, input_name) self.assertTrue(input_name in out_dict) self.assertEqual(len(out_dict["input_ids"]), batch_size) self.assertEqual(len(out_dict["attention_mask"]), batch_size) if modality == "video": # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw expected_video_token_count = 0 for thw in out_dict["video_grid_thw"]: expected_video_token_count += thw[0] * thw[1] * thw[2] mm_len = expected_video_token_count else: mm_len = batch_size 
* 192 self.assertEqual(len(out_dict[input_name]), mm_len) return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list} for k in out_dict: self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors]) @require_av def test_apply_chat_template_video_frame_sampling(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest("Processor doesn't accept videos at input") messages = [ [ { "role": "user", "content": [ {"type": "video"}, {"type": "text", "text": "What is shown in this video?"}, ], }, ] ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), 1) formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"]) # Add video URL for return dict and load with `num_frames` arg messages[0][0]["content"][0] = { "type": "video", "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4", } num_frames = 3 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, num_frames=num_frames, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 360) # Load with `fps` arg fps = 1 
out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 900) # Load with `fps` and `num_frames` args, should raise an error with self.assertRaises(ValueError): out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, num_frames=num_frames, ) # Load without any arg should load the whole video out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 27000) # Load video as a list of frames (i.e. images). NOTE: each frame should have same size # because we assume they come from one video messages[0][0]["content"][0] = { "type": "video", "url": [ "https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg", ], } out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 160) def test_kwargs_overrides_custom_image_processor_kwargs(self): processor = self.get_processor() self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, return_tensors="pt") self.assertEqual(inputs[self.images_input_name].shape[0], 100) inputs = processor(text=input_str, images=image_input, max_pixels=56 * 56 * 4, return_tensors="pt") self.assertEqual(inputs[self.images_input_name].shape[0], 612) def test_special_mm_token_truncation(self): """Tests that 
special vision tokens do not get truncated when `truncation=True` is set.""" processor = self.get_processor() input_str = self.prepare_text_inputs(batch_size=2, modality="image") image_input = self.prepare_image_inputs(batch_size=2) _ = processor( text=input_str, images=image_input, return_tensors="pt", truncation=None, padding=True, ) with self.assertRaises(ValueError): _ = processor( text=input_str, images=image_input, return_tensors="pt", truncation=True, padding=True, max_length=20, )
transformers/tests/models/qwen2_vl/test_processing_qwen2_vl.py/0
{ "file_path": "transformers/tests/models/qwen2_vl/test_processing_qwen2_vl.py", "repo_id": "transformers", "token_count": 6626 }
603
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch RegNet model.""" import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import Expectations, is_flaky, require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class RegNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): 
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = RegNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = RegNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_torch_exportable = True def setUp(self): self.model_tester = RegNetModelTester(self) self.config_tester = ConfigTester( self, config_class=RegNetConfig, has_text_modality=False, common_properties=["num_channels", "hidden_sizes"], ) def test_config(self): self.config_tester.run_common_tests() @is_flaky(description="Larger difference with A10. Still flaky after setting larger tolerance") def test_batching_equivalence(self, atol=3e-5, rtol=3e-5): super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="RegNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="RegNet does not support input and output embeddings") def test_model_get_set_embeddings(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, module in model.named_modules(): if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertTrue( torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/regnet-y-040" model = RegNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class RegNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = 
image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expectations = Expectations( { (None, None): [-0.4180, -1.5051, -3.4836], ("cuda", 8): [-0.4180, -1.5051, -3.4836], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
transformers/tests/models/regnet/test_modeling_regnet.py/0
{ "file_path": "transformers/tests/models/regnet/test_modeling_regnet.py", "repo_id": "transformers", "token_count": 4153 }
604
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from functools import lru_cache from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible @require_rjieba @require_tokenizers class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "junnyu/roformer_chinese_small" tokenizer_class = RoFormerTokenizer rust_tokenizer_class = RoFormerTokenizerFast space_between_special_tokens = True test_rust_tokenizer = True @classmethod def setUpClass(cls): super().setUpClass() tokenizer = cls.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base") tokenizer.save_pretrained(cls.tmpdirname) @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, pretrained_name=None, **kwargs): pretrained_name = pretrained_name or cls.tmpdirname return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs) @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_rust_tokenizer(cls, pretrained_name=None, **kwargs): pretrained_name = pretrained_name or cls.tmpdirname return cls.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) def get_chinese_input_output_texts(self): input_text = "永和服装饰品有限公司,今天天气非常好" output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, 
output_text def test_tokenizer(self): tokenizer = self.get_tokenizer() input_text, output_text = self.get_chinese_input_output_texts() tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, output_text.split()) input_tokens = tokens + [tokenizer.unk_token] exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens) def test_rust_tokenizer(self): # noqa: F811 tokenizer = self.get_rust_tokenizer() input_text, output_text = self.get_chinese_input_output_texts() tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, output_text.split()) input_tokens = tokens + [tokenizer.unk_token] exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens) @unittest.skip(reason="Cannot train new tokenizer via Tokenizers lib") def test_training_new_tokenizer(self): pass @unittest.skip(reason="Cannot train new tokenizer via Tokenizers lib") def test_training_new_tokenizer_with_special_tokens_change(self): pass def test_save_slow_from_fast_and_reload_fast(self): for cls in [RoFormerTokenizer, RoFormerTokenizerFast]: original = cls.from_pretrained("alchemab/antiberta2") self.assertEqual(original.encode("生活的真谛是"), [1, 4, 4, 4, 4, 4, 4, 2]) with tempfile.TemporaryDirectory() as tmp_dir: original.save_pretrained(tmp_dir) new = cls.from_pretrained(tmp_dir) self.assertEqual(new.encode("生活的真谛是"), [1, 4, 4, 4, 4, 4, 4, 2])
transformers/tests/models/roformer/test_tokenization_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_tokenization_roformer.py", "repo_id": "transformers", "token_count": 1644 }
605
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from transformers import AutoProcessor, Sam2ImageProcessorFast, Sam2Processor if is_torch_available(): import torch if is_tf_available(): pass @require_vision @require_torchvision class Sam2ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = Sam2ImageProcessorFast() processor = Sam2Processor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = torch.randint(0, 256, size=(1, 3, 30, 400), dtype=torch.uint8) # image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def prepare_mask_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ mask_inputs = torch.randint(0, 256, size=(1, 30, 400), dtype=torch.uint8) # mask_inputs = [Image.fromarray(x) for x in mask_inputs] return mask_inputs def test_save_load_pretrained_additional_features(self): image_processor = self.get_image_processor() processor = Sam2Processor(image_processor=image_processor) processor.save_pretrained(self.tmpdirname) image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Sam2Processor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, Sam2ImageProcessorFast) def test_image_processor_no_masks(self): image_processor = self.get_image_processor() processor = Sam2Processor(image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input) input_processor = processor(images=image_input) for key in input_feat_extract.keys(): if key == "pixel_values": for input_feat_extract_item, input_processor_item in zip( input_feat_extract[key], input_processor[key] ): np.testing.assert_array_equal(input_feat_extract_item, input_processor_item) else: self.assertEqual(input_feat_extract[key], input_processor[key]) for image in input_feat_extract.pixel_values: self.assertEqual(image.shape, (3, 1024, 1024)) for original_size in input_feat_extract.original_sizes: np.testing.assert_array_equal(original_size, np.array([30, 400])) def test_image_processor_with_masks(self): image_processor = 
self.get_image_processor() processor = Sam2Processor(image_processor=image_processor) image_input = self.prepare_image_inputs() mask_input = self.prepare_mask_inputs() input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt") input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) for label in input_feat_extract.labels: self.assertEqual(label.shape, (256, 256)) @require_torch def test_post_process_masks(self): image_processor = self.get_image_processor() processor = Sam2Processor(image_processor=image_processor) dummy_masks = [torch.ones((1, 3, 5, 5))] original_sizes = [[1764, 2646]] masks = processor.post_process_masks(dummy_masks, original_sizes) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) masks = processor.post_process_masks(dummy_masks, torch.tensor(original_sizes)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np dummy_masks = [np.ones((1, 3, 5, 5))] masks = processor.post_process_masks(dummy_masks, np.array(original_sizes)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) dummy_masks = [[1, 0], [0, 1]] with self.assertRaises(ValueError): masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
transformers/tests/models/sam2/test_processor_sam2.py/0
{ "file_path": "transformers/tests/models/sam2/test_processor_sam2.py", "repo_id": "transformers", "token_count": 2263 }
606
# Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from functools import lru_cache from transformers import SPIECE_UNDERLINE, AddedToken, BatchEncoding, SiglipTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class SiglipTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/siglip-base-patch16-224" tokenizer_class = SiglipTokenizer test_rust_tokenizer = False test_sentencepiece = True test_sentencepiece_ignore_case = True @classmethod def setUpClass(cls): super().setUpClass() # We have a SentencePiece fixture for testing tokenizer = SiglipTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(cls.tmpdirname) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_convert_token_and_id with T5->Siglip def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") def test_full_tokenizer(self): tokenizer = SiglipTokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [66, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE, "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [7, 23, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 12, 66, 46, 72, 80, 6, 0]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE, "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ], ) @cached_property def siglip_tokenizer(self): return SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224") @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, pretrained_name=None, **kwargs) -> SiglipTokenizer: pretrained_name = pretrained_name or cls.tmpdirname return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_rust_and_python_full_tokenizers with T5->Siglip def test_rust_and_python_full_tokenizers(self): if not 
self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.siglip_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) def test_empty_target_text(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = 
self.siglip_tokenizer tgt_text = ["Summary of the text.", "Another summary."] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_eos_in_input(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. </s>"] expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, 1] expected_tgt_tokens = [6254, 267, 260, 1443, 1] batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) @unittest.skip(reason="SiglipTokenizer strips the punctuation") def test_subword_regularization_tokenizer(self): pass @unittest.skip(reason="SiglipTokenizer strips the punctuation") def test_pickle_subword_regularization_tokenizer(self): pass # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_special_tokens_initialization with T5->Siglip def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)] tokenizer_r = self.get_rust_tokenizer( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) tokenizer_cr = self.get_rust_tokenizer( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") r_output = tokenizer_r.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertEqual(p_output, 
r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in r_output) self.assertTrue(special_token_id in cr_output) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_special_tokens_initialization_with_non_empty_additional_special_tokens with T5->Siglip def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(100)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # BySiglipTokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): """Test ``_tokenize`` and ``convert_tokens_to_string``.""" if not self.test_sentencepiece: self.skipTest(reason="test_sentencepiece is set to False") tokenizer = self.get_tokenizer() text = "This is text to test the tokenizer." 
if self.test_sentencepiece_ignore_case: text = text.lower() tokens = tokenizer.tokenize(text) self.assertTrue(len(tokens) > 0) # check if converting back to original text works reverse_text = tokenizer.convert_tokens_to_string(tokens) if self.test_sentencepiece_ignore_case: reverse_text = reverse_text.lower() expected_text = "this is text to test the tokenizer" self.assertEqual(reverse_text, expected_text) special_tokens = tokenizer.all_special_tokens special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens) for special_token in special_tokens: self.assertIn(special_token, special_tokens_string) if self.test_rust_tokenizer: rust_tokenizer = self.get_rust_tokenizer() special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens) self.assertEqual(special_tokens_string, special_tokens_string_rust) @slow def test_tokenizer_integration(self): tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224") # fmt: off texts = [ 'the real mountain view', 'Zürich', 'San Francisco', 'a picture of a laptop with the lockscreen on, a cup of cappucino, salt and pepper grinders. 
The view through the window reveals lake Zürich and the Alps in the background of the city.', ] expected_input_ids = [ [260, 638, 3293, 870, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 761, 5879, 5345, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 264, 452, 20563, 15949, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 266, 1357, 267, 262, 266, 4429, 275, 260, 3940, 6360, 277, 262, 266, 3064, 267, 3549, 388, 16538, 296, 298, 2617, 263, 4869, 14998, 264, 260, 870, 393, 260, 1710, 7958, 4324, 262, 761, 5879, 5345, 263, 260, 1518, 388, 264, 268, 260, 1970, 267, 260, 741, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: on for text, expected in zip(texts, expected_input_ids): input_ids = tokenizer(text, padding="max_length").input_ids self.assertListEqual(input_ids, expected) def test_some_edge_cases(self): tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224", legacy=False) sp_tokens = tokenizer.sp_model.encode("</s>>", out_type=str) self.assertEqual(sp_tokens, ["</", "s", ">", ">"]) tokens = tokenizer.tokenize("</s>>") self.assertNotEqual(sp_tokens, tokens) self.assertEqual(tokens, ["</s>"]) tokens = tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str)) tokens = tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str)) tokens = tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) tokens = tokenizer.tokenize(" ▁") 
self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) @require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): """ A class that regroups important test to make sure that we properly handle the special tokens. """ @classmethod def setUpClass(cls): tokenizer = SiglipTokenizer(SAMPLE_VOCAB, extra_ids=0, legacy=False) tokenizer.add_special_tokens( {"additional_special_tokens": [AddedToken("<extra_id_0>", rstrip=False, lstrip=False)]} ) cls.tokenizer = tokenizer def test_add_dummy_prefix(self): # make sure `'▁'` is prepended, and outputs match sp_model's # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute input_ids = self.tokenizer.encode(". Hello", add_special_tokens=False) self.assertEqual(input_ids, [37, 86, 20]) self.assertEqual(input_ids, [37, 86, 20]) tokens = self.tokenizer.tokenize(". Hello") self.assertEqual(tokens, ["▁he", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): # make sure the extra spaces are eaten # sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute input_ids = self.tokenizer.encode(" . Hello", add_special_tokens=False) self.assertEqual(input_ids, [37, 86, 20]) self.assertEqual(input_ids, [37, 86, 20]) tokens = self.tokenizer.tokenize(" . 
Hello") self.assertEqual(tokens, ["▁he", "ll", "o"]) # `'▁'` is also a whitespace input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [37, 46, 44, 2]) tokens = self.tokenizer.tokenize("▁He is not") self.assertEqual(tokens, ["▁he", "▁is", "▁not"]) # no extra space added input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [37, 46, 44, 37, 2]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁he", "▁is", "▁not", "▁he"]) # spaces are eaten by spm even if not start
transformers/tests/models/siglip/test_tokenization_siglip.py/0
{ "file_path": "transformers/tests/models/siglip/test_tokenization_siglip.py", "repo_id": "transformers", "token_count": 9596 }
607
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `Speech2TextProcessor`: save/load round-trips and output parity with
the underlying tokenizer and feature extractor."""

import shutil
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import Speech2TextFeatureExtractor, Speech2TextProcessor, Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, require_torchaudio
from transformers.utils import FEATURE_EXTRACTOR_NAME

from .test_feature_extraction_speech_to_text import floats_list


# Small SentencePiece model checked into the shared test fixtures.
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")


@require_torch
@require_torchaudio
@require_sentencepiece
class Speech2TextProcessorTest(unittest.TestCase):
    """Exercises `Speech2TextProcessor`, which pairs a `Speech2TextTokenizer`
    with a `Speech2TextFeatureExtractor`."""

    @classmethod
    def setUpClass(cls):
        # Build a minimal tokenizer (tiny vocab + fixture spm model) and a
        # feature-extractor config in one shared temporary directory so every
        # test can load both components via `from_pretrained`.
        cls.tmpdirname = tempfile.mkdtemp()

        vocab = ["<s>", "<pad>", "</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(cls.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        # Copy the fixture SentencePiece model next to the vocab (skip if a
        # previous run already placed it there).
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(cls.tmpdirname)
        tokenizer.save_pretrained(cls.tmpdirname)

        feature_extractor_map = {
            "feature_size": 24,
            "num_mel_bins": 24,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        save_json(feature_extractor_map, save_dir / FEATURE_EXTRACTOR_NAME)

    def get_tokenizer(self, **kwargs):
        # Fresh tokenizer loaded from the shared temp dir; kwargs override defaults.
        return Speech2TextTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        # Fresh feature extractor loaded from the shared temp dir.
        return Speech2TextFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    @classmethod
    def tearDownClass(cls):
        # Remove the shared temp dir created in setUpClass.
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def test_save_load_pretrained_default(self):
        """A processor saved and reloaded keeps both sub-components intact."""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = Speech2TextProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """Extra `from_pretrained` kwargs are forwarded to both sub-components."""
        with tempfile.TemporaryDirectory() as tmpdir:
            processor = Speech2TextProcessor(
                tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
            )
            processor.save_pretrained(tmpdir)

            # Reference components loaded directly with the same extra kwargs.
            tokenizer_add_kwargs = Speech2TextTokenizer.from_pretrained(tmpdir, bos_token="(BOS)", eos_token="(EOS)")
            feature_extractor_add_kwargs = Speech2TextFeatureExtractor.from_pretrained(
                tmpdir, do_normalize=False, padding_value=1.0
            )

            processor = Speech2TextProcessor.from_pretrained(
                tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
            )

            self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
            self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer)

            self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
            self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor)

    def test_feature_extractor(self):
        """Calling the processor on raw audio matches the feature extractor directly."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        # Compare per-key sums (cheap proxy for elementwise equality on floats).
        for key in input_feat_extract:
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Calling the processor with text matches calling the tokenizer directly."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok:
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        """`processor.batch_decode` delegates to the tokenizer's `batch_decode`."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/speech_to_text/test_processing_speech_to_text.py/0
{ "file_path": "transformers/tests/models/speech_to_text/test_processing_speech_to_text.py", "repo_id": "transformers", "token_count": 2379 }
608
# Copyright 2024 BigCode and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Starcoder2 model."""

import unittest

import pytest

from transformers import Starcoder2Config, is_torch_available
from transformers.testing_utils import (
    Expectations,
    require_bitsandbytes,
    require_flash_attn,
    require_torch,
    require_torch_accelerator,
    require_torch_gpu,
    slow,
    torch_device,
)


if is_torch_available():
    import torch

    from transformers import (
        AutoTokenizer,
        Starcoder2ForCausalLM,
        Starcoder2ForSequenceClassification,
        Starcoder2ForTokenClassification,
        Starcoder2Model,
    )

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester


class Starcoder2ModelTester(CausalLMModelTester):
    # Wires the Starcoder2 model classes into the shared causal-LM tester harness.
    config_class = Starcoder2Config
    if is_torch_available():
        base_model_class = Starcoder2Model
        causal_lm_class = Starcoder2ForCausalLM
        sequence_class = Starcoder2ForSequenceClassification
        token_class = Starcoder2ForTokenClassification


@require_torch
class Starcoder2ModelTest(CausalLMModelTest, unittest.TestCase):
    """Common model tests for Starcoder2, inherited from the shared causal-LM suite."""

    all_model_classes = (
        (Starcoder2Model, Starcoder2ForCausalLM, Starcoder2ForSequenceClassification, Starcoder2ForTokenClassification)
        if is_torch_available()
        else ()
    )
    test_headmasking = False
    test_pruning = False
    model_tester_class = Starcoder2ModelTester
    pipeline_model_mapping = (
        {
            "feature-extraction": Starcoder2Model,
            "text-classification": Starcoder2ForSequenceClassification,
            "token-classification": Starcoder2ForTokenClassification,
            "text-generation": Starcoder2ForCausalLM,
        }
        if is_torch_available()
        else {}
    )

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        # Overrides the inherited test: right padding is unsupported with FA2 here.
        self.skipTest(reason="Starcoder2 flash attention does not support right padding")


@slow
@require_torch_accelerator
class Starcoder2IntegrationTest(unittest.TestCase):
    """Slow end-to-end generation tests against the released bigcode/starcoder2-7b
    checkpoint, one per attention implementation plus a 4-bit variant."""

    def test_starcoder2_batched_generation_sdpa(self):
        """Greedy batched generation with the SDPA attention implementation."""
        EXPECTED_TEXT = [
            "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on",
            "def hello_world():\n\treturn 'Hello World!'\n\n@app.route('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app",
        ]
        model_id = "bigcode/starcoder2-7b"

        model = Starcoder2ForCausalLM.from_pretrained(
            model_id, dtype=torch.float16, device_map="auto", attn_implementation="sdpa"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token

        text = ["Hello my name is Younes and", "def hello_world():"]
        inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)

        output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)

    def test_starcoder2_batched_generation_eager(self):
        """Greedy batched generation with the eager attention implementation."""
        EXPECTED_TEXT = [
            "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on",
            "def hello_world():\n\treturn 'Hello World!'\n\n@app.route('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app",
        ]
        model_id = "bigcode/starcoder2-7b"

        model = Starcoder2ForCausalLM.from_pretrained(
            model_id, dtype=torch.float16, device_map="auto", attn_implementation="eager"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token

        text = ["Hello my name is Younes and", "def hello_world():"]
        inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)

        output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)

    @require_flash_attn
    @pytest.mark.flash_attn_test
    def test_starcoder2_batched_generation_fa2(self):
        """Greedy batched generation with the flash-attention-2 implementation."""
        EXPECTED_TEXT = [
            "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on",
            "def hello_world():\n\treturn 'Hello World!'\n\n@app.route('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app",
        ]
        model_id = "bigcode/starcoder2-7b"

        model = Starcoder2ForCausalLM.from_pretrained(
            model_id, dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token

        text = ["Hello my name is Younes and", "def hello_world():"]
        inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)

        output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)

    @require_bitsandbytes
    def test_starcoder2_batched_generation_4bit(self):
        """Greedy batched generation with 4-bit (bitsandbytes) quantization.

        Expected outputs differ by hardware, hence the `Expectations` lookup.
        """
        expectations = Expectations(
            {
                (None, None): [
                    'Hello my name is Younes and I am a student at the University of Maryland. I am currently working on a project that is related to the topic of "How to make a game". I am currently working on a project',
                    'def hello_world():\n\treturn "Hello World"\n\n@app.route(\'/hello/<name>\')\ndef hello_name(name):\n\treturn "Hello " + name\n\n@app.route',
                ],
                ("cuda", 8): [
                    "Hello my name is Younes and I am a student at the University of Maryland. I am currently working on a project that is aimed at creating a new way of learning. I am hoping to create a new way of",
                    'def hello_world():\n\treturn "Hello World"\n\n@app.route(\'/hello/<name>\')\ndef hello_name(name):\n\treturn "Hello " + name\n\n@app.route',
                ],
            }
        )
        EXPECTED_TEXT = expectations.get_expectation()
        model_id = "bigcode/starcoder2-7b"

        model = Starcoder2ForCausalLM.from_pretrained(model_id, load_in_4bit=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token

        text = ["Hello my name is Younes and", "def hello_world():"]
        inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)

        output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)
transformers/tests/models/starcoder2/test_modeling_starcoder2.py/0
{ "file_path": "transformers/tests/models/starcoder2/test_modeling_starcoder2.py", "repo_id": "transformers", "token_count": 3170 }
609
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch TimeSeriesTransformer model.""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from transformers.utils import check_torch_load_is_safe from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import ( TimeSeriesTransformerConfig, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, ) from transformers.models.time_series_transformer.modeling_time_series_transformer import ( TimeSeriesTransformerDecoder, TimeSeriesTransformerEncoder, ) @require_torch class TimeSeriesTransformerModelTester: def __init__( self, parent, batch_size=13, prediction_length=7, context_length=14, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], ): self.parent = parent self.batch_size = batch_size self.prediction_length = 
prediction_length self.context_length = context_length self.cardinality = cardinality self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.embedding_dimension = embedding_dimension self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_seq_length = context_length self.decoder_seq_length = prediction_length def get_config(self): return TimeSeriesTransformerConfig( encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_real_features=1, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], scaling="std", # we need std to get non-zero `loc` ) def prepare_time_series_transformer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) static_real_features = floats_tensor([self.batch_size, 1]) past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) past_values = floats_tensor([self.batch_size, _past_length]) past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5 # decoder inputs future_time_features = floats_tensor([self.batch_size, config.prediction_length, 
config.num_time_features]) future_values = floats_tensor([self.batch_size, config.prediction_length]) inputs_dict = { "past_values": past_values, "static_categorical_features": static_categorical_features, "static_real_features": static_real_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_time_series_transformer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = TimeSeriesTransformerModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = TimeSeriesTransformerEncoder.from_pretrained(tmpdirname).to(torch_device) transformer_inputs, _, _, _ = model.create_network_inputs(**inputs_dict) enc_input = transformer_inputs[:, : config.context_length, ...] dec_input = transformer_inputs[:, config.context_length :, ...] 
encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = TimeSeriesTransformerDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class TimeSeriesTransformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TimeSeriesTransformerModel, TimeSeriesTransformerForPrediction) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": TimeSeriesTransformerModel} if is_torch_available() else {} is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = False def setUp(self): self.model_tester = TimeSeriesTransformerModelTester(self) self.config_tester = ConfigTester( self, config_class=TimeSeriesTransformerConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, ) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @unittest.skip(reason="Model has no tokens embeddings") 
def test_resize_tokens_embeddings(self): pass # # Input is 'static_categorical_features' not 'input_ids' def test_model_main_input_name(self): model_signature = inspect.signature(getattr(TimeSeriesTransformerModel, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(TimeSeriesTransformerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] expected_arg_names.extend( [ "future_observed_mask", "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] if "future_observed_mask" in arg_names else [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True 
inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) out_len = len(outputs) correct_outlen = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_seq_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_seq_length, ], ) # Check 
attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 2, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @parameterized.expand( [ (1, 5, [1]), (1, 5, [1, 10, 15]), (1, 5, [3, 6, 9, 10]), (2, 5, [1, 2, 7]), (2, 5, [2, 3, 4, 6]), (4, 5, [1, 5, 9, 11]), (4, 5, [7, 8, 13, 14]), ], ) def test_create_network_inputs(self, prediction_length, context_length, lags_sequence): history_length = max(lags_sequence) + context_length config = TimeSeriesTransformerConfig( prediction_length=prediction_length, context_length=context_length, lags_sequence=lags_sequence, scaling=False, num_parallel_samples=10, num_static_categorical_features=1, cardinality=[1], embedding_dimension=[2], num_static_real_features=1, ) model = TimeSeriesTransformerModel(config) batch = { "static_categorical_features": 
torch.tensor([[0]], dtype=torch.int64), "static_real_features": torch.tensor([[0.0]], dtype=torch.float32), "past_time_features": torch.arange(history_length, dtype=torch.float32).view(1, history_length, 1), "past_values": torch.arange(history_length, dtype=torch.float32).view(1, history_length), "past_observed_mask": torch.arange(history_length, dtype=torch.float32).view(1, history_length), } # test with no future_target (only one step prediction) batch["future_time_features"] = torch.arange(history_length, history_length + 1, dtype=torch.float32).view( 1, 1, 1 ) transformer_inputs, loc, scale, _ = model.create_network_inputs(**batch) self.assertTrue((scale == 1.0).all()) assert (loc == 0.0).all() ref = torch.arange(max(lags_sequence), history_length, dtype=torch.float32) for idx, lag in enumerate(lags_sequence): assert torch.isclose(ref - lag, transformer_inputs[0, :, idx]).all() # test with all future data batch["future_time_features"] = torch.arange( history_length, history_length + prediction_length, dtype=torch.float32 ).view(1, prediction_length, 1) batch["future_values"] = torch.arange( history_length, history_length + prediction_length, dtype=torch.float32 ).view(1, prediction_length) transformer_inputs, loc, scale, _ = model.create_network_inputs(**batch) assert (scale == 1.0).all() assert (loc == 0.0).all() ref = torch.arange(max(lags_sequence), history_length + prediction_length, dtype=torch.float32) for idx, lag in enumerate(lags_sequence): assert torch.isclose(ref - lag, transformer_inputs[0, :, idx]).all() # test for generation batch.pop("future_values") transformer_inputs, loc, scale, _ = model.create_network_inputs(**batch) lagged_sequence = model.get_lagged_subsequences( sequence=batch["past_values"], subsequences_length=1, shift=1, ) # assert that the last element of the lagged sequence is the one after the encoders input assert transformer_inputs[0, ..., 0][-1] + 1 == lagged_sequence[0, ..., 0][-1] future_values = torch.arange(history_length, 
history_length + prediction_length, dtype=torch.float32).view( 1, prediction_length ) # assert that the first element of the future_values is offset by lag after the decoders input assert lagged_sequence[0, ..., 0][-1] + lags_sequence[0] == future_values[0, ..., 0] @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() @unittest.skip(reason="Model does not have input embeddings") def test_model_get_set_embeddings(self): pass def prepare_batch(filename="train-batch.pt"): file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset") check_torch_load_is_safe() batch = torch.load(file, map_location=torch_device, weights_only=True) return batch @require_torch @slow class TimeSeriesTransformerModelIntegrationTests(unittest.TestCase): def test_inference_no_head(self): model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly").to( torch_device ) batch = prepare_batch() with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], ).last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.8196, -1.5131, 1.4620], [1.1268, -1.3238, 1.5997], [1.5098, -1.0715, 1.7359]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_inference_head(self): model = TimeSeriesTransformerForPrediction.from_pretrained( "huggingface/time-series-transformer-tourism-monthly" ).to(torch_device) batch = prepare_batch("val-batch.pt") 
with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], future_time_features=batch["future_time_features"], ).encoder_last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-1.2957, -1.0280, -0.6045], [-0.7017, -0.8193, -0.3717], [-1.0449, -0.8149, 0.1405]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_seq_to_seq_generation(self): model = TimeSeriesTransformerForPrediction.from_pretrained( "huggingface/time-series-transformer-tourism-monthly" ).to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): outputs = model.generate( static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], ) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor([2825.2749, 3584.9207, 6763.9951], device=torch_device) mean_prediction = outputs.sequences.mean(dim=1) torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
transformers/tests/models/time_series_transformer/test_modeling_time_series_transformer.py/0
{ "file_path": "transformers/tests/models/time_series_transformer/test_modeling_time_series_transformer.py", "repo_id": "transformers", "token_count": 10533 }
610
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VideoLlava model.""" import copy import unittest import numpy as np import requests from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import ( VideoLlavaConfig, VideoLlavaForConditionalGeneration, VideoLlavaModel, VideoLlavaProcessor, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( cleanup, require_bitsandbytes, require_torch, run_test_using_subprocess, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image class VideoLlavaVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, video_token_index=1, projector_hidden_act="gelu", seq_length=3, num_frames=2, vision_feature_select_strategy="default", vision_feature_layer=-1, text_config={ "model_type": "llama", "seq_length": 13, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, 
"max_position_embeddings": 2048, # we need it high because videos are 8 frames "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 3, }, is_training=True, vision_config={ "model_type": "clip_vision_model", "batch_size": 12, "image_size": 8, "patch_size": 6, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.num_frames = num_frames self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 5 self.num_channels = 3 self.image_size = 224 self.num_image_tokens = (vision_config["image_size"] // vision_config["patch_size"]) ** 2 self.num_video_tokens = (self.num_image_tokens + 1) * self.num_frames self.seq_length = seq_length + self.num_image_tokens + self.num_video_tokens def get_config(self): return VideoLlavaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, video_token_index=self.video_token_index, projector_hidden_act=self.projector_hidden_act, vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, image_seq_length=self.num_image_tokens, 
video_seq_length=self.num_video_tokens, ) def prepare_config_and_inputs(self): pixel_values_videos = floats_tensor( [ self.batch_size, self.num_frames, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) pixel_values_images = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values_images, pixel_values_videos def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values_images, pixel_values_videos = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(1).to(torch_device) input_ids[(input_ids == config.image_token_index) | (input_ids == config.video_token_index)] = ( self.pad_token_id ) input_ids[:, : self.num_image_tokens] = config.image_token_index input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_index inputs_dict = { "pixel_values_videos": pixel_values_videos, "pixel_values_images": pixel_values_images, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class VideoLlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `VideoLlavaForConditionalGeneration`. 
""" all_model_classes = ( ( VideoLlavaModel, VideoLlavaForConditionalGeneration, ) if is_torch_available() else () ) fx_compatible = False test_pruning = False test_resize_embeddings = True test_head_masking = False _is_composite = True def setUp(self): self.model_tester = VideoLlavaVisionText2TextModelTester(self) common_properties = ["image_token_index", "video_token_index", "vision_feature_layer", "image_seq_length"] self.config_tester = ConfigTester( self, config_class=VideoLlavaConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. 
Can be tested as part of LLM test" ) def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @run_test_using_subprocess def test_mixed_input(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: curr_inputs = copy.deepcopy(inputs) model = model_class(config).to(torch_device).eval() # test that the forward does not fail with torch.no_grad(): _ = model(**curr_inputs) # if we remove some images from inputs leaving only one # image number mismatch error should raise curr_inputs["pixel_values_images"] = curr_inputs["pixel_values_images"][:1] with self.assertRaises(ValueError): _ = model(**curr_inputs) def test_video_only_input(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: curr_inputs = copy.deepcopy(inputs) model = model_class(config).to(torch_device).eval() # replace image token id with dummy id # Error will be raised as num-image-tokens and num-of-image-embeds mismatch curr_inputs["input_ids"][:, : self.model_tester.num_image_tokens] = 2 with self.assertRaises(ValueError): _ = model(**curr_inputs) curr_inputs["pixel_values_images"] = None _ = model(**curr_inputs) def test_image_only_input(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: curr_inputs = copy.deepcopy(inputs) model = model_class(config).to(torch_device).eval() # set dummy id, which is not video token id # Error will be raised as num-video-tokens and num-of-video-embeds mismatch curr_inputs["input_ids"][ :, self.model_tester.num_image_tokens : self.model_tester.num_image_tokens + self.model_tester.num_video_tokens, ] = 2 with self.assertRaises(ValueError): _ = model(**curr_inputs) curr_inputs["pixel_values_videos"] = None _ = model(**curr_inputs) def test_batching_equivalence(self): def recursive_check(batched_object, single_row_object, model_name, key): if 
isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return else: batched_row = batched_object[:1] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (torch.max(torch.abs(batched_row - single_row_object))) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={torch.max(torch.abs(batched_row - single_row_object))}." 
), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() single_row_input = {} for key, value in batched_input_prepared.items(): single_row_input[key] = value[:1] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) for key in model_batched_output: # we can't test videos as their output shapes are linked to number of frames # and we don't have to as it is a CLIP model and can be tested from `ClipModelTester` class if key == "video_hidden_states": continue recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) curr_input_dict = copy.deepcopy(input_dict) _ = model(**curr_input_dict) # successful forward with no modifications # remove one image but leave the image token in text curr_input_dict["pixel_values_images"] = curr_input_dict["pixel_values_images"][-1:, ...] 
with self.assertRaises(ValueError): _ = model(**curr_input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values_images"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model(input_ids=input_ids, pixel_values_images=pixel_values) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) _ = model(input_ids=input_ids, pixel_values_images=pixel_values) @parameterized.expand( [ (-1,), ([-1],), ([-1, -2],), ], ) def test_vision_feature_layers(self, vision_feature_layer): """ Test that we can use either one vision feature layer, or a list of vision feature layers. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.vision_feature_layer = vision_feature_layer num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer) hidden_size = config.vision_config.hidden_size expected_features = hidden_size * num_feature_layers for model_class in self.all_model_classes: model = model_class(config).to(torch_device) # We should have the right number of input features, # and should be able to run a forward pass without exploding base_model = getattr(model, "model", model) assert base_model.multi_modal_projector.linear_1.in_features == expected_features model(**input_dict) @require_torch class VideoLlavaForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf") def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_bitsandbytes def test_small_model_integration_test(self): # Let' s make sure we test the preprocessing to replace what is used model = 
VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:" video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) inputs = self.processor(text=prompt, videos=video_file, return_tensors="pt").to(torch_device) EXPECTED_INPUT_IDS = torch.tensor([1, 3148, 1001, 29901, 29871, 13, 11008, 338, 445, 4863, 2090, 1460, 29973, 319, 1799, 9047, 13566, 29901], device=torch_device) # fmt: skip non_video_inputs = inputs["input_ids"][inputs["input_ids"] != 32001] self.assertTrue(torch.equal(non_video_inputs, EXPECTED_INPUT_IDS)) output = model.generate(**inputs, do_sample=False, max_new_tokens=20) EXPECTED_DECODED_TEXT = "USER: \nWhy is this video funny? ASSISTANT: The video is funny because it shows a baby sitting on a bed and reading a book, which" # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_mixed_inputs(self): model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) prompts = [ "USER: <image>\nWhat are the cats in the image doing? ASSISTANT:", "USER: <video>\nWhy is this video funny? ASSISTANT:", ] video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = self.processor( text=prompts, images=[image], videos=[video_file], padding=True, return_tensors="pt" ).to(torch_device) output = model.generate(**inputs, do_sample=False, max_new_tokens=20) EXPECTED_DECODED_TEXT = [ 'USER: \nWhat are the cats in the image doing? 
ASSISTANT: The cats in the image are sleeping or resting on a couch.', 'USER: \nWhy is this video funny? ASSISTANT: The video is funny because it shows a baby sitting on a bed and reading a book, which' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_llama(self): model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf") prompt = "USER: <video>\nDescribe the video in details. ASSISTANT:" video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) inputs = self.processor(text=prompt, videos=video_file, return_tensors="pt").to(torch_device, torch.float16) output = model.generate(**inputs, max_new_tokens=900, do_sample=False) EXPECTED_DECODED_TEXT = "USER: \nDescribe the video in details. ASSISTANT: The video features a young child sitting on a bed, holding a book and reading it. " \ "The child appears to be enjoying the book, as they are fully engaged in the activity. The bed is located in a bedroom, and there is a chair nearby. The " \ "child is wearing a blue shirt and glasses, which suggests that they might have a visual impairment. The room is well-lit, and there is a clock on the wall, " \ "indicating the time. The child's focus on the book indicates that they are interested in the content and are actively participating in the reading process. " \ "Overall, the video captures a heartwarming moment of a child engaging in a simple yet essential activity, which is reading." 
# fmt: skip self.assertEqual( processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_llama_batched(self): model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf") processor.tokenizer.padding_side = "left" prompts = [ "USER: <video>\nWhat is the baby doing? ASSISTANT:", "USER: <video>\nWho is sitting next to the woman? ASSISTANT:", ] video_1 = np.load( hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset") ) video_2 = np.load( hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo_2.npy", repo_type="dataset") ) inputs = processor(text=prompts, videos=[video_1, video_2], return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = [ 'USER: \nWhat is the baby doing? ASSISTANT: The baby is sitting on a bed and reading a book.', 'USER: \nWho is sitting next to the woman? ASSISTANT: A small dog is sitting next to the woman.' ] # fmt: skip self.assertEqual(processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT)
transformers/tests/models/video_llava/test_modeling_video_llava.py/0
{ "file_path": "transformers/tests/models/video_llava/test_modeling_video_llava.py", "repo_id": "transformers", "token_count": 10056 }
611
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VisualBERT model.""" import copy import unittest from transformers import VisualBertConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, VisualBertModel, ) class VisualBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, visual_seq_length=5, is_training=True, use_attention_mask=True, use_visual_attention_mask=True, use_token_type_ids=True, use_visual_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, visual_embedding_dim=20, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.visual_seq_length = visual_seq_length self.is_training = 
is_training self.use_attention_mask = use_attention_mask self.use_visual_attention_mask = use_visual_attention_mask self.use_token_type_ids = use_token_type_ids self.use_visual_token_type_ids = use_visual_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.visual_embedding_dim = visual_embedding_dim self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_config(self): return VisualBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, visual_embedding_dim=self.visual_embedding_dim, num_labels=self.num_labels, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) visual_embeds = floats_tensor([self.batch_size, self.visual_seq_length, self.visual_embedding_dim]) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( 
(self.batch_size, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor([self.batch_size, self.visual_seq_length], self.type_vocab_size) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } def prepare_config_and_inputs_for_pretraining(self): masked_lm_labels = None sentence_image_labels = None if self.use_labels: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length + self.visual_seq_length], self.vocab_size) sentence_image_labels = ids_tensor( [self.batch_size], self.type_sequence_label_size, ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": masked_lm_labels, "sentence_image_labels": sentence_image_labels}) return config, input_dict def prepare_config_and_inputs_for_multiple_choice(self): input_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.vocab_size) visual_embeds = floats_tensor( [self.batch_size, self.num_choices, self.visual_seq_length, self.visual_embedding_dim] ) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones( (self.batch_size, self.num_choices, self.seq_length), dtype=torch.long, device=torch_device ) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.num_choices, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if 
self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor( [self.batch_size, self.num_choices, self.visual_seq_length], self.type_vocab_size ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, "labels": labels, } def prepare_config_and_inputs_for_vqa(self): vqa_labels = None if self.use_labels: vqa_labels = floats_tensor([self.batch_size, self.num_labels]) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": vqa_labels}) return config, input_dict def prepare_config_and_inputs_for_nlvr(self): nlvr_labels = None if self.use_labels: nlvr_labels = ids_tensor([self.batch_size], self.num_labels) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": nlvr_labels}) return config, input_dict def prepare_config_and_inputs_for_flickr(self): region_to_phrase_position = torch.cat( ( ids_tensor([self.batch_size, self.seq_length], self.visual_seq_length), torch.ones(self.batch_size, self.visual_seq_length, dtype=torch.long, device=torch_device) * -1, ), dim=-1, ) flickr_labels = None if self.use_labels: flickr_labels = floats_tensor( [self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length] ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"region_to_phrase_position": region_to_phrase_position, "labels": flickr_labels}) return config, input_dict def create_and_check_model(self, config, input_dict): model = VisualBertModel(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length + self.visual_seq_length, 
self.hidden_size), ) def create_and_check_for_pretraining(self, config, input_dict): model = VisualBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.prediction_logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.vocab_size), ) def create_and_check_for_vqa(self, config, input_dict): model = VisualBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice(self, config, input_dict): model = VisualBertForMultipleChoice(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_nlvr(self, config, input_dict): model = VisualBertForVisualReasoning(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_flickr(self, config, input_dict): model = VisualBertForRegionToPhraseAlignment(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length) ) @require_torch class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( VisualBertModel, VisualBertForMultipleChoice, VisualBertForVisualReasoning, VisualBertForRegionToPhraseAlignment, VisualBertForQuestionAnswering, VisualBertForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {} test_torchscript = False test_pruning = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = 
copy.deepcopy(inputs_dict) if model_class == VisualBertForMultipleChoice: for key in inputs_dict: value = inputs_dict[key] if isinstance(value, torch.Tensor) and value.ndim > 1: if key != "visual_embeds": inputs_dict[key] = ( inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() ) else: inputs_dict[key] = ( inputs_dict[key] .unsqueeze(1) .expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim) .contiguous() ) elif model_class == VisualBertForRegionToPhraseAlignment: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["region_to_phrase_position"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) if return_labels: if model_class == VisualBertForMultipleChoice: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == VisualBertForPreTraining: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["labels"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) inputs_dict["sentence_image_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) # Flickr expects float labels elif model_class == VisualBertForRegionToPhraseAlignment: batch_size = self.model_tester.batch_size total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length inputs_dict["labels"] = torch.ones( ( batch_size, total_length, self.model_tester.visual_seq_length, ), dtype=torch.float, device=torch_device, ) # VQA expects float labels elif model_class == VisualBertForQuestionAnswering: inputs_dict["labels"] = torch.ones( (self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.float, device=torch_device, ) elif model_class == VisualBertForVisualReasoning: 
inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = VisualBertModelTester(self) self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) visual_seq_len = getattr(self.model_tester, "visual_seq_length", None) encoder_seq_length = (seq_len if seq_len is not None else 0) + ( visual_seq_len if visual_seq_len is not None else 0 ) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( 
list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = 
self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length + self.model_tester.visual_seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_model_for_vqa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_vqa() self.model_tester.create_and_check_for_vqa(*config_and_inputs) def test_model_for_nlvr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_nlvr() self.model_tester.create_and_check_for_nlvr(*config_and_inputs) def test_model_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_multiple_choice() 
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_model_for_flickr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr() self.model_tester.create_and_check_for_flickr(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "uclanlp/visualbert-vqa" model = VisualBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class VisualBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_vqa_coco_pre(self): model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) vocab_size = 30522 expected_shape = 
torch.Size((1, 16, vocab_size)) self.assertEqual(output.prediction_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]] ) torch.testing.assert_close(output.prediction_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) expected_shape_2 = torch.Size((1, 2)) self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2) expected_slice_2 = torch.tensor([[0.7393, 0.1754]]) torch.testing.assert_close(output.seq_relationship_logits, expected_slice_2, rtol=1e-4, atol=1e-4) @slow def test_inference_vqa(self): model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 3129)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]] ) torch.testing.assert_close(output.logits[:, :10], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_nlvr(self): model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = 
torch.ones(size=(1, 10, 1024), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 2)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-1.1436, 0.8900]]) torch.testing.assert_close(output.logits, expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_vcr(self): model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr") input_ids = torch.tensor([[[1, 2, 3, 4, 5, 6] for i in range(4)]], dtype=torch.long) attention_mask = torch.ones_like(input_ids) token_type_ids = torch.ones_like(input_ids) visual_embeds = torch.ones(size=(1, 4, 10, 512), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long) visual_attention_mask = torch.ones_like(visual_token_type_ids) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 4)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]]) torch.testing.assert_close(output.logits, expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/visual_bert/test_modeling_visual_bert.py/0
{ "file_path": "transformers/tests/models/visual_bert/test_modeling_visual_bert.py", "repo_id": "transformers", "token_count": 14027 }
612
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import datasets import numpy as np import requests from datasets import load_dataset from huggingface_hub import ImageSegmentationOutputElement from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, AutoImageProcessor, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, DetrForSegmentation, ImageSegmentationPipeline, MaskFormerForInstanceSegmentation, is_vision_available, pipeline, ) from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, nested_simplify, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> dict: npimg = np.array(mask) white_pixels = (npimg == 255).sum() shape = npimg.shape return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape} def mask_to_test_readable_only_shape(mask: Image) -> dict: npimg = np.array(mask) shape = npimg.shape return {"shape": shape} @is_pipeline_test @require_vision @require_timm @require_torch class 
ImageSegmentationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []) + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else []) + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []) ) _dataset = None @classmethod def _load_dataset(cls): # Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process. if cls._dataset is None: # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 cls._dataset = datasets.load_dataset( "hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1" ) def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, dtype="float32", ): image_segmenter = ImageSegmentationPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, dtype=dtype, ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, image_segmenter, examples): self._load_dataset() outputs = image_segmenter( "./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, ) self.assertIsInstance(outputs, list) n = len(outputs) if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)): # Instance segmentation (maskformer, and detr) have a slot for null class # and can output nothing even with a low threshold self.assertGreaterEqual(n, 0) else: self.assertGreaterEqual(n, 1) # XXX: PIL.Image implements __eq__ which bypasses ANY, so we inverse the comparison # to make it work self.assertEqual([{"score": ANY(float, 
type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs) # RGBA outputs = image_segmenter( self._dataset[0]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0 ) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # LA outputs = image_segmenter( self._dataset[1]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0 ) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # L outputs = image_segmenter( self._dataset[2]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0 ) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) if isinstance(image_segmenter.model, DetrForSegmentation): # We need to test batch_size with images with the same size. # Detr doesn't normalize the size of the images, meaning we can have # 800x800 or 800x1200, meaning we cannot batch simply. 
# We simply bail on this batch_size = 1 else: batch_size = 2 # 5 times the same image so the output shape is predictable batch = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] outputs = image_segmenter( batch, threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, batch_size=batch_size, ) self.assertEqual(len(batch), len(outputs)) self.assertEqual(len(outputs[0]), n) self.assertEqual( [ [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, ], outputs, f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}", ) for single_output in outputs: for output_element in single_output: compare_pipeline_output_to_hub_spec(output_element, ImageSegmentationOutputElement) @require_torch def test_small_model_pt_no_panoptic(self): model_id = "hf-internal-testing/tiny-random-mobilevit" # The default task is `image-classification` we need to override pipe = pipeline(task="image-segmentation", model=model_id) # This model does NOT support neither `instance` nor `panoptic` # We should error out with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic") self.assertEqual( str(e.exception), "Subtask panoptic is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) with self.assertRaises(ValueError) as e: 
pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") self.assertEqual( str(e.exception), "Subtask instance is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = ImageSegmentationPipeline( model=model, image_processor=image_processor, subtask="panoptic", threshold=0.0, mask_threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) # This is extremely brittle, and those values are made specific for the CI. self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ], ) output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(output, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 
307200}, }, ], ) # This must be surprising to the reader. # The `panoptic` returns only LABEL_215, and this returns 3 labels. # output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic") output_masks = [o["mask"] for o in output] # page links (to visualize) expected_masks = [ "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png", ] # actual links to get files expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks] expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks] # Convert masks to numpy array output_masks = [np.array(x) for x in output_masks] expected_masks = [np.array(x) for x in expected_masks] self.assertEqual(output_masks[0].shape, expected_masks[0].shape) self.assertEqual(output_masks[1].shape, expected_masks[1].shape) self.assertEqual(output_masks[2].shape, expected_masks[2].shape) # With un-trained tiny random models, the output `logits` tensor is very likely to contain many values # close to each other, which cause `argmax` to give quite different results when running the test on 2 # environments. We use a lower threshold `0.9` here to avoid flakiness. 
self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9) self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9) self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9) for o in output: o["mask"] = mask_to_test_readable_only_shape(o["mask"]) self.maxDiff = None self.assertEqual( nested_simplify(output, decimals=4), [ { "label": "LABEL_88", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_101", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_215", "mask": {"shape": (480, 640)}, "score": None, }, ], ) @require_torch def test_small_model_pt_semantic(self): model_id = "hf-internal-testing/tiny-random-beit-pipeline" image_segmenter = pipeline(model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") for o in outputs: # shortening by hashing o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "LABEL_0", "mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714}, }, { "score": None, "label": "LABEL_1", "mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486}, }, ], ) @require_torch @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline( "image-segmentation", model=model_id, threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", 
"shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) # Shortening by hashing for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, 
{ "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline("image-segmentation", model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9995, "label": "remote", "mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411}, }, ], ) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) @require_torch @slow def test_maskformer(self): threshold = 0.8 model_id = "facebook/maskformer-swin-base-ade" model = AutoModelForInstanceSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor) ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = 
ds[0]["image"].convert("RGB") outputs = image_segmenter(image, threshold=threshold) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9974, "label": "wall", "mask": {"hash": "a547b7c062", "shape": (512, 683), "white_pixels": 14252}, }, { "score": 0.949, "label": "house", "mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177}, }, { "score": 0.9995, "label": "grass", "mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444}, }, { "score": 0.9976, "label": "tree", "mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944}, }, { "score": 0.8239, "label": "plant", "mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136}, }, { "score": 0.9942, "label": "road, route", "mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941}, }, { "score": 1.0, "label": "sky", "mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802}, }, ], ) @require_torch @slow def test_oneformer(self): image_segmenter = pipeline(model="shi-labs/oneformer_ade20k_swin_tiny") ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = ds[0]["image"].convert("RGB") outputs = image_segmenter(image, threshold=0.99) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9981, "label": "grass", "mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)}, }, { "score": 0.9992, "label": "sky", "mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(image, threshold=0.99, subtask="instance") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9991, "label": "sky", "mask": {"hash": "8b1ffad016", 
"white_pixels": 230566, "shape": (512, 683)}, }, { "score": 0.9981, "label": "grass", "mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(image, subtask="semantic") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "wall", "mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)}, }, { "score": None, "label": "building", "mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)}, }, { "score": None, "label": "sky", "mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)}, }, { "score": None, "label": "tree", "mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)}, }, { "score": None, "label": "road, route", "mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)}, }, { "score": None, "label": "grass", "mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)}, }, { "score": None, "label": "plant", "mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)}, }, { "score": None, "label": "house", "mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)}, }, ], ) def test_save_load(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline( task="image-segmentation", model=model, image_processor=image_processor, ) with tempfile.TemporaryDirectory() as tmpdirname: image_segmenter.save_pretrained(tmpdirname) pipeline(task="image-segmentation", model=tmpdirname)
transformers/tests/pipelines/test_pipelines_image_segmentation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_segmentation.py", "repo_id": "transformers", "token_count": 15990 }
613
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch from transformers.pipelines.pt_utils import KeyDataset if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, dtype="float32", ): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa", dtype=dtype, ) examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, 
top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device(f"{torch_device}:0")) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device(f"{torch_device}:0")) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? 
Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_torch def test_small_model_pt_image_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") images = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000004016.png", ] outputs = vqa_pipeline(image=images, question="How many cats are there?", top_k=1) self.assertEqual( outputs, [[{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}]] ) @require_torch def test_small_model_pt_question_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" questions = ["How many cats are there?", "Are there any dogs?"] outputs = vqa_pipeline(image=image, question=questions, top_k=1) self.assertEqual( outputs, [[{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}]] ) @require_torch def test_small_model_pt_both_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") images = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000004016.png", ] questions = ["How many cats are there?", "Are there any dogs?"] outputs = vqa_pipeline(image=images, question=questions, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def 
test_small_model_pt_dataset(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") dataset = load_dataset("hf-internal-testing/dummy_image_text_data", split="train[:2]") question = "What's in the image?" outputs = vqa_pipeline(image=KeyDataset(dataset, "image"), question=question, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], )
transformers/tests/pipelines/test_pipelines_visual_question_answering.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_visual_question_answering.py", "repo_id": "transformers", "token_count": 4193 }
614
# Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest import pytest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, set_seed, ) from transformers.models.opt.modeling_opt import OPTAttention from transformers.testing_utils import ( apply_skip_if_not_implemented, backend_empty_cache, backend_torch_accelerator_module, is_accelerate_available, is_bitsandbytes_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu_if_bnb_not_multi_backend_enabled, require_torch_multi_accelerator, slow, torch_device, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc elif model.config.model_type == "llama": return model.model.layers[0].mlp.gate_proj return model.transformer.h[0].mlp.dense_4h_to_h if is_accelerate_available(): from accelerate import PartialState from accelerate.logging import get_logger logger = get_logger(__name__) _ = PartialState() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only""" def __init__(self, module: nn.Module, rank: int, dtype: torch.dtype): super().__init__() self.module = module 
self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False, dtype=dtype), nn.Linear(rank, module.out_features, bias=False, dtype=dtype), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) if is_bitsandbytes_available(): import bitsandbytes as bnb @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow class BaseMixedInt8Test(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of the family.\n") # Expected values on a A10 EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") MAX_NEW_TOKENS = 10 # Expected values with offload EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer based in") # Expected values on Intel XPU and NV A100 EXPECTED_OUTPUTS.add("Hello my name is Alina. 
I have been working as a professional") def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) @apply_skip_if_not_implemented class MixedInt8Test(BaseMixedInt8Test): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained(self.model_name, dtype=torch.float16, device_map="auto") self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() backend_empty_cache(torch_device) def test_get_keys_to_not_convert(self): r""" Test the `get_keys_to_not_convert` function. """ from accelerate import init_empty_weights from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) # The order of the keys does not matter, so we sort them before comparing, same for the other tests. 
self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "transformer.wte"].sort()) model_id = "Salesforce/blip2-opt-2.7b" config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec") with init_empty_weights(): model = Blip2ForConditionalGeneration(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["language_model.lm_head", "language_model.model.decoder.embed_tokens"].sort(), ) model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") with init_empty_weights(): model = OPTForCausalLM(config) self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "model.decoder.embed_tokens"].sort()) model_id = "FacebookAI/roberta-large" config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9") with init_empty_weights(): model = AutoModelForMaskedLM.from_config(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["'roberta.embeddings.word_embeddings', 'lm_head', 'lm_head.decoder"].sort(), ) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_8bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from 
bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE, delta=1e-5) self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"]) seq_classification_model = AutoModelForSequenceClassification.from_pretrained( "FacebookAI/roberta-large-mnli", quantization_config=quantization_config ) self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8) self.assertTrue( isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt) ) self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear)) self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8) self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear)) self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. 
Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate( input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(model_8bit_from_config.device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_dequantize(self): r""" Test that loading the model and dequantizing it produce correct results """ bnb_config = BitsAndBytesConfig(load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) model_8bit.dequantize() encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit.generate( input_ids=encoded_input["input_ids"].to(model_8bit.device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_if_config_and_load_in_8bit(self): r""" Test that loading the model with the config and `load_in_8bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( 
self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r""" Test whether attempting to change the device or cast the dtype of a model after converting it to 8-bit precision will raise an appropriate error. The test ensures that such operations are prohibited on 8-bit models to prevent invalid conversions. """ with self.assertRaises(ValueError): # Tries with `str` self.model_8bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.to(torch.device(torch_device)) with self.assertRaises(ValueError): # Tries to cast the 8-bit model to float32 using `float()` self.model_8bit.float() with self.assertRaises(ValueError): # Tries to cast the 4-bit model to float16 using `half()` self.model_8bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate( input_ids=encoded_input["input_ids"].to(self.model_fp16.device), max_new_tokens=10 ) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate( input_ids=encoded_input["input_ids"].to(model_from_saved.device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_regression(self): r""" Test whether it is possible to serialize a model in 8-bit - using not safetensors """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate( input_ids=encoded_input["input_ids"].to(model_from_saved.device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model 
in 8-bit - sharded version. """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "google-t5/t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog 
is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() backend_empty_cache(torch_device) def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "google-t5/t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() backend_empty_cache(torch_device) def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. """ from bitsandbytes.nn import Int8Params # last param of a base model should be a linear8bit module self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) @apply_skip_if_not_implemented class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ if hasattr(self, "pipe"): del self.pipe gc.collect() backend_empty_cache(torch_device) def test_pipeline(self): r""" The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything on pipeline. 
""" # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Avoid sampling different outputs set_seed(42) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_accelerator @apply_skip_if_not_implemented class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": 0, "transformer.h.19": 0, "transformer.h.20": 0, "transformer.h.21": 0, "transformer.h.22": 0, "transformer.h.23": 1, "transformer.ln_f": 0, } model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map=device_map ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0], 
skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_torch_multi_accelerator @apply_skip_if_not_implemented class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertIn(output_text, self.EXPECTED_OUTPUTS) def test_cpu_accelerator_loading_random_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. 
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_accelerator_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_accelerator_disk_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. 
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_accelerator_disk_loading_custom_device_map_kwargs(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) @apply_skip_if_not_implemented class MixedInt8TestTraining(BaseMixedInt8Test): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): self.skipTest(reason="This test requires bitsandbytes>=0.37.0") # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) model.train() if torch_device in ["cuda", "xpu"]: self.assertEqual( set(model.hf_device_map.values()), {backend_torch_accelerator_module(torch_device).current_device()} ) else: self.assertTrue(all(param.device.type == "cpu" for param in model.parameters())) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later # cast all non INT8 parameters to fp32 if param.dtype in (torch.float16, torch.bfloat16) and param.__class__.__name__ != "Params4bit": param.data = param.data.to(torch.float32) # Step 2: 
add adapters for _, module in model.named_modules(): if isinstance(module, OPTAttention): module.q_proj = LoRALayer(module.q_proj, rank=16, dtype=model.dtype) module.k_proj = LoRALayer(module.k_proj, rank=16, dtype=model.dtype) module.v_proj = LoRALayer(module.v_proj, rank=16, dtype=model.dtype) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(torch_device) # Step 4: Check if the gradient is not None with torch.autocast(torch_device): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) @apply_skip_if_not_implemented class MixedInt8GPT2Test(MixedInt8Test): model_name = "openai-community/gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a big fan of") EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a fan of the") # Expected values on a A10 EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I am a member of the") # Expected values on Intel CPU EXPECTED_OUTPUTS.add("Hello my name is John Doe. I am a man. I am") EXPECTED_OUTPUTS.add("Hello my name is John, and I'm a writer. 
I'm") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) class MixedInt8LlamaTest(MixedInt8Test): model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" EXPECTED_RELATIVE_DIFFERENCE = 1.7869331026479096 EXPECTED_OUTPUTS = set() # Expected on Intel XPU EXPECTED_OUTPUTS.add("Hello my name is John Smith and I am a software engineer. I") # Expected on NVIDIA T4 EXPECTED_OUTPUTS.add("Hello my name is John and I am a software engineer. 
I have") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "Jiqing/TinyLlama-1.1B-Chat-v1.0-bnb-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow @apply_skip_if_not_implemented class Bnb8bitCompile(unittest.TestCase): model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM" input_text = "Hello my name is" def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) @pytest.mark.torch_compile_test def test_generate_compile(self): encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # if nothing is set, compile will be disabled for bnb self.model_8bit.generate( input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10, cache_implementation="static", ) with self.assertRaises(Exception): object.__setattr__(self.model_8bit.hf_quantizer, "is_compileable", True) self.model_8bit.generate( input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10, cache_implementation="static", )
transformers/tests/quantization/bnb/test_mixed_int8.py/0
{ "file_path": "transformers/tests/quantization/bnb/test_mixed_int8.py", "repo_id": "transformers", "token_count": 18260 }
615
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 from check_copies import convert_to_localized_md, find_code_in_transformers, is_copy_consistent # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. REFERENCE_CODE = """ def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states """ MOCK_BERT_CODE = """from ...modeling_utils import PreTrainedModel def bert_function(x): return x class BertAttention(nn.Module): def __init__(self, config): super().__init__() class BertModel(BertPreTrainedModel): def __init__(self, config): super().__init__() self.bert = BertEncoder(config) @add_docstring(BERT_DOCSTRING) def forward(self, x): return self.bert(x) """ MOCK_BERT_COPY_CODE = """from ...modeling_utils import PreTrainedModel # Copied from transformers.models.bert.modeling_bert.bert_function def bert_copy_function(x): return x # Copied from transformers.models.bert.modeling_bert.BertAttention class BertCopyAttention(nn.Module): def __init__(self, config): super().__init__() # Copied from transformers.models.bert.modeling_bert.BertModel with Bert->BertCopy all-casing class BertCopyModel(BertCopyPreTrainedModel): def __init__(self, config): super().__init__() self.bertcopy = BertCopyEncoder(config) @add_docstring(BERTCOPY_DOCSTRING) def forward(self, x): return self.bertcopy(x) """ MOCK_DUMMY_BERT_CODE_MATCH = """ class BertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def existing_common(self, c): return 4 def existing_diff_to_be_ignored(self, c): return 9 """ MOCK_DUMMY_ROBERTA_CODE_MATCH = """ # Copied from transformers.models.dummy_bert_match.modeling_dummy_bert_match.BertDummyModel with BertDummy->RobertaBertDummy class RobertaBertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, 
a=1, b=2): self.a = a self.b = b # Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def existing_common(self, c): return 4 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 """ MOCK_DUMMY_BERT_CODE_NO_MATCH = """ class BertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_bert(self, c): return 7 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 8 def existing_diff_to_be_ignored(self, c): return 9 """ MOCK_DUMMY_ROBERTA_CODE_NO_MATCH = """ # Copied from transformers.models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel with BertDummy->RobertaBertDummy class RobertaBertDummyModel: attr_1 = 1 attr_2 = 3 def __init__(self, a=1, b=2): self.a = a self.b = b # Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_roberta_not_ignored(self, c): return 2 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 5 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 """ EXPECTED_REPLACED_CODE = """ # Copied from transformers.models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel with BertDummy->RobertaBertDummy class RobertaBertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_bert(self, c): return 7 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 8 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 # 
Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 """ def replace_in_file(filename, old, new): with open(filename, encoding="utf-8") as f: content = f.read() content = content.replace(old, new) with open(filename, "w", encoding="utf-8", newline="\n") as f: f.write(content) def create_tmp_repo(tmp_dir): """ Creates a mock repository in a temporary folder for testing. """ tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) models = { "bert": MOCK_BERT_CODE, "bertcopy": MOCK_BERT_COPY_CODE, "dummy_bert_match": MOCK_DUMMY_BERT_CODE_MATCH, "dummy_roberta_match": MOCK_DUMMY_ROBERTA_CODE_MATCH, "dummy_bert_no_match": MOCK_DUMMY_BERT_CODE_NO_MATCH, "dummy_roberta_no_match": MOCK_DUMMY_ROBERTA_CODE_NO_MATCH, } for model, code in models.items(): model_subdir = model_dir / model model_subdir.mkdir(exist_ok=True) with open(model_subdir / f"modeling_{model}.py", "w", encoding="utf-8", newline="\n") as f: f.write(code) @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `check_copies` to use a different location for the repo. 
""" old_repo_path = check_copies.REPO_PATH old_doc_path = check_copies.PATH_TO_DOCS old_transformer_path = check_copies.TRANSFORMERS_PATH repo_path = Path(new_folder).resolve() check_copies.REPO_PATH = str(repo_path) check_copies.PATH_TO_DOCS = str(repo_path / "docs" / "source" / "en") check_copies.TRANSFORMERS_PATH = str(repo_path / "src" / "transformers") try: yield finally: check_copies.REPO_PATH = old_repo_path check_copies.PATH_TO_DOCS = old_doc_path check_copies.TRANSFORMERS_PATH = old_transformer_path class CopyCheckTester(unittest.TestCase): def test_find_code_in_transformers(self): with tempfile.TemporaryDirectory() as tmp_folder: create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): code = find_code_in_transformers("models.bert.modeling_bert.BertAttention") reference_code = ( "class BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n" ) self.assertEqual(code, reference_code) def test_is_copy_consistent(self): path_to_check = ["src", "transformers", "models", "bertcopy", "modeling_bertcopy.py"] with tempfile.TemporaryDirectory() as tmp_folder: # Base check create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, []) # Base check with an inconsistency create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) replace_in_file(file_to_check, "self.bertcopy(x)", "self.bert(x)") diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, [["models.bert.modeling_bert.BertModel", 22]]) _ = is_copy_consistent(file_to_check, overwrite=True) with open(file_to_check, encoding="utf-8") as f: self.assertEqual(f.read(), MOCK_BERT_COPY_CODE) def test_is_copy_consistent_with_ignored_match(self): path_to_check = ["src", "transformers", "models", "dummy_roberta_match", "modeling_dummy_roberta_match.py"] with 
tempfile.TemporaryDirectory() as tmp_folder: # Base check create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, []) def test_is_copy_consistent_with_ignored_no_match(self): path_to_check = [ "src", "transformers", "models", "dummy_roberta_no_match", "modeling_dummy_roberta_no_match.py", ] with tempfile.TemporaryDirectory() as tmp_folder: # Base check with an inconsistency create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) # line 6: `attr_2 = 3` in `MOCK_DUMMY_ROBERTA_CODE_NO_MATCH`. # (which has a leading `\n`.) self.assertEqual( diffs, [["models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel", 6]] ) _ = is_copy_consistent(file_to_check, overwrite=True) with open(file_to_check, encoding="utf-8") as f: self.assertEqual(f.read(), EXPECTED_REPLACED_CODE) def test_convert_to_localized_md(self): localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"] md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://huggingface.co/papers/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://huggingface.co/papers/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://huggingface.co/papers/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) localized_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://huggingface.co/papers/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) converted_md_list_sample = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://huggingface.co/papers/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." 
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://huggingface.co/papers/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://huggingface.co/papers/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. Manning 发布。\n" ) num_models_equal, converted_md_list = convert_to_localized_md( md_list, localized_md_list, localized_readme["format_model_list"] ) self.assertFalse(num_models_equal) self.assertEqual(converted_md_list, converted_md_list_sample) num_models_equal, converted_md_list = convert_to_localized_md( md_list, converted_md_list, localized_readme["format_model_list"] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(num_models_equal) link_changed_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://huggingface.co/papers/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) link_unchanged_md_list = ( "1. 
**[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://huggingface.co/papers/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) converted_md_list_sample = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://huggingface.co/papers/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) num_models_equal, converted_md_list = convert_to_localized_md( link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"] ) # Check if the model link is synchronized. self.assertEqual(converted_md_list, converted_md_list_sample)
transformers/tests/repo_utils/test_check_copies.py/0
{ "file_path": "transformers/tests/repo_utils/test_check_copies.py", "repo_id": "transformers", "token_count": 7782 }
616
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect
import tempfile

from transformers.testing_utils import require_torch, torch_device
from transformers.utils.backbone_utils import BackboneType


@require_torch
class BackboneTesterMixin:
    """Mixin of common tests for vision backbone models.

    Concrete test classes are expected to provide ``config_class``, ``model_tester``
    (with ``prepare_config_and_inputs_for_common``) and ``all_model_classes``.

    Note: several assertions below originally used ``assertTrue(x, y)``, which treats
    ``y`` as the failure *message* and therefore can never fail; where the intended
    equality is unambiguous they have been converted to ``assertEqual(x, y)``.
    """

    all_model_classes = ()
    has_attentions = True

    def test_config(self):
        """Check stage names and the out_features/out_indices resolution rules of the config."""
        config_class = self.config_class

        # test default config
        config = config_class()
        self.assertIsNotNone(config)
        num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers
        expected_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_stages + 1)]
        self.assertEqual(config.stage_names, expected_stage_names)
        self.assertTrue(set(config.out_features).issubset(set(config.stage_names)))

        # Test out_features and out_indices are correctly set
        # out_features and out_indices both None -> defaults to the last stage
        config = config_class(out_features=None, out_indices=None)
        self.assertEqual(config.out_features, [config.stage_names[-1]])
        self.assertEqual(config.out_indices, [len(config.stage_names) - 1])

        # out_features and out_indices both set
        config = config_class(out_features=["stem", "stage1"], out_indices=[0, 1])
        self.assertEqual(config.out_features, ["stem", "stage1"])
        self.assertEqual(config.out_indices, [0, 1])

        # Only out_features set -> out_indices is derived from the stage names
        config = config_class(out_features=["stage1", "stage3"])
        self.assertEqual(config.out_features, ["stage1", "stage3"])
        self.assertEqual(config.out_indices, [1, 3])

        # Only out_indices set -> out_features is derived from the stage names
        config = config_class(out_indices=[0, 2])
        self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]])
        self.assertEqual(config.out_indices, [0, 2])

        # Error raised when out_indices do not correspond to out_features
        with self.assertRaises(ValueError):
            config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2])

    def test_forward_signature(self):
        """The first forward argument of every backbone must be `pixel_values`."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_config_save_pretrained(self):
        """Round-trip the config through save_pretrained/from_pretrained."""
        config_class = self.config_class
        config_first = config_class(out_indices=[0, 1, 2, 3])

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.assertEqual(config_second.to_dict(), config_first.to_dict())

    def test_channels(self):
        """`model.channels` must mirror the channel counts of the selected out_features."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertEqual(len(model.channels), len(config.out_features))
            num_features = model.num_features
            out_indices = [config.stage_names.index(feat) for feat in config.out_features]
            out_channels = [num_features[idx] for idx in out_indices]
            self.assertListEqual(model.channels, out_channels)

            # With out_features=None the last stage is selected.
            new_config = copy.deepcopy(config)
            new_config.out_features = None
            model = model_class(new_config)
            self.assertEqual(len(model.channels), 1)
            self.assertListEqual(model.channels, [num_features[-1]])

            # Same default behavior when only out_indices is cleared.
            new_config = copy.deepcopy(config)
            new_config.out_indices = None
            model = model_class(new_config)
            self.assertEqual(len(model.channels), 1)
            self.assertListEqual(model.channels, [num_features[-1]])

    def test_create_from_modified_config(self):
        """Backbones built from modified configs return the expected number of feature maps."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_features))
            self.assertEqual(len(model.channels), len(config.out_features))
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_features = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights; we only need the
            # forward pass to run, the result itself is not inspected.
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            model(**inputs_dict)

    def test_backbone_common_attributes(self):
        """Every backbone exposes the common backbone API surface with consistent sizes."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)

            self.assertTrue(hasattr(backbone, "backbone_type"))
            self.assertTrue(hasattr(backbone, "stage_names"))
            self.assertTrue(hasattr(backbone, "num_features"))
            self.assertTrue(hasattr(backbone, "out_indices"))
            self.assertTrue(hasattr(backbone, "out_features"))
            self.assertTrue(hasattr(backbone, "out_feature_channels"))
            self.assertTrue(hasattr(backbone, "channels"))

            self.assertIsInstance(backbone.backbone_type, BackboneType)
            # Verify num_features has been initialized in the backbone init
            self.assertIsNotNone(backbone.num_features)
            # Fixed: these were `assertTrue(a == b)`; assertEqual gives useful messages.
            self.assertEqual(len(backbone.channels), len(backbone.out_indices))
            self.assertEqual(len(backbone.stage_names), len(backbone.num_features))
            self.assertTrue(len(backbone.channels) <= len(backbone.num_features))
            self.assertEqual(len(backbone.out_feature_channels), len(backbone.stage_names))

    def test_backbone_outputs(self):
        """Forward pass returns feature maps (and optionally hidden states / attentions)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertEqual(len(outputs.feature_maps), len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                # Fixed: was `assertTrue(x, y)` where y is only the failure message,
                # so the shape was never actually compared.
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            # Fixed: was `assertTrue(len(...), len(...))` — a vacuous assertion.
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            for hidden_state, n_channels in zip(outputs.hidden_states, backbone.channels):
                # NOTE(review): vacuous assertion kept as-is — `backbone.channels` only
                # covers the selected out_features, so zipping it against *all* hidden
                # states is not guaranteed to align; converting this one to assertEqual
                # could fail for configs selecting non-leading stages. TODO: confirm the
                # intended pairing before tightening.
                self.assertTrue(hidden_state.shape[:2], (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)

    def test_backbone_stage_selection(self):
        """Negative out_indices select stages from the end, in stage-name order."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            config.out_indices = [-2, -1]
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test number of feature maps returned
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertEqual(len(outputs.feature_maps), 2)

            # Order of channels returned is same as order of channels iterating over stage names
            channels_from_stage_names = [
                backbone.out_feature_channels[name] for name in backbone.stage_names if name in backbone.out_features
            ]
            self.assertEqual(backbone.channels, channels_from_stage_names)
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                # Fixed: was `assertTrue(x, y)` (message arg), which never compared shapes.
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
transformers/tests/test_backbone_common.py/0
{ "file_path": "transformers/tests/test_backbone_common.py", "repo_id": "transformers", "token_count": 4368 }
617
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ruff: isort: skip_file
"""

import os
import pickle
import tempfile
import unittest
from typing import Callable, Optional

import numpy as np

from transformers import (
    AutoTokenizer,
    BatchEncoding,
    BertTokenizer,
    BertTokenizerFast,
    LlamaTokenizerFast,
    PreTrainedTokenizer,
    PreTrainedTokenizerFast,
    TensorType,
    TokenSpan,
    is_tokenizers_available,
)
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import (
    CaptureStderr,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)


# Only pull in the Rust `tokenizers` bindings when they are installed; tests that
# need them are gated with @require_tokenizers.
if is_tokenizers_available():
    import tokenizers
    from tokenizers import Tokenizer
    from tokenizers.models import WordPiece


class TokenizerUtilsTest(unittest.TestCase):
    """Tests for tokenizer utilities shared by slow (Python) and fast (Rust) tokenizers:
    BatchEncoding pickling/tensor conversion, decoding, padding, special tokens, and
    chat-template message encoding. Most tests download models from the Hub."""

    def check_tokenizer_from_pretrained(self, tokenizer_class):
        """Load the first known checkpoint of `tokenizer_class` and sanity-check its
        type and that every special token maps to an integer id."""
        s3_models = list(tokenizer_class.max_model_input_sizes.keys())
        for model_name in s3_models[:1]:
            tokenizer = tokenizer_class.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, tokenizer_class)
            self.assertIsInstance(tokenizer, PreTrainedTokenizer)

            for special_tok in tokenizer.all_special_tokens:
                self.assertIsInstance(special_tok, str)
                special_tok_id = tokenizer.convert_tokens_to_ids(special_tok)
                self.assertIsInstance(special_tok_id, int)

    def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None):
        """Pickle-round-trip `be_original` and assert the restored BatchEncoding matches.
        `equal_op(restored, original)` overrides `assertEqual` for values (e.g. tensors)."""
        batch_encoding_str = pickle.dumps(be_original)
        self.assertIsNotNone(batch_encoding_str)

        be_restored = pickle.loads(batch_encoding_str)

        # Ensure is_fast is correctly restored
        self.assertEqual(be_restored.is_fast, be_original.is_fast)

        # Ensure encodings are potentially correctly restored
        if be_original.is_fast:
            self.assertIsNotNone(be_restored.encodings)
        else:
            self.assertIsNone(be_restored.encodings)

        # Ensure the keys are the same
        for original_v, restored_v in zip(be_original.values(), be_restored.values()):
            if equal_op:
                self.assertTrue(equal_op(restored_v, original_v))
            else:
                self.assertEqual(restored_v, original_v)

    @slow
    def test_pretrained_tokenizers(self):
        # Representative check for a slow tokenizer class.
        self.check_tokenizer_from_pretrained(GPT2Tokenizer)

    def test_tensor_type_from_str(self):
        # The TensorType enum accepts its short string aliases.
        self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW)
        self.assertEqual(TensorType("pt"), TensorType.PYTORCH)
        self.assertEqual(TensorType("np"), TensorType.NUMPY)

    @require_tokenizers
    def test_batch_encoding_pickle(self):
        """BatchEncoding pickles for both slow and fast tokenizers, with and without numpy tensors."""
        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        # Python no tensor
        with self.subTest("BatchEncoding (Python, return_tensors=None)"):
            self.assert_dump_and_restore(tokenizer_p("Small example to encode"))

        with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"):
            self.assert_dump_and_restore(
                tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
            )

        with self.subTest("BatchEncoding (Rust, return_tensors=None)"):
            self.assert_dump_and_restore(tokenizer_r("Small example to encode"))

        with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"):
            self.assert_dump_and_restore(
                tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
            )

    @require_torch
    @require_tokenizers
    def test_batch_encoding_pickle_pt(self):
        """Same as above but with PyTorch tensors (torch imported lazily — gated by @require_torch)."""
        import torch

        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"):
            self.assert_dump_and_restore(
                tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
            )

        with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"):
            self.assert_dump_and_restore(
                tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
            )

    @require_tokenizers
    def test_batch_encoding_is_fast(self):
        # is_fast reflects whether the producing tokenizer is backed by Rust.
        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        with self.subTest("Python Tokenizer"):
            self.assertFalse(tokenizer_p("Small example to_encode").is_fast)

        with self.subTest("Rust Tokenizer"):
            self.assertTrue(tokenizer_r("Small example to_encode").is_fast)

    @require_tokenizers
    def test_batch_encoding_word_to_tokens(self):
        """word_to_tokens maps word indices to token spans; a word that produces no
        tokens (here the soft hyphen "\\xad") maps to None."""
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")
        encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True)

        self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2))
        self.assertEqual(encoded.word_to_tokens(1), None)
        self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3))

    def test_batch_encoding_with_labels(self):
        """convert_to_tensors handles labels alongside inputs (numpy backend)."""
        batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
        tensor_batch = batch.convert_to_tensors(tensor_type="np")
        self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
        self.assertEqual(tensor_batch["labels"].shape, (2,))
        # test converting the converted
        with CaptureStderr() as cs:
            tensor_batch = batch.convert_to_tensors(tensor_type="np")
        self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")

        # prepend_batch_axis turns unbatched inputs into a batch of one.
        batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
        tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True)
        self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
        self.assertEqual(tensor_batch["labels"].shape, (1,))

    @require_torch
    def test_batch_encoding_with_labels_pt(self):
        """Same as test_batch_encoding_with_labels, with the PyTorch backend."""
        batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
        tensor_batch = batch.convert_to_tensors(tensor_type="pt")
        self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
        self.assertEqual(tensor_batch["labels"].shape, (2,))
        # test converting the converted
        with CaptureStderr() as cs:
            tensor_batch = batch.convert_to_tensors(tensor_type="pt")
        self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")

        batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
        tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True)
        self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
        self.assertEqual(tensor_batch["labels"].shape, (1,))

    def test_padding_accepts_tensors(self):
        """tokenizer.pad accepts numpy arrays as features and pads to the longest entry."""
        features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")

        batch = tokenizer.pad(features, padding=True)
        self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
        batch = tokenizer.pad(features, padding=True, return_tensors="np")
        self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])

    @require_tokenizers
    def test_decoding_single_token(self):
        """decode accepts a bare int id as well as a one-element list, with identical output."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast]:
            with self.subTest(f"{tokenizer_class}"):
                tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased")

                token_id = 2300
                decoded_flat = tokenizer.decode(token_id)
                decoded_list = tokenizer.decode([token_id])

                self.assertEqual(decoded_flat, "Force")
                self.assertEqual(decoded_list, "Force")

                token_id = 0
                decoded_flat = tokenizer.decode(token_id)
                decoded_list = tokenizer.decode([token_id])

                self.assertEqual(decoded_flat, "[PAD]")
                self.assertEqual(decoded_list, "[PAD]")

                last_item_id = tokenizer.vocab_size - 1
                decoded_flat = tokenizer.decode(last_item_id)
                decoded_list = tokenizer.decode([last_item_id])

                self.assertEqual(decoded_flat, "##:")
                self.assertEqual(decoded_list, "##:")

    def test_extra_special_tokens_multimodal(self):
        """extra_special_tokens (e.g. image tokens) survive save/load round-trips and
        can also be supplied directly at from_pretrained time."""
        special_tokens_list = [
            "bos_token",
            "eos_token",
            "unk_token",
            "sep_token",
            "pad_token",
            "cls_token",
            "mask_token",
            "additional_special_tokens",
        ]
        llama_tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
        llama_tokenizer.extra_special_tokens = {
            "boi_token": "<image_start>",
            "eoi_token": "<image_end>",
            "image_token": "<image>",
        }
        self.assertListEqual(llama_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, special_tokens_list)
        with tempfile.TemporaryDirectory() as tmpdirname:
            llama_tokenizer.save_pretrained(tmpdirname)

            # load back and check we have extra special tokens set
            loaded_tokenizer = LlamaTokenizerFast.from_pretrained(tmpdirname)
            multimodal_special_tokens_list = special_tokens_list + ["boi_token", "eoi_token", "image_token"]
            self.assertListEqual(loaded_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, multimodal_special_tokens_list)

            # We set an image_token_id before, so we can get an "image_token" as str that matches the id
            self.assertTrue(loaded_tokenizer.image_token == "<image>")
            self.assertTrue(loaded_tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>"))

        # save one more time and make sure the image token can get loaded back
        with tempfile.TemporaryDirectory() as tmpdirname:
            loaded_tokenizer.save_pretrained(tmpdirname)
            loaded_tokenizer_with_extra_tokens = LlamaTokenizerFast.from_pretrained(tmpdirname)
            self.assertTrue(loaded_tokenizer_with_extra_tokens.image_token == "<image>")

        # test that we can also indicate extra tokens during load time
        extra_special_tokens = {
            "boi_token": "<image_start>",
            "eoi_token": "<image_end>",
            "image_token": "<image>",
        }
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", extra_special_tokens=extra_special_tokens
        )
        self.assertTrue(tokenizer.image_token == "<image>")
        # NOTE(review): this compares `tokenizer.image_token_id` against an id computed
        # from the *previously loaded* `loaded_tokenizer` — presumably intentional since
        # both share the same vocab, but `tokenizer.convert_tokens_to_ids` looks like the
        # intended receiver. TODO confirm.
        self.assertTrue(tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>"))

    @require_tokenizers
    def test_decoding_skip_special_tokens(self):
        """skip_special_tokens removes added special tokens and the [CLS]/[SEP]/[PAD] frame."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast]:
            with self.subTest(f"{tokenizer_class}"):
                tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased")
                tokenizer.add_tokens(["ஐ"], special_tokens=True)

                # test special token with other tokens, skip the special tokens
                sentence = "This is a beautiful flower ஐ"
                ids = tokenizer(sentence)["input_ids"]
                decoded_sent = tokenizer.decode(ids, skip_special_tokens=True)
                self.assertEqual(decoded_sent, "This is a beautiful flower")

                # test special token with other tokens, do not skip the special tokens
                ids = tokenizer(sentence)["input_ids"]
                decoded_sent = tokenizer.decode(ids, skip_special_tokens=False)
                self.assertEqual(decoded_sent, "[CLS] This is a beautiful flower ஐ [SEP]")

                # test special token stand alone, skip the special tokens
                sentence = "ஐ"
                ids = tokenizer(sentence)["input_ids"]
                decoded_sent = tokenizer.decode(ids, skip_special_tokens=True)
                self.assertEqual(decoded_sent, "")

                # test special token stand alone, do not skip the special tokens
                ids = tokenizer(sentence)["input_ids"]
                decoded_sent = tokenizer.decode(ids, skip_special_tokens=False)
                self.assertEqual(decoded_sent, "[CLS] ஐ [SEP]")

                # test single special token alone, skip
                pad_id = 0
                decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=True)
                self.assertEqual(decoded_sent, "")

                # test single special token alone, do not skip
                decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=False)
                self.assertEqual(decoded_sent, "[PAD]")

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        """tokenizer.pad also accepts torch tensors as features."""
        import torch

        features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}]
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")

        batch = tokenizer.pad(features, padding=True)
        self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
        batch = tokenizer.pad(features, padding=True, return_tensors="pt")
        self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])

    @require_tokenizers
    def test_instantiation_from_tokenizers(self):
        # A raw `tokenizers.Tokenizer` object can back a PreTrainedTokenizerFast.
        bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer)

    @require_tokenizers
    def test_instantiation_from_tokenizers_json_file(self):
        # ... and so can a serialized tokenizer.json file.
        bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        with tempfile.TemporaryDirectory() as tmpdirname:
            bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json"))
            PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))

    def test_len_tokenizer(self):
        """len(tokenizer) counts base vocab plus added tokens, and both encoder/decoder
        added-token maps grow together."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast]:
            with self.subTest(f"{tokenizer_class}"):
                tokenizer = tokenizer_class.from_pretrained("bert-base-uncased")
                added_tokens_size = len(tokenizer.added_tokens_decoder)
                self.assertEqual(len(tokenizer), tokenizer.vocab_size)

                tokenizer.add_tokens(["<test_token>"])
                self.assertEqual(len(tokenizer), tokenizer.vocab_size + 1)
                self.assertEqual(len(tokenizer.added_tokens_decoder), added_tokens_size + 1)
                self.assertEqual(len(tokenizer.added_tokens_encoder), added_tokens_size + 1)

    @require_sentencepiece
    def test_sentencepiece_cohabitation(self):
        """import_protobuf must work even when sentencepiece's own protobuf module was
        imported first in the same process."""
        from sentencepiece import sentencepiece_model_pb2 as _original_protobuf  # noqa: F401

        from transformers.convert_slow_tokenizer import import_protobuf  # noqa: F401

        # Now this will try to import sentencepiece_model_pb2_new.py. This should not fail even if the protobuf
        # was already imported.
        import_protobuf()

    def test_training_new_tokenizer_edge_cases(self):
        """train_new_from_iterator must tolerate a backing tokenizer with no
        pre_tokenizer, no normalizer, and no post_processor."""
        _tokenizer = Tokenizer(tokenizers.models.BPE(vocab={"a": 1, "b": 2, "ab": 3}, merges=[("a", "b")]))
        _tokenizer.pre_tokenizer = None
        tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
        toy_text_iterator = ("a" for _ in range(1000))
        tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)

        _tokenizer.normalizer = None
        tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
        toy_text_iterator = ("a" for _ in range(1000))
        tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)

        _tokenizer.post_processor = None
        tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
        toy_text_iterator = ("a" for _ in range(1000))
        tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)

    def test_encode_message(self):
        """Encoding a conversation message-by-message must produce the same tokens as
        applying the chat template to the whole conversation at once."""
        tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
        conversation = [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Hey there, how are you?"},
            {"role": "assistant", "content": "Thank you for asking, I am doing well"},
            {"role": "user", "content": "What's the weather like today?"},
            {"role": "assistant", "content": "Today the weather is nice"},
        ]
        # First, test the default case, where we encode the whole conversation at once
        whole_conversation_tokens = tokenizer.apply_chat_template(conversation, tokenize=True)

        # Now, test the message-by-message encoding
        tokens = []
        for i, message in enumerate(conversation):
            tokens += tokenizer.encode_message_with_chat_template(message, conversation_history=conversation[:i])

        self.assertEqual(whole_conversation_tokens, tokens)

    def test_encode_message_raises_on_add_generation_prompt(self):
        # add_generation_prompt is not meaningful for single-message encoding.
        tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
        conversation = [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Hey there, how are you?"},
        ]
        with self.assertRaises(ValueError):
            tokenizer.encode_message_with_chat_template(conversation[0], add_generation_prompt=True)
transformers/tests/tokenization/test_tokenization_utils.py/0
{ "file_path": "transformers/tests/tokenization/test_tokenization_utils.py", "repo_id": "transformers", "token_count": 8251 }
618
import unittest

from transformers.testing_utils import Expectations


class ExpectationsTest(unittest.TestCase):
    """Tests for `Expectations.find_expectation` device-property matching."""

    def test_expectations(self):
        """Exercise matching against a table of (device, major-version) keys.

        Every value in the table is a unique ID so each lookup reveals which
        expectation was selected.
        """
        expectations = Expectations(
            {
                (None, None): 1,
                ("cuda", 8): 2,
                ("cuda", 7): 3,
                ("rocm", 8): 4,
                ("rocm", None): 5,
                ("cpu", None): 6,
                ("xpu", 3): 7,
            }
        )

        # (expected_id, device_prop) pairs. npu has no entry at all, so it must
        # fall back to the (None, None) default expectation.
        cases = [
            (1, ("npu", None, None)),
            (7, ("xpu", 3, None)),
            (2, ("cuda", 8, None)),
            (3, ("cuda", 7, None)),
            (4, ("rocm", 9, None)),
            (4, ("rocm", None, None)),
            (2, ("cuda", 2, None)),
        ]
        for expected_id, device_prop in cases:
            found_id = expectations.find_expectation(device_prop)
            assert found_id == expected_id, f"Expected {expected_id} for {device_prop}, found {found_id}"

        # With no default expectation in the table, an unmatched device must raise.
        expectations = Expectations({("cuda", 8): 1})
        with self.assertRaises(ValueError):
            expectations.find_expectation(("xpu", None))
transformers/tests/utils/test_expectations.py/0
{ "file_path": "transformers/tests/utils/test_expectations.py", "repo_id": "transformers", "token_count": 648 }
619
# Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import glob import json import os import os.path import subprocess import sys import tempfile import textwrap import threading import unittest import unittest.mock as mock import uuid import warnings from pathlib import Path import pytest import requests from huggingface_hub import HfApi, HfFolder from parameterized import parameterized from pytest import mark from requests.exceptions import HTTPError from transformers import ( AutoConfig, AutoModel, AutoModelForImageClassification, AutoModelForSequenceClassification, CLIPTextModelWithProjection, DynamicCache, LlavaForConditionalGeneration, MistralForCausalLM, OwlViTForObjectDetection, PretrainedConfig, is_torch_available, logging, ) from transformers.modeling_flash_attention_utils import is_flash_attn_available from transformers.testing_utils import ( TOKEN, CaptureLogger, LoggingLevel, TemporaryHubRepo, TestCasePlus, hub_retry, is_staging_test, require_accelerate, require_non_hpu, require_read_token, require_safetensors, require_torch, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, check_torch_load_is_safe, ) from transformers.utils.import_utils import ( is_flash_attn_2_available, is_flash_attn_3_available, is_torch_npu_available, ) sys.path.append(str(Path(__file__).parent.parent.parent / 
"utils")) from test_module.custom_configuration import CustomConfig if is_torch_available(): import torch from safetensors.torch import save_file as safe_save_file from test_module.custom_modeling import CustomModel from torch import nn from transformers import ( AutoModelForCausalLM, AutoTokenizer, BertConfig, BertModel, CLIPTextModel, GenerationMixin, PreTrainedModel, T5Config, T5ForConditionalGeneration, ) from transformers.modeling_attn_mask_utils import ( AttentionMaskConverter, _create_4d_causal_attention_mask, _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, ) from transformers.modeling_utils import ( _find_disjoint, _find_identical, ) from transformers.pytorch_utils import isin_mps_friendly # Fake pretrained models for tests class BaseModel(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) class BaseModelWithTiedWeights(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) def tie_weights(self): self.linear_2.weight = self.linear.weight class ModelWithHead(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) # linear is a common name between Base and Head on purpose. 
self.linear = nn.Linear(5, 5) self.linear2 = nn.Linear(5, 5) def forward(self, x): return self.linear2(self.linear(self.base(x))) class ModelWithDirectParam(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) # direct params and submodules is helpful for testing offloading logic self.weight = nn.Parameter(torch.rand((5, 5))) self.base = BaseModel(config) def forward(self, x): return self.base(x @ self.weight.T) class ModelWithDirectParamSubmodule(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.submodule = ModelWithDirectParam(config) # needed so model can have at least one module on accelerator self.linear = nn.Linear(5, 5) def forward(self, x): return self.linear(self.submodule(x)) class ModelWithHeadAndTiedWeights(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) self.decoder = nn.Linear(5, 5) def forward(self, x): return self.decoder(self.base(x)) def tie_weights(self): self.decoder.weight = self.base.linear.weight class Prepare4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _prepare_4d_causal_attention_mask( None, (batch_size, seq_length), inputs_embeds, past_key_values_length ) return attention_mask class Create4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _create_4d_causal_attention_mask( (batch_size, seq_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) return attention_mask class 
Prepare4dAttentionMaskModel(nn.Module): def forward(self, mask, inputs_embeds): attention_mask = _prepare_4d_attention_mask(mask, dtype=inputs_embeds.dtype) return attention_mask class TestOffline(unittest.TestCase): def test_offline(self): # Ugly setup with monkeypatches, amending env vars here is too late as libs have already been imported from huggingface_hub import constants from transformers.utils import hub offlfine_env = hub._is_offline_mode hub_cache_env = constants.HF_HUB_CACHE hub_cache_env1 = constants.HUGGINGFACE_HUB_CACHE default_cache = constants.default_cache_path transformers_cache = hub.TRANSFORMERS_CACHE try: hub._is_offline_mode = True with tempfile.TemporaryDirectory() as tmpdir: LOG.info("Temporary cache dir %s", tmpdir) constants.HF_HUB_CACHE = tmpdir constants.HUGGINGFACE_HUB_CACHE = tmpdir constants.default_cache_path = tmpdir hub.TRANSFORMERS_CACHE = tmpdir # First offline load should fail try: AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None ) except OSError: LOG.info("Loading model %s in offline mode failed as expected", TINY_IMAGE_CLASSIF) else: self.fail(f"Loading model {TINY_IMAGE_CLASSIF} in offline mode should fail") # Download model -> Huggingface Hub not concerned by our offline mode LOG.info("Downloading %s for offline tests", TINY_IMAGE_CLASSIF) hub_api = HfApi() local_dir = hub_api.snapshot_download(TINY_IMAGE_CLASSIF, cache_dir=tmpdir) LOG.info("Model %s downloaded in %s", TINY_IMAGE_CLASSIF, local_dir) AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None ) finally: # Tear down: reset env as it was before calling this test hub._is_offline_mode = offlfine_env constants.HF_HUB_CACHE = hub_cache_env constants.HUGGINGFACE_HUB_CACHE = hub_cache_env1 constants.default_cache_path = default_cache hub.TRANSFORMERS_CACHE = transformers_cache def test_local_files_only(self): # Ugly setup with monkeypatches, amending env vars here 
is too late as libs have already been imported from huggingface_hub import constants from transformers.utils import hub hub_cache_env = constants.HF_HUB_CACHE hub_cache_env1 = constants.HUGGINGFACE_HUB_CACHE default_cache = constants.default_cache_path transformers_cache = hub.TRANSFORMERS_CACHE try: with tempfile.TemporaryDirectory() as tmpdir: LOG.info("Temporary cache dir %s", tmpdir) constants.HF_HUB_CACHE = tmpdir constants.HUGGINGFACE_HUB_CACHE = tmpdir constants.default_cache_path = tmpdir hub.TRANSFORMERS_CACHE = tmpdir try: AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None, local_files_only=True ) except OSError: LOG.info("Loading model %s in offline mode failed as expected", TINY_IMAGE_CLASSIF) else: self.fail(f"Loading model {TINY_IMAGE_CLASSIF} in offline mode should fail") LOG.info("Downloading %s for offline tests", TINY_IMAGE_CLASSIF) hub_api = HfApi() local_dir = hub_api.snapshot_download(TINY_IMAGE_CLASSIF, cache_dir=tmpdir) LOG.info("Model %s downloaded in %s", TINY_IMAGE_CLASSIF, local_dir) AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None, local_files_only=True ) finally: # Tear down: reset env as it was before calling this test constants.HF_HUB_CACHE = hub_cache_env constants.HUGGINGFACE_HUB_CACHE = hub_cache_env1 constants.default_cache_path = default_cache hub.TRANSFORMERS_CACHE = transformers_cache # Need to be serializable, which means they cannot be in a test class method class TestGammaBetaNorm(torch.nn.Module): def __init__(self): super().__init__() self.gamma = torch.nn.Parameter(torch.ones(1)) self.beta = torch.nn.Parameter(torch.zeros(1)) def forward(self): return self.gamma.sum() + self.beta.sum() class TestModelGammaBeta(PreTrainedModel): def __init__(self, config): super().__init__(config) self.LayerNorm = TestGammaBetaNorm() self.post_init() def forward(self): return self.LayerNorm() TINY_T5 = 
"patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" TINY_MISTRAL = "hf-internal-testing/tiny-random-MistralForCausalLM" TINY_IMAGE_CLASSIF = "hf-internal-testing/tiny-random-SiglipForImageClassification" TINY_LLAVA = "hf-internal-testing/tiny-random-LlavaForConditionalGeneration" LOG = logging.get_logger(__name__) def check_models_equal(model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal @require_torch class ModelUtilsTest(TestCasePlus): def setUp(self): self.old_dtype = torch.get_default_dtype() super().setUp() def tearDown(self): torch.set_default_dtype(self.old_dtype) super().tearDown() def test_hub_retry(self): @hub_retry(max_attempts=2) def test_func(): # First attempt will fail with a connection error if not hasattr(test_func, "attempt"): test_func.attempt = 1 raise requests.exceptions.ConnectionError("Connection failed") # Second attempt will succeed return True self.assertTrue(test_func()) @slow def test_model_from_pretrained(self): model_name = "google-bert/bert-base-uncased" config = BertConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, PretrainedConfig) model = BertModel.from_pretrained(model_name) model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, PreTrainedModel) self.assertEqual(len(loading_info["missing_keys"]), 0) self.assertEqual(len(loading_info["unexpected_keys"]), 8) self.assertEqual(len(loading_info["mismatched_keys"]), 0) self.assertEqual(len(loading_info["error_msgs"]), 0) config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) # Not sure this is the intended behavior. 
TODO fix Lysandre & Thom config.name_or_path = model_name model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) self.assertEqual(model.config.output_hidden_states, True) self.assertEqual(model.config, config) def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_manually_shared_disjointed_tensors_optimum(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) # Let's fuse qkv attn = model.encoder.layer[0].attention.self q = attn.query.weight k = attn.key.weight v = attn.value.weight # Force some shared storage qkv = torch.stack([q, k, v], dim=0) attn.query.weight = torch.nn.Parameter(qkv[0]) attn.key.weight = torch.nn.Parameter(qkv[1]) attn.value.weight = torch.nn.Parameter(qkv[2]) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = 
"hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_with_different_pretrained_model_name(self): model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertIsNotNone(model) logger = logging.get_logger("transformers.configuration_utils") with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: BertModel.from_pretrained(TINY_T5) self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out) @require_accelerate def test_model_from_pretrained_with_none_quantization_config(self): # Needs a device_map for to enter the low_cpu_mem branch. We also load AutoModelForSequenceClassification # deliberately to enter the missing keys branch. model = AutoModelForSequenceClassification.from_pretrained( TINY_MISTRAL, device_map="auto", quantization_config=None ) self.assertIsNotNone(model) def test_model_from_config_dtype(self): # test that the model can be instantiated with dtype of user's choice - as long as it's a # float dtype. To make it happen config.dtype needs to be set before instantiating the # model from the config object. 
config = T5Config.from_pretrained(TINY_T5) model = AutoModel.from_config(config) # XXX: isn't supported # model = T5ForConditionalGeneration.from_config(config) self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_config(config, dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_config(config, dtype=torch.int64) def test_model_from_config_dtype_str(self): # test that from_pretrained works with dtype being strings like "float32" for PyTorch backend model = AutoModel.from_pretrained(TINY_T5, dtype="float32") self.assertEqual(model.dtype, torch.float32) self.assertIsInstance(model.config.dtype, torch.dtype) model = AutoModel.from_pretrained(TINY_T5, dtype="float16") self.assertEqual(model.dtype, torch.float16) self.assertIsInstance(model.config.dtype, torch.dtype) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_pretrained(TINY_T5, dtype="int64") def test_model_from_config_dtype_composite(self): """ Test that from_pretrained works with dtype being as a dict per each sub-config in composite config Tiny-Llava has saved auto dtype as `torch.float32` for all modules. 
""" # Load without dtype specified model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA) self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float32) self.assertIsInstance(model.config.dtype, torch.dtype) # should be able to set dtype as a simple string and the model loads it correctly model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, dtype="float32") self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float32) self.assertIsInstance(model.config.dtype, torch.dtype) model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, dtype=torch.float16) self.assertEqual(model.language_model.dtype, torch.float16) self.assertEqual(model.vision_tower.dtype, torch.float16) self.assertIsInstance(model.config.dtype, torch.dtype) # should be able to set dtype as a dict for each sub-config model = LlavaForConditionalGeneration.from_pretrained( TINY_LLAVA, dtype={"text_config": "float32", "vision_config": "float16", "": "bfloat16"} ) self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.bfloat16) self.assertIsInstance(model.config.dtype, torch.dtype) # should be able to set the values as torch.dtype (not str) model = LlavaForConditionalGeneration.from_pretrained( TINY_LLAVA, dtype={"text_config": torch.float32, "vision_config": torch.float16, "": torch.bfloat16} ) self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.bfloat16) self.assertIsInstance(model.config.dtype, torch.dtype) # should be able to set the values in configs directly and pass it to `from_pretrained` config = copy.deepcopy(model.config) config.text_config.dtype = torch.float32 
config.vision_config.dtype = torch.bfloat16 config.dtype = torch.float16 model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, config=config, dtype="auto") self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.bfloat16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.float16) self.assertIsInstance(model.config.dtype, torch.dtype) # but if the model has `_keep_in_fp32_modules` then those modules should be in fp32 no matter what LlavaForConditionalGeneration._keep_in_fp32_modules = ["multi_modal_projector"] model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, config=config, dtype="auto") self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.bfloat16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.float32) self.assertIsInstance(model.config.dtype, torch.dtype) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, dtype="int64") model = LlavaForConditionalGeneration.from_pretrained( TINY_LLAVA, dtype={"text_config": "float32", "vision_config": "int64", "": "float16"} ) def test_model_from_pretrained_dtype(self): # test that the model can be instantiated with dtype of either # 1. explicit from_pretrained's dtype argument # 2. via autodiscovery by looking at model weights (dtype="auto") # so if a model.half() was saved, we want it to be instantiated as such. 
        #
        # test an explicit model class, but also AutoModel separately as the latter goes through a different code path
        model_path = self.get_auto_remove_tmp_dir()

        # baseline - we know TINY_T5 is fp32 model
        model = T5ForConditionalGeneration.from_pretrained(TINY_T5)
        self.assertEqual(model.dtype, torch.float32)

        def remove_dtype(model_path):
            # Strip the "dtype" entry from config.json so "auto" must fall
            # back to deriving the dtype from the weights themselves.
            file = f"{model_path}/config.json"
            with open(file, encoding="utf-8") as f:
                s = json.load(f)
            s.pop("dtype")
            with open(file, "w", encoding="utf-8") as f:
                json.dump(s, f)

        # test the default fp32 save_pretrained => from_pretrained cycle
        model.save_pretrained(model_path)
        model = T5ForConditionalGeneration.from_pretrained(model_path)
        self.assertEqual(model.dtype, torch.float32)

        # 1. test dtype="auto" via `config.dtype`
        model = T5ForConditionalGeneration.from_pretrained(model_path, dtype="auto")
        self.assertEqual(model.dtype, torch.float32)

        # 2. test dtype="auto" via auto-derivation
        # now remove the dtype entry from config.json and try "auto" again which should
        # perform auto-derivation from weights
        remove_dtype(model_path)
        model = T5ForConditionalGeneration.from_pretrained(model_path, dtype="auto")
        self.assertEqual(model.dtype, torch.float32)

        # test forced loading in fp16 (even though the weights are in fp32)
        model = T5ForConditionalGeneration.from_pretrained(model_path, dtype=torch.float16)
        self.assertEqual(model.dtype, torch.float16)

        # test fp16 save_pretrained, loaded with auto-detection
        model = model.half()
        model.save_pretrained(model_path)
        # 1. test dtype="auto" via `config.dtype`
        model = T5ForConditionalGeneration.from_pretrained(model_path, dtype="auto")
        self.assertEqual(model.config.dtype, torch.float16)
        self.assertEqual(model.dtype, torch.float16)

        # tests `config.dtype` saving
        with open(f"{model_path}/config.json") as f:
            config_dict = json.load(f)
        self.assertEqual(config_dict["dtype"], "float16")

        # 2. test dtype="auto" via auto-derivation
        # now same with using config info
        remove_dtype(model_path)
        model = T5ForConditionalGeneration.from_pretrained(model_path, dtype="auto")
        self.assertEqual(model.dtype, torch.float16)

        # 3. now retest that AutoModel behaves the same wrt dtype="auto" as T5ForConditionalGeneration
        model = AutoModel.from_pretrained(model_path, dtype="auto")
        self.assertEqual(model.dtype, torch.float16)

        # test fp16 save_pretrained, loaded with the explicit fp16
        model = T5ForConditionalGeneration.from_pretrained(model_path, dtype=torch.float16)
        self.assertEqual(model.dtype, torch.float16)

        # test AutoModel separately as it goes through a different path
        # test auto-detection - as currently TINY_T5 doesn't have dtype entry
        model = AutoModel.from_pretrained(TINY_T5, dtype="auto")
        # test that the config object didn't get polluted with dtype="auto"
        # there was a bug that after this call we ended up with config.dtype=="auto"
        self.assertNotEqual(model.config.dtype, "auto")
        # now test the outcome
        self.assertEqual(model.dtype, torch.float32)

        model = AutoModel.from_pretrained(TINY_T5, dtype=torch.float16)
        self.assertEqual(model.dtype, torch.float16)

        # test model whose first param is not of a floating type, but int
        model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, dtype="auto")
        self.assertEqual(model.dtype, torch.float32)

        # test model that init the model with _from_config
        model = CLIPTextModelWithProjection.from_pretrained(
            "hf-internal-testing/diffusers-stable-diffusion-tiny-all",
            subfolder="text_encoder",
            dtype=torch.bfloat16,
        )
        self.assertEqual(model.dtype, torch.bfloat16)

    def test_model_from_pretrained_attn_implementation(self):
        # test that the model can be instantiated with attn_implementation of either
        # 1. explicit from_pretrained's attn_implementation argument
        # 2. explicit from_pretrained's attn_implementation argument with a config argument
        attn_implementation_available = ["eager", "sdpa"]
        if is_flash_attn_available():
            attn_implementation_available.append("flash_attention_2")
        if is_flash_attn_3_available():
            attn_implementation_available.append("flash_attention_3")

        for requested_attn_implementation in attn_implementation_available:
            model = AutoModelForCausalLM.from_pretrained(
                TINY_MISTRAL, attn_implementation=requested_attn_implementation
            )
            self.assertEqual(model.config._attn_implementation, requested_attn_implementation)

            config = AutoConfig.from_pretrained(TINY_MISTRAL)
            model = AutoModelForCausalLM.from_pretrained(
                TINY_MISTRAL, config=config, attn_implementation=requested_attn_implementation
            )
            self.assertEqual(model.config._attn_implementation, requested_attn_implementation)

    def test_model_from_config_attn_implementation(self):
        # test that the model can be instantiated with attn_implementation of either
        # 1. config created with explicit attn_implementatation and from_config
        # 2. explicit from_config's attn_implementation argument with a config argument
        # 3. config created with explicit attn_implementatation and from_config overriding with explicit attn_implementation argument
        attn_implementation_available = ["eager", "sdpa"]
        if is_flash_attn_available():
            attn_implementation_available.append("flash_attention_2")
        if is_flash_attn_3_available():
            attn_implementation_available.append("flash_attention_3")

        for requested_attn_implementation in attn_implementation_available:
            config = AutoConfig.from_pretrained(TINY_MISTRAL, attn_implementation=requested_attn_implementation)
            # Ensure the config was set correctly
            self.assertEqual(config._attn_implementation, requested_attn_implementation)
            model = AutoModelForCausalLM.from_config(config)
            self.assertEqual(model.config._attn_implementation, requested_attn_implementation)

            config = AutoConfig.from_pretrained(TINY_MISTRAL)
            # When the config is not set, the default is "eager"
            self.assertEqual(config._attn_implementation, None)
            model = AutoModelForCausalLM.from_config(config=config, attn_implementation=requested_attn_implementation)
            self.assertEqual(model.config._attn_implementation, requested_attn_implementation)

        # Set a nonsense attn_implementation in the config, which should be overridden by the explicit argument
        config = AutoConfig.from_pretrained(TINY_MISTRAL, attn_implementation="foo-bar-baz")
        self.assertEqual(config._attn_implementation, "foo-bar-baz")
        model = AutoModelForCausalLM.from_config(config=config, attn_implementation=requested_attn_implementation)
        self.assertEqual(model.config._attn_implementation, requested_attn_implementation)

    def test_checkpoint_sharding_local_bin(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            # We use the same folder for various sizes to make sure a new save erases the old checkpoint.
            for max_size in ["50kB", "100kB", "200kB"]:
                model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False)

                # Get each shard file and its size
                shard_to_size = {}
                for shard in os.listdir(tmp_dir):
                    if shard.endswith(".bin"):
                        shard_file = os.path.join(tmp_dir, shard)
                        shard_to_size[shard_file] = os.path.getsize(shard_file)

                index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME)
                # Check there is an index but no regular weight file
                self.assertTrue(os.path.isfile(index_file))
                self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))

                # Check a file is bigger than max_size only when it has a single weight
                for shard_file, size in shard_to_size.items():
                    max_size_int = int(max_size[:-2]) * 10**3
                    # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than
                    # the size asked for (since we count parameters)
                    if size >= max_size_int + 50000:
                        check_torch_load_is_safe()
                        state_dict = torch.load(shard_file, weights_only=True)
                        self.assertEqual(len(state_dict), 1)

                # Check the index and the shard files found match
                with open(index_file, encoding="utf-8") as f:
                    index = json.loads(f.read())

                all_shards = set(index["weight_map"].values())
                shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")}
                self.assertSetEqual(all_shards, shards_found)

            # Finally, check the model can be reloaded
            new_model = BertModel.from_pretrained(tmp_dir)
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                torch.testing.assert_close(p1, p2)

    def test_checkpoint_sharding_from_hub(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
        # the model above is the same as the model below, just a sharded version.
        ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        for p1, p2 in zip(model.parameters(), ref_model.parameters()):
            torch.testing.assert_close(p1, p2)

    def test_checkpoint_variant_local_bin(self):
        # A variant save writes pytorch_model.v2.bin; loading then requires variant="v2".
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False)

            weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"])

            weights_file = os.path.join(tmp_dir, weights_name)
            self.assertTrue(os.path.isfile(weights_file))
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))

            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained(tmp_dir)

            new_model = BertModel.from_pretrained(tmp_dir, variant="v2")

        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            torch.testing.assert_close(p1, p2)

    def test_checkpoint_variant_local_sharded_bin(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False)

            weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"])
            weights_index_file = os.path.join(tmp_dir, weights_index_name)
            self.assertTrue(os.path.isfile(weights_index_file))
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME)))

            for i in range(1, 5):
                weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"])
                weights_name_file = os.path.join(tmp_dir, weights_name)
                self.assertTrue(os.path.isfile(weights_name_file))

            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained(tmp_dir)

            new_model = BertModel.from_pretrained(tmp_dir, variant="v2")

        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            torch.testing.assert_close(p1, p2)

    @require_safetensors
    def test_checkpoint_variant_local_safe(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True)

            weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"])

            weights_file = os.path.join(tmp_dir, weights_name)
            self.assertTrue(os.path.isfile(weights_file))
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))

            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained(tmp_dir)

            new_model = BertModel.from_pretrained(tmp_dir, variant="v2")

        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            torch.testing.assert_close(p1, p2)

    @require_safetensors
    def test_checkpoint_variant_local_sharded_safe(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True)

            weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"])
            weights_index_file = os.path.join(tmp_dir, weights_index_name)
            self.assertTrue(os.path.isfile(weights_index_file))
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            for i in range(1, 5):
                weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"])
                weights_name_file = os.path.join(tmp_dir, weights_name)
                self.assertTrue(os.path.isfile(weights_name_file))

            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained(tmp_dir)

            new_model = BertModel.from_pretrained(tmp_dir, variant="v2")

        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            torch.testing.assert_close(p1, p2)

    def test_checkpoint_loading_only_safetensors_available(self):
        # Test that the loading behaviour is as expected when only safetensor checkpoints are available
        # - We can load the model with use_safetensors=True
        # - We can load the model without specifying use_safetensors i.e. we search for the available checkpoint,
        #   preferring safetensors
        # - We cannot load the model with use_safetensors=False
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, max_shard_size="50kB", safe_serialization=True)

            weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["json"])
            weights_index_file = os.path.join(tmp_dir, weights_index_name)
            self.assertTrue(os.path.isfile(weights_index_file))

            for i in range(1, 5):
                weights_name = f"model-0000{i}-of-00005" + ".safetensors"
                weights_name_file = os.path.join(tmp_dir, weights_name)
                self.assertTrue(os.path.isfile(weights_name_file))

            # Setting use_safetensors=False should raise an error as the checkpoint was saved with safetensors=True
            with self.assertRaises(OSError):
                _ = BertModel.from_pretrained(tmp_dir, use_safetensors=False)

            # We can load the model with use_safetensors=True
            new_model = BertModel.from_pretrained(tmp_dir, use_safetensors=True)

            # We can load the model without specifying use_safetensors
            new_model = BertModel.from_pretrained(tmp_dir)

        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            torch.testing.assert_close(p1, p2)

    def test_checkpoint_loading_only_pytorch_bin_available(self):
        # Test that the loading behaviour is as expected when only pytorch checkpoints are available
        # - We can load the model with use_safetensors=False
        # - We can load the model without specifying use_safetensors i.e.
        #   we search for the available checkpoint, preferring safetensors but falling back to pytorch
        # - We cannot load the model with use_safetensors=True
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, max_shard_size="50kB", safe_serialization=False)

            weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["json"])
            weights_index_file = os.path.join(tmp_dir, weights_index_name)
            self.assertTrue(os.path.isfile(weights_index_file))

            for i in range(1, 5):
                weights_name = WEIGHTS_NAME.split(".")[0].split("_")[0] + f"_model-0000{i}-of-00005" + ".bin"
                weights_name_file = os.path.join(tmp_dir, weights_name)
                self.assertTrue(os.path.isfile(weights_name_file))

            # Setting use_safetensors=True should raise an error as the checkpoint was saved with safetensors=False
            with self.assertRaises(OSError):
                _ = BertModel.from_pretrained(tmp_dir, use_safetensors=True)

            # We can load the model with use_safetensors=False
            new_model = BertModel.from_pretrained(tmp_dir, use_safetensors=False)

            # We can load the model without specifying use_safetensors
            new_model = BertModel.from_pretrained(tmp_dir)

        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            torch.testing.assert_close(p1, p2)

    def test_checkpoint_variant_hub(self):
        # Hub checkpoint only has a "v2" variant: plain load fails, variant load works.
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir)
            model = BertModel.from_pretrained(
                "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2"
            )
        self.assertIsNotNone(model)

    def test_checkpoint_variant_hub_sharded(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained(
                    "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir
                )
            model = BertModel.from_pretrained(
                "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2"
            )
        self.assertIsNotNone(model)

    @require_safetensors
    def test_checkpoint_variant_hub_safe(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir)
            model = BertModel.from_pretrained(
                "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2"
            )
        self.assertIsNotNone(model)

    @require_safetensors
    def test_checkpoint_variant_hub_sharded_safe(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self.assertRaises(EnvironmentError):
                _ = BertModel.from_pretrained(
                    "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir
                )
            model = BertModel.from_pretrained(
                "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2"
            )
        self.assertIsNotNone(model)

    def test_checkpoint_variant_save_load_bin(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = BertModel.from_pretrained(
                "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2"
            )
            weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"])

            model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False)
            # saving will create a variant checkpoint
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name)))

            model.save_pretrained(tmp_dir, safe_serialization=False)
            # saving shouldn't delete variant checkpoints
            weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"])
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name)))

            # there should be a normal checkpoint
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))

        self.assertIsNotNone(model)

    @require_non_hpu
    @require_accelerate
    @mark.accelerate_tests
    @require_torch_multi_accelerator
    @slow
    def test_model_parallelism_gpt2(self):
        # Split gpt2 across two accelerators: blocks 0-5 on device 0, 6-11 on device 1.
        device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1}
        for i in range(12):
            device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1

        model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2", device_map=device_map)

        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        inputs = tokenizer("Hello, my name is", return_tensors="pt")
        output = model.generate(inputs["input_ids"].to(f"{torch_device}:0"))

        text_output = tokenizer.decode(output[0].tolist())
        self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. I'm")

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_accelerator
    def test_from_pretrained_disk_offload_task_model(self):
        # Outputs must match between a fully on-device model and one with some
        # blocks offloaded to cpu/disk.
        model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        device_map = {
            "transformer.wte": f"{torch_device}:0",
            "transformer.wpe": f"{torch_device}:0",
            "transformer.h.0": "cpu",
            "transformer.h.1": "cpu",
            "transformer.h.2": "cpu",
            "transformer.h.3": "disk",
            "transformer.h.4": "disk",
            "transformer.ln_f": f"{torch_device}:0",
            "lm_head": f"{torch_device}:0",
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            inputs = torch.tensor([[1, 2, 3]]).to(f"{torch_device}:0")

            model.save_pretrained(tmp_dir)
            new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(f"{torch_device}:0")
            outputs1 = new_model.to(f"{torch_device}:0")(inputs)

            offload_folder = os.path.join(tmp_dir, "offload")
            new_model_with_offload = AutoModelForCausalLM.from_pretrained(
                tmp_dir, device_map=device_map, offload_folder=offload_folder
            )
            outputs2 = new_model_with_offload(inputs)

            torch.testing.assert_close(outputs1.logits.cpu(), outputs2.logits.cpu())

            # With state dict temp offload
            new_model_with_offload = AutoModelForCausalLM.from_pretrained(
                tmp_dir,
                device_map=device_map,
                offload_folder=offload_folder,
                offload_state_dict=True,
            )
            outputs2 = new_model_with_offload(inputs)
            torch.testing.assert_close(outputs1.logits.cpu(), outputs2.logits.cpu())

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_accelerator
    def
test_from_pretrained_disk_offload_derived_to_base_model(self): derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "wte": f"{torch_device}:0", "wpe": f"{torch_device}:0", "h.0": "cpu", "h.1": "cpu", "h.2": "cpu", "h.3": "disk", "h.4": "disk", "ln_f": f"{torch_device}:0", } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(f"{torch_device}:0") derived_model.save_pretrained(tmp_dir, use_safetensors=True) base_model = AutoModel.from_pretrained(tmp_dir) outputs1 = base_model.to(f"{torch_device}:0")(inputs) # with disk offload offload_folder = os.path.join(tmp_dir, "offload") base_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = base_model_with_offload(inputs) torch.testing.assert_close(outputs1[0].cpu(), outputs2[0].cpu()) # With state dict temp offload new_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) torch.testing.assert_close(outputs1[0].cpu(), outputs2[0].cpu()) @slow @require_torch def test_from_pretrained_non_contiguous_checkpoint(self): # See: https://github.com/huggingface/transformers/pull/28414 # Tiny models on the Hub have contiguous weights, contrarily to google/owlvit model = OwlViTForObjectDetection.from_pretrained("fxmarty/owlvit-tiny-non-contiguous-weight") self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) model = OwlViTForObjectDetection.from_pretrained( "fxmarty/owlvit-tiny-non-contiguous-weight", device_map="auto" ) self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=False) model.save_pretrained(tmp_dir, safe_serialization=True) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head 
request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() @require_accelerate @mark.accelerate_tests def test_save_model_with_device_map_cpu(self): model_id = "hf-internal-testing/tiny-random-gpt2" inputs = torch.tensor([[1, 2, 3]]) with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu") output = model(inputs)[0] model.save_pretrained( tmp_dir, max_shard_size="200KB" ) # model is 1.6MB, max shard size is allocated to cpu by default saved_model = AutoModelForCausalLM.from_pretrained(tmp_dir, device_map="cpu") saved_model_output = saved_model(inputs)[0] torch.testing.assert_close(output, saved_model_output) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_save_offloaded_model(self): device_map = { "transformer.wte": f"{torch_device}:0", "transformer.wpe": f"{torch_device}:0", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": f"{torch_device}:0", "lm_head": f"{torch_device}:0", } # check_models_equal requires onloaded tensors model_id = "hf-internal-testing/tiny-random-gpt2" onloaded_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu").to(f"{torch_device}:0") inputs = torch.tensor([[1, 2, 3]]).to(f"{torch_device}:0") output = onloaded_model(inputs)[0] with 
tempfile.TemporaryDirectory() as tmp_dir: offload_folder = os.path.join(tmp_dir, "offload") offloaded_model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, offload_folder=offload_folder ) presaved_output = offloaded_model(inputs)[0] offloaded_model.save_pretrained( tmp_dir, max_shard_size="200KB" ) # model is 1.6MB, max shard size is allocated to cpu by default saved_model = AutoModelForCausalLM.from_pretrained(tmp_dir, device_map=device_map) postsaved_output = saved_model(inputs)[0] torch.testing.assert_close(output, presaved_output, rtol=1e-4, atol=1e-4) torch.testing.assert_close(presaved_output, postsaved_output) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_save_offloaded_model_with_direct_params(self): from accelerate import dispatch_model device_map = {"submodule": "cpu", "linear": f"{torch_device}:0"} model = ModelWithDirectParamSubmodule(PretrainedConfig()) dispatch_model(model, device_map) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_save_offloaded_model_dynamic_tied_weights_keys(self): from accelerate import dispatch_model device_map = {"base": f"{torch_device}:0", "linear": "cpu", "linear2": "cpu"} model = ModelWithHead(PretrainedConfig()) dispatch_model(model, device_map) transform_a = torch.nn.Linear(1, 1, bias=False) transform_a._dynamic_tied_weights_keys = ["weight"] transform_b = torch.nn.Linear(1, 1, bias=False) transform_b._dynamic_tied_weights_keys = ["weight"] model.linear.register_module("transform_a", transform_a) model.linear.register_module("transform_b", transform_b) model.linear2.register_module("transform_a", transform_a) model.linear2.register_module("transform_b", transform_b) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) @require_safetensors def test_use_safetensors(self): # Should not raise anymore 
AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True) # test that error if only safetensors is available with self.assertRaises(OSError) as env_error: BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False) self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception)) # test that only safetensors if both available and use_safetensors=False with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=False, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files)) # test that no safetensors if both available and use_safetensors=True with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=True, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files)) # test no model file found when use_safetensors=None (default when safetensors package available) with self.assertRaises(OSError) as missing_model_file_error: BertModel.from_pretrained("hf-internal-testing/config-no-model") self.assertTrue( "does not appear to have a file named pytorch_model.bin, model.safetensors," in str(missing_model_file_error.exception) ) with self.assertRaises(OSError) as missing_model_file_error: with tempfile.TemporaryDirectory() as tmp_dir: with open(os.path.join(tmp_dir, "config.json"), "w") as f: f.write("{}") f.close() 
BertModel.from_pretrained(tmp_dir) self.assertTrue( "Error no file named pytorch_model.bin, model.safetensors" in str(missing_model_file_error.exception) ) @require_safetensors def test_safetensors_save_and_load(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No pytorch_model.bin file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) @require_safetensors def test_safetensors_load_from_hub(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): torch.testing.assert_close(p1, p2) @require_safetensors def test_safetensors_save_and_load_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") # No pytorch_model.bin index file, only a model.safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # No regular weights file self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) 
@require_safetensors def test_safetensors_load_from_hub_sharded(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): torch.testing.assert_close(p1, p2) def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: base_model.save_pretrained(tmp_dir, safe_serialization=False) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) for p1, p2 in zip(model.base.parameters(), base_model.parameters()): torch.testing.assert_close(p1, p2) # It doesn't work if the state dict has a mix of keys of the head and base without prefix though. base_state_dict = base_model.state_dict() head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." 
): _ = ModelWithHead.from_pretrained(tmp_dir) def test_tied_weights_reload(self): # Base model = BaseModelWithTiedWeights(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) state_dict = model.state_dict() # Remove tied weight from state_dict -> model should load with no complain of missing keys del state_dict["linear_2.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertListEqual(load_info["missing_keys"], []) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) # With head model.save_pretrained(tmp_dir) new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertIs(new_model.base.linear.weight, new_model.decoder.weight) # Should only complain about the missing bias self.assertListEqual(load_info["missing_keys"], ["decoder.bias"]) def test_unexpected_keys_warnings(self): model = ModelWithHead(PretrainedConfig()) logger = logging.get_logger("transformers.modeling_utils") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # Loading the model with a new class, we don't get a warning for unexpected weights, just an info with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True) self.assertNotIn("were not used when initializing ModelWithHead", cl.out) self.assertEqual( set(loading_info["unexpected_keys"]), {"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"}, ) # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"]) safe_save_file(state_dict, 
os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) self.assertEqual(loading_info["unexpected_keys"], ["added_key"]) def test_warn_if_padding_and_no_attention_mask(self): logger = logging.get_logger("transformers.modeling_utils") with self.subTest("Ensure no warnings when pad_token_id is None."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config_no_pad_token = PretrainedConfig() config_no_pad_token.pad_token_id = None model = ModelWithHead(config_no_pad_token) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there is an attention_mask."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]]) model.warn_if_padding_and_no_attention_mask(input_ids, 
attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure that the warning is shown at most once."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1) with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 config.bos_token_id = 
config.pad_token_id model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out) with self.subTest("Ensure that the warning code is skipped when compiling with torchdynamo."): logger.warning_once.cache_clear() from torch._dynamo import config, testing config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) def f(input_ids): model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) compile_counter = testing.CompileCounter() opt_fn = torch.compile(f, dynamic=True, backend=compile_counter) opt_fn(input_ids) self.assertEqual(compile_counter.frame_count, 0) @require_torch_accelerator @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . model_ids = ["openai-community/gpt2"] for model_id in model_ids: model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id) model_config.n_layer = 48 model_config.n_head = 25 model_config.n_embd = 1600 model = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=model_id, config=model_config, ignore_mismatched_sizes=True, dtype=torch.float16, ) model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id) self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__) def test_generation_config_is_loaded_with_model(self): # Note: `hf-internal-testing/tiny-random-MistralForCausalLM` has a `generation_config.json` # containing `bos_token_id: 1` # 1. Load without further parameters model = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL) self.assertEqual(model.generation_config.bos_token_id, 1) # 2. 
Load with `device_map` model = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL, device_map="auto") self.assertEqual(model.generation_config.bos_token_id, 1) @require_safetensors def test_safetensors_torch_from_torch(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors def test_safetensors_torch_from_torch_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_modifying_model_config_gets_moved_to_generation_config(self): """ Calling `model.save_pretrained` should move the changes made to `generate` parameterization in the model config to the generation config. """ model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") # Initially, the repetition penalty has its default value in `model.config`. The `model.generation_config` will # have the exact same default self.assertTrue(model.config.repetition_penalty == 1.0) self.assertTrue(model.generation_config.repetition_penalty == 1.0) # If the user attempts to save a custom generation parameter: model.config.repetition_penalty = 3.0 with warnings.catch_warnings(record=True) as warning_list: with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # 1 - That parameter will be removed from `model.config`. We don't want to use `model.config` to store # generative parameters, and the old default (1.0) would no longer relect the user's wishes. 
self.assertTrue(model.config.repetition_penalty is None) # 2 - That parameter will be set in `model.generation_config` instead. self.assertTrue(model.generation_config.repetition_penalty == 3.0) # 3 - The user will see a warning regarding the custom parameter that has been moved. self.assertTrue(len(warning_list) == 1) self.assertTrue("Moving the following attributes" in str(warning_list[0].message)) self.assertTrue("repetition_penalty" in str(warning_list[0].message)) @require_safetensors def test_model_from_pretrained_from_mlx(self): from safetensors import safe_open model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-mistral-mlx") self.assertIsNotNone(model) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) with safe_open(os.path.join(tmp_dir, "model.safetensors"), framework="pt") as f: metadata = f.metadata() self.assertEqual(metadata.get("format"), "pt") new_model = AutoModelForCausalLM.from_pretrained(tmp_dir) input_ids = torch.randint(100, 1000, (1, 10)) with torch.no_grad(): outputs = model(input_ids) outputs_from_saved = new_model(input_ids) torch.testing.assert_close(outputs_from_saved["logits"], outputs["logits"]) def test_warning_for_beta_gamma_parameters(self): logger = logging.get_logger("transformers.modeling_utils") config = PretrainedConfig() warning_msg_gamma = "`LayerNorm.gamma` -> `LayerNorm.weight`" warning_msg_beta = "`LayerNorm.beta` -> `LayerNorm.bias`" model = TestModelGammaBeta(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) with LoggingLevel(logging.INFO): with CaptureLogger(logger) as cl1: _, loading_info = TestModelGammaBeta.from_pretrained( tmp_dir, config=config, output_loading_info=True ) missing_keys = loading_info["missing_keys"] unexpected_keys = loading_info["unexpected_keys"] self.assertIn("`TestModelGammaBeta`", cl1.out) self.assertIn(warning_msg_gamma, cl1.out) self.assertIn(warning_msg_beta, cl1.out) 
self.assertIn("LayerNorm.gamma", missing_keys) self.assertIn("LayerNorm.weight", unexpected_keys) self.assertIn("LayerNorm.beta", missing_keys) self.assertIn("LayerNorm.bias", unexpected_keys) def test_isin_mps_friendly(self): """tests that our custom `isin_mps_friendly` matches `torch.isin`""" random_ids = torch.randint(0, 100, (100,)) # We can match against an integer random_test_integer = torch.randint(0, 100, (1,)).item() self.assertTrue( torch.equal( torch.isin(random_ids, random_test_integer), isin_mps_friendly(random_ids, random_test_integer) ) ) # We can match against an 0D tensor random_test_tensor = torch.randint(0, 100, (1,)).squeeze() self.assertTrue( torch.equal(torch.isin(random_ids, random_test_tensor), isin_mps_friendly(random_ids, random_test_tensor)) ) # We can match against an 1D tensor (with many items) random_test_tensor = torch.randint(0, 100, (10,)) self.assertTrue( torch.equal(torch.isin(random_ids, random_test_tensor), isin_mps_friendly(random_ids, random_test_tensor)) ) def test_can_generate(self): """Tests the behavior of `PreTrainedModel.can_generate` method.""" logger = logging.get_logger("transformers.modeling_utils") logger.warning_once.cache_clear() # 1 - By default, a model CAN'T generate can_generate = BertModel.can_generate() self.assertFalse(can_generate) # 2 - The most common case for a model to be able to generate is to inherit from `GenerationMixin` directly class DummyBertWithMixin(BertModel, GenerationMixin): pass with CaptureLogger(logger) as cl: can_generate = DummyBertWithMixin.can_generate() self.assertTrue("" == cl.out) self.assertTrue(can_generate) # 3 - Finally, it can inherit from a model that can generate class DummyBertWithParent(DummyBertWithMixin): pass with CaptureLogger(logger) as cl: can_generate = DummyBertWithParent.can_generate() self.assertTrue("" == cl.out) self.assertTrue(can_generate) # 4 - Legacy: models with a custom `prepare_inputs_for_generation` can generate (it was assumed # they inherited 
        `GenerationMixin`). Deprecated in v4.45 and removed in v4.51.
        """

        class DummyBertWithPrepareInputs(BertModel):
            # Defines `prepare_inputs_for_generation` but does NOT inherit from `GenerationMixin`:
            # since v4.51 this is no longer enough to be considered generative.
            def prepare_inputs_for_generation(self):
                pass

        with CaptureLogger(logger) as cl:
            can_generate = DummyBertWithPrepareInputs.can_generate()
        self.assertTrue("it doesn't directly inherit from `GenerationMixin`" in cl.out)
        self.assertFalse(can_generate)

    def test_save_and_load_config_with_custom_generation(self):
        """
        Regression test for the ability to save and load a config with a custom generation kwarg (i.e. a parameter
        that gets moved to the generation config and reset on the model config)
        """
        model = T5ForConditionalGeneration.from_pretrained(TINY_T5)
        # The default for `num_beams` is 1 and `early_stopping` is False
        self.assertTrue(model.config.num_beams == 1)
        self.assertTrue(model.config.early_stopping is False)

        # When we save the model, this custom parameter should be moved to the generation config AND the model
        # config should contain `None`
        model.config.num_beams = 2
        model.config.early_stopping = True
        self.assertTrue(model.generation_config.num_beams == 1)  # unmodified generation config
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            new_model = T5ForConditionalGeneration.from_pretrained(tmp_dir)
            # moved to generation config
            self.assertTrue(new_model.generation_config.num_beams == 2)
            self.assertTrue(new_model.generation_config.early_stopping is True)
            # reset in the model config
            self.assertTrue(new_model.config.num_beams is None)
            self.assertTrue(new_model.config.early_stopping is None)

        # Sanity check: We can run `generate` with the new model without any warnings
        random_ids = torch.randint(0, 100, (1, 5))
        with warnings.catch_warnings(record=True) as w:
            new_model.generate(random_ids, max_new_tokens=3)
        self.assertTrue(len(w) == 0)

    def test_load_model_with_state_dict_only(self):
        # Loading from an explicit `state_dict` + `config` (no checkpoint path) must reproduce the same weights
        # as loading the checkpoint directly.
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        state_dict = model.state_dict()
        config = model.config

        model_loaded = BertModel.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_cache_when_needed_at_train_time(self):
        """
        Some fine-tuning methods require the use of cache, like prefix tuning in PEFT. This test checks that a cache
        is at train time used if we request it. Related issue: #35648
        """
        model = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL)
        tokenizer = AutoTokenizer.from_pretrained(TINY_MISTRAL)
        model_inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

        # By default it is not training, we have to set it
        self.assertFalse(model.training)
        model.train()

        # If we set `use_cache=True` while training, then a cache is returned
        model_outputs = model(**model_inputs, use_cache=True)
        self.assertIsInstance(model_outputs.past_key_values, DynamicCache)
        self.assertTrue(model.training)

        # simulate injecting virtual tokens like in prefix tuning
        num_virtual_tokens = 3
        past_key_values = [torch.randn(2, 1, 2, num_virtual_tokens, 8)] * 2
        past_key_values = DynamicCache.from_legacy_cache(past_key_values)
        # attention mask must grow to cover the injected virtual tokens
        model_inputs["attention_mask"] = torch.cat(
            (
                model_inputs["attention_mask"],
                torch.ones(1, num_virtual_tokens).to(model_inputs["attention_mask"].device),
            ),
            dim=1,
        )
        model_outputs = model(**model_inputs, past_key_values=past_key_values, use_cache=True)
        self.assertTrue(model.training)

        # We can also disable the cache to skip a few operations, if the training loop doesn't need cache
        model_outputs = model(**model_inputs, use_cache=False)
        self.assertIsNone(model_outputs.past_key_values)
        self.assertTrue(model.training)

    def test_restore_default_dtype_from_pretrained(self):
        """
        Tests that the default torch dtype is restored when an error happens during the loading of a model.
        """
        old_dtype = torch.get_default_dtype()
        # set default type to float32
        torch.set_default_dtype(torch.float32)

        # Mock injection point which is right after the call to `_set_default_dtype`
        original_set_default_dtype = MistralForCausalLM._set_default_dtype

        def debug(*args, **kwargs):
            # call the method as usual, then raise a RuntimeError
            original_set_default_dtype(*args, **kwargs)
            raise RuntimeError

        with mock.patch(
            "transformers.models.mistral.modeling_mistral.MistralForCausalLM._set_default_dtype",
            side_effect=debug,
        ):
            with self.assertRaises(RuntimeError):
                _ = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL, device_map="auto", dtype=torch.float16)

        # default should still be float32
        assert torch.get_default_dtype() == torch.float32
        torch.set_default_dtype(old_dtype)

    def test_restore_default_dtype_from_config(self):
        """
        Tests that the default torch dtype is restored when an error happens during the loading of a model.
        """
        old_dtype = torch.get_default_dtype()
        # set default type to float32
        torch.set_default_dtype(torch.float32)

        config = AutoConfig.from_pretrained(
            TINY_MISTRAL,
        )

        # Mock injection point which is right after the call to `_set_default_dtype`
        original_set_default_dtype = MistralForCausalLM._set_default_dtype

        def debug(*args, **kwargs):
            # call the method as usual, then raise a RuntimeError
            original_set_default_dtype(*args, **kwargs)
            raise RuntimeError

        with mock.patch(
            "transformers.models.mistral.modeling_mistral.MistralForCausalLM._set_default_dtype",
            side_effect=debug,
        ):
            with self.assertRaises(RuntimeError):
                config.dtype = torch.float16
                _ = AutoModelForCausalLM.from_config(
                    config,
                )

        # default should still be float32
        assert torch.get_default_dtype() == torch.float32
        torch.set_default_dtype(old_dtype)

    def test_unknown_quantization_config(self):
        # An unrecognized `quant_method` in a saved config should only emit a warning, not break loading.
        with tempfile.TemporaryDirectory() as tmpdir:
            config = BertConfig(
                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
            )
            model = BertModel(config)
            config.quantization_config = {"quant_method": "unknown"}
            model.save_pretrained(tmpdir)
            with self.assertLogs("transformers", level="WARNING") as cm:
                BertModel.from_pretrained(tmpdir)
                self.assertEqual(len(cm.records), 1)
                self.assertTrue(cm.records[0].message.startswith("Unknown quantization type, got"))

    @parameterized.expand([("Qwen/Qwen2.5-3B-Instruct", 10), ("meta-llama/Llama-2-7b-chat-hf", 10)])
    @slow
    @require_read_token
    @require_torch_accelerator
    def test_loading_is_fast_on_gpu(self, model_id: str, max_loading_time: float):
        """
        This test is used to avoid regression on https://github.com/huggingface/transformers/pull/36380.
        10s should be more than enough for both models, and allows for some margin as loading time are quite
        unstable. Before #36380, it used to take more than 40s, so 10s is still reasonable.
        Note that we run this test in a subprocess, to ensure that cuda is not already initialized/warmed-up.
        """
        # First download the weights if not already on disk
        _ = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16)

        script_to_run = textwrap.dedent(
            """
            import torch
            import time
            import argparse
            from transformers import AutoModelForCausalLM
            from transformers.utils import is_torch_accelerator_available

            parser = argparse.ArgumentParser()
            parser.add_argument("model_id", type=str)
            parser.add_argument("max_loading_time", type=float)
            args = parser.parse_args()

            device_type = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
            device = torch.device(f"{device_type}:0")
            torch_accelerator_module = getattr(torch, device_type, torch.cuda)
            torch_accelerator_module.synchronize(device)
            t0 = time.time()
            model = AutoModelForCausalLM.from_pretrained(args.model_id, dtype=torch.float16, device_map=device)
            torch_accelerator_module.synchronize(device)
            dt = time.time() - t0

            # Assert loading is faster (it should be more than enough in both cases)
            if dt > args.max_loading_time:
                raise ValueError(f"Loading took {dt:.2f}s! It should not take more than {args.max_loading_time}s")

            # Ensure everything is correctly loaded on accelerator
            bad_device_params = {k for k, v in model.named_parameters() if v.device != device}
            if len(bad_device_params) > 0:
                raise ValueError(f"The following parameters are not on accelerator: {bad_device_params}")
            """
        )

        with tempfile.NamedTemporaryFile(mode="w+", suffix=".py") as tmp:
            tmp.write(script_to_run)
            tmp.flush()
            tmp.seek(0)

            cmd = f"python {tmp.name} {model_id} {max_loading_time}".split()
            try:
                # We cannot use a timeout of `max_loading_time` as cuda initialization can take up to 15-20s
                _ = subprocess.run(cmd, capture_output=True, env=self.get_env(), text=True, check=True, timeout=60)
            except subprocess.CalledProcessError as e:
                raise Exception(f"The following error was captured: {e.stderr}")

    def test_explicit_transformers_weights(self):
        """
        Transformers supports loading from repos where the weights file is explicitly set in the config.
        When loading a config file, transformers will see whether `transformers_weights` is defined in the config.
        If so, it will load from that file.

        Here, we ensure that the correct file is loaded.
        """
        model = BertModel.from_pretrained("hf-internal-testing/explicit_transformers_weight_in_config")
        self.assertEqual(model.num_parameters(), 87929)

    def test_explicit_transformers_weights_index(self):
        """
        Transformers supports loading from repos where the weights file is explicitly set in the config.
        When loading a config file, transformers will see whether `transformers_weights` is defined in the config.
        If so, it will load from that file.

        Here, we ensure that the correct file is loaded, given the file is an index of multiple weights.
        """
        model = BertModel.from_pretrained("hf-internal-testing/explicit_transformers_weight_in_config_sharded")
        self.assertEqual(model.num_parameters(), 87929)

    def test_explicit_transformers_weights_save_and_reload(self):
        """
        Transformers supports loading from repos where the weights file is explicitly set in the config.
        When loading a config file, transformers will see whether `transformers_weights` is defined in the config.
        If so, it will load from that file.

        When saving the model, we should be careful not to save the `transformers_weights` attribute in the config;
        otherwise, transformers will try to load from that file whereas it should simply load from the default file.

        We test that for a non-sharded repo.
        """
        model = BertModel.from_pretrained("hf-internal-testing/explicit_transformers_weight_in_config")
        explicit_transformers_weights = model.config.transformers_weights

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            # The config should not have a mention of transformers_weights
            with open(os.path.join(tmpdirname, "config.json")) as f:
                config = json.loads(f.read())
            self.assertFalse("transformers_weights" in config)

            # The serialized weights should be in model.safetensors and not the transformers_weights
            self.assertTrue(explicit_transformers_weights not in os.listdir(tmpdirname))
            self.assertTrue("model.safetensors" in os.listdir(tmpdirname))

    def test_explicit_transformers_weights_index_save_and_reload(self):
        """
        Transformers supports loading from repos where the weights file is explicitly set in the config.
        When loading a config file, transformers will see whether `transformers_weights` is defined in the config.
        If so, it will load from that file.

        When saving the model, we should be careful not to save the `transformers_weights` attribute in the config;
        otherwise, transformers will try to load from that file whereas it should simply load from the default file.

        We test that for a sharded repo.
""" model = BertModel.from_pretrained("hf-internal-testing/explicit_transformers_weight_in_config_sharded") explicit_transformers_weights = model.config.transformers_weights with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, max_shard_size="100kb") # The config should not have a mention of transformers_weights with open(os.path.join(tmpdirname, "config.json")) as f: config = json.loads(f.read()) self.assertFalse("transformers_weights" in config) # The serialized weights should be in model.safetensors and not the transformers_weights self.assertTrue(explicit_transformers_weights not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors.index.json" in os.listdir(tmpdirname)) def test_config_class_attribute(self): # custom configs class MyConfigA(PretrainedConfig): pass class MyConfigB(PretrainedConfig): pass class MyConfigC(PretrainedConfig): pass # custom models class MyModelA(PreTrainedModel): config: dict config_class = MyConfigA class MyModelB(MyModelA): config: MyConfigB class MyModelC(MyModelA): config_class = MyConfigC class MyModelD(MyModelA): pass # child config_class > child 'config:' > parent config_class > parent 'config:' self.assertIs(MyModelA.config_class, MyConfigA) self.assertIs(MyModelB.config_class, MyConfigB) self.assertIs(MyModelC.config_class, MyConfigC) self.assertIs(MyModelD.config_class, MyConfigA) @slow @require_torch class ModelOnTheFlyConversionTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.user = "huggingface-hub-ci" cls.token = os.getenv("HUGGINGFACE_PRODUCTION_USER_TOKEN", None) if cls.token is None: raise ValueError("Cannot run tests as secret isn't setup.") cls.api = HfApi(token=cls.token) def setUp(self) -> None: self.repo_name = f"{self.user}/test-model-on-the-fly-{uuid.uuid4()}" def tearDown(self) -> None: self.api.delete_repo(self.repo_name) def test_safetensors_on_the_fly_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, 
num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name, token=self.token) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( 
f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb") converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub( self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb", private=True ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with 
self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, max_shard_size="200kb", safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") @unittest.skip(reason="Edge case, should work once the Space is updated`") def test_safetensors_on_the_fly_wrong_user_opened_pr(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) # This should 
have opened a PR with the user's account with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") # We now switch the repo visibility to public self.api.update_repo_settings(self.repo_name, private=False) # We once again call from_pretrained, which should call the bot to open a PR BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertbot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_specific_revision(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # Push a model on a given revision initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, revision="new-branch") # Try to convert the model on that revision should raise with self.assertRaises(EnvironmentError): BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token, revision="new-branch") def test_absence_of_safetensors_triggers_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # 
Download the model that doesn't have safetensors BertModel.from_pretrained(self.repo_name, token=self.token) for thread in threading.enumerate(): if thread.name == "Thread-autoconversion": thread.join(timeout=10) discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertbot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") @mock.patch("transformers.safetensors_conversion.spawn_conversion") def test_absence_of_safetensors_triggers_conversion_failed(self, spawn_conversion_mock): spawn_conversion_mock.side_effect = HTTPError() config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # The auto conversion is mocked to always raise; ensure that it doesn't raise in the main thread BertModel.from_pretrained(self.repo_name, token=self.token) @require_torch @is_staging_test class ModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @unittest.skip(reason="This test is flaky") def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub(tmp_repo.repo_id, token=self._token) new_model = BertModel.from_pretrained(tmp_repo.repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @unittest.skip(reason="This test is flaky") def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = 
BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_model = BertModel.from_pretrained(tmp_repo.repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_with_description(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) COMMIT_DESCRIPTION = """ The commit description supports markdown synthax see: ```python >>> form transformers import AutoConfig >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased") ``` """ commit_details = model.push_to_hub( tmp_repo.repo_id, use_auth_token=self._token, create_pr=True, commit_description=COMMIT_DESCRIPTION ) self.assertEqual(commit_details.commit_description, COMMIT_DESCRIPTION) @unittest.skip(reason="This test is flaky") def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub(tmp_repo.repo_id, token=self._token) new_model = BertModel.from_pretrained(tmp_repo.repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @unittest.skip(reason="This test is flaky") def test_push_to_hub_in_organization_via_save_pretrained(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) # Push to hub via 
            # save_pretrained
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id=tmp_repo.repo_id)

            new_model = BertModel.from_pretrained(tmp_repo.repo_id)
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.equal(p1, p2))

    def test_push_to_hub_dynamic_model(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            CustomConfig.register_for_auto_class()
            CustomModel.register_for_auto_class()

            config = CustomConfig(hidden_size=32)
            model = CustomModel(config)

            model.push_to_hub(tmp_repo.repo_id, token=self._token)
            # checks
            self.assertDictEqual(
                config.auto_map,
                {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"},
            )

            new_model = AutoModel.from_pretrained(tmp_repo.repo_id, trust_remote_code=True)
            # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module
            self.assertEqual(new_model.__class__.__name__, "CustomModel")
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.equal(p1, p2))

            config = AutoConfig.from_pretrained(tmp_repo.repo_id, trust_remote_code=True)
            new_model = AutoModel.from_config(config, trust_remote_code=True)
            self.assertEqual(new_model.__class__.__name__, "CustomModel")

    def test_push_to_hub_with_tags(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            from huggingface_hub import ModelCard

            new_tags = ["tag-1", "tag-2"]

            CustomConfig.register_for_auto_class()
            CustomModel.register_for_auto_class()

            config = CustomConfig(hidden_size=32)
            model = CustomModel(config)

            self.assertTrue(model.model_tags is None)

            model.add_model_tags(new_tags)

            self.assertTrue(model.model_tags == new_tags)

            model.push_to_hub(tmp_repo.repo_id, token=self._token)

            # Tags added via `add_model_tags` must end up in the pushed model card metadata.
            loaded_model_card = ModelCard.load(tmp_repo.repo_id)
            self.assertEqual(loaded_model_card.data.tags, new_tags)


@require_torch
class AttentionMaskTester(unittest.TestCase):
    # Exercises `AttentionMaskConverter` (2D -> 4D expansion, causal masks, sliding window).

    def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d):
        # Every position that is 0 in the 2D mask must be fully masked (-inf or dtype min) in the 4D mask.
        mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len))
        mask_4d_values = mask_4d[:, 0][mask_indices]
        is_inf = mask_4d_values == -float("inf")
        is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min
        assert torch.logical_or(is_inf, is_min).all()

    def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3):
        # Builds an all-ones 2D mask (optionally zeroing `additional_mask` (batch, seq) entries),
        # expands it to 4D, and verifies the number of masked entries for each converter configuration.
        mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long)

        if additional_mask is not None:
            for bsz_idx, seq_idx in additional_mask:
                mask_2d[bsz_idx, seq_idx] = 0

        mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len, dtype=torch.float32)

        assert mask_4d.shape == (bsz, 1, q_len, kv_len)

        # make sure there are no overflows
        assert mask_4d.min() != float("-inf")

        context = mask_converter.sliding_window
        if mask_converter.is_causal and context is None:
            # k * (k+1) / 2 tokens are masked in triangular masks
            num_tokens_masked = bsz * (q_len * (q_len - 1) // 2)

            if 0 not in mask_2d:
                assert (mask_4d != 0).sum().item() == num_tokens_masked
            if 0 in mask_2d:
                # at least causal mask + maybe more
                assert (mask_4d != 0).sum().item() >= num_tokens_masked
                self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d)
        elif not mask_converter.is_causal and context is None:
            if 0 not in mask_2d:
                assert (mask_4d != 0).sum().item() == 0
            if 0 in mask_2d:
                self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d)
        elif mask_converter.is_causal and context is not None:
            # k * (k+1) / 2 tokens are masked in triangular masks
            num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len)
            num_tokens_masked = bsz * num_tokens_masked

            if 0 not in mask_2d:
                assert (mask_4d != 0).sum().item() == num_tokens_masked
            if 0 in mask_2d:
                # at least causal mask + maybe more
                assert (mask_4d != 0).sum().item() >= num_tokens_masked
                self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d)

    def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3):
        # Same accounting as `check_to_4d`, but for a purely causal 4D mask built from shapes alone.
        mask_4d = mask_converter.to_causal_4d(
            bsz, query_length=q_len, key_value_length=kv_len, device=torch_device, dtype=torch.float32
        )

        if q_len == 1 and mask_converter.sliding_window is None:
            # no causal mask if q_len is 1
            assert mask_4d is None
            return

        context = mask_converter.sliding_window
        if mask_converter.is_causal and context is None:
            # k * (k+1) / 2 tokens are masked in triangular masks
            num_tokens_masked = bsz * (q_len * (q_len - 1) // 2)
            assert (mask_4d != 0).sum().item() == num_tokens_masked
        elif not mask_converter.is_causal and context is None:
            assert (mask_4d != 0).sum().item() == 0
        elif mask_converter.is_causal and context is not None:
            # k * (k+1) / 2 tokens are masked in triangular masks
            num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len)
            num_tokens_masked = bsz * num_tokens_masked
            assert (mask_4d != 0).sum().item() == num_tokens_masked

    def compute_num_context_mask(self, kv_len, context, q_len):
        # This function computes the # of attention tokens that are added for
        # the sliding window
        c_mask_len = kv_len - context - 1
        num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2
        cut_mask_len = max(c_mask_len - q_len, 0)
        num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2
        return num_mask_triangle - num_cut_mask

    def test_2d_to_4d_causal(self):
        mask_converter = AttentionMaskConverter(is_causal=True)

        # auto-regressive use case
        self.check_to_4d(mask_converter, q_len=1, kv_len=7)
        # special auto-regressive case
        self.check_to_4d(mask_converter, q_len=3, kv_len=7)
        # non auto-regressive case
        self.check_to_4d(mask_converter, q_len=7, kv_len=7)

        # same with extra attention masks
        self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
        self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
        self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])

        # check that the mask does not overflow on causal masked tokens
        self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)])

    def test_2d_to_4d(self):
        mask_converter = AttentionMaskConverter(is_causal=False)

        # non auto-regressive case
        self.check_to_4d(mask_converter, q_len=7, kv_len=7)

        # same with extra attention masks
        self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])

    def test_2d_to_4d_causal_sliding(self):
        mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=5)

        # auto-regressive use case
        self.check_to_4d(mask_converter, q_len=1, kv_len=7)
        # special auto-regressive case
        self.check_to_4d(mask_converter, q_len=3, kv_len=7)
        # non auto-regressive case
        self.check_to_4d(mask_converter, q_len=7, kv_len=7)

        # same with extra attention masks
        self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
        self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
        self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])

    def test_causal_mask(self):
        mask_converter = AttentionMaskConverter(is_causal=True)

        # auto-regressive use case
        self.check_to_causal(mask_converter, q_len=1, kv_len=7)
        # special auto-regressive case
        self.check_to_causal(mask_converter, q_len=3, kv_len=7)
        # non auto-regressive case
        self.check_to_causal(mask_converter, q_len=7, kv_len=7)

    def test_causal_mask_sliding(self):
        mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=3)

        # auto-regressive use case
        self.check_to_causal(mask_converter, q_len=1, kv_len=7)
        # special auto-regressive case
        self.check_to_causal(mask_converter, q_len=3, kv_len=7)
        # non auto-regressive case
        self.check_to_causal(mask_converter, q_len=7, kv_len=7)

    @pytest.mark.torch_compile_test
    def test_torch_compile_fullgraph(self):
        # The mask-building helpers must be traceable by torch.compile with fullgraph=True
        # (no graph breaks) and produce the same results as eager mode.
        model = Prepare4dCausalAttentionMaskModel()

        inputs_embeds = torch.rand([1, 3, 32])
        res_non_compiled = model(inputs_embeds)

        compiled_model = torch.compile(model, fullgraph=True)

        res_compiled = compiled_model(inputs_embeds)

        self.assertTrue(torch.equal(res_non_compiled, res_compiled))

        model = Create4dCausalAttentionMaskModel()

        inputs_embeds = torch.rand(2, 4, 16)
        res_non_compiled = model(inputs_embeds)

        compiled_model = torch.compile(model, fullgraph=True)
        res_compiled = compiled_model(inputs_embeds)

        self.assertTrue(torch.equal(res_non_compiled, res_compiled))

        model = Prepare4dAttentionMaskModel()

        mask = torch.ones(2, 4)
        mask[0, :2] = 0
        inputs_embeds = torch.rand(2, 4, 16)

        res_non_compiled = model(mask, inputs_embeds)

        compiled_model = torch.compile(model, fullgraph=True)
        res_compiled = compiled_model(mask, inputs_embeds)

        self.assertTrue(torch.equal(res_non_compiled, res_compiled))

    @require_torch
    @slow
    def test_unmask_unattended_left_padding(self):
        # Rows that are fully masked (left padding) should be "unmasked" by `_unmask_unattended`
        # to avoid NaNs in SDPA; attended rows must be left untouched.
        attention_mask = torch.Tensor([[0, 0, 1], [1, 1, 1], [0, 1, 1]]).to(torch.int64)

        expanded_mask = torch.Tensor(
            [
                [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]],
                [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]],
                [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]],
            ]
        ).to(torch.int64)

        reference_output = torch.Tensor(
            [
                [[[1, 1, 1], [1, 1, 1], [0, 0, 1]]],
                [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]],
                [[[1, 1, 1], [0, 1, 0], [0, 1, 1]]],
            ]
        ).to(torch.int64)

        result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=1)

        self.assertTrue(torch.equal(result, reference_output))

        attention_mask = torch.Tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1]]).to(torch.int64)

        attn_mask_converter = AttentionMaskConverter(is_causal=True)
        past_key_values_length = 0
        key_value_length = attention_mask.shape[-1] + past_key_values_length

        expanded_mask = attn_mask_converter.to_4d(
            attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32
        )

        result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0)

        min_inf = torch.finfo(torch.float32).min
        reference_output = torch.Tensor(
            [
                [
                    [
                        [0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0],
                        [min_inf, min_inf, 0, min_inf, min_inf],
                        [min_inf, min_inf, 0, 0, min_inf],
                        [min_inf, min_inf, 0, 0, 0],
                    ]
                ],
                [
                    [
                        [0, min_inf, min_inf, min_inf, min_inf],
                        [0, 0, min_inf, min_inf, min_inf],
                        [0, 0, 0, min_inf, min_inf],
                        [0, 0, 0, 0, min_inf],
                        [0, 0, 0, 0, 0],
                    ]
                ],
                [
                    [
                        [0, 0, 0, 0, 0],
                        [min_inf, 0, min_inf, min_inf, min_inf],
                        [min_inf, 0, 0, min_inf, min_inf],
                        [min_inf, 0, 0, 0, min_inf],
                        [min_inf, 0, 0, 0, 0],
                    ]
                ],
            ]
        )

        self.assertTrue(torch.equal(reference_output, result))

    @require_torch
    @slow
    def test_unmask_unattended_right_padding(self):
        # With right padding no row is fully masked, so `_unmask_unattended` must be a no-op.
        attention_mask = torch.Tensor([[1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 0, 0]]).to(torch.int64)

        attn_mask_converter = AttentionMaskConverter(is_causal=True)
        past_key_values_length = 0
        key_value_length = attention_mask.shape[-1] + past_key_values_length

        expanded_mask = attn_mask_converter.to_4d(
            attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32
        )

        result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0)

        self.assertTrue(torch.equal(expanded_mask, result))

    @require_torch
    @slow
    def test_unmask_unattended_random_mask(self):
        # Arbitrary hole patterns (first token attended in every row) must also be left untouched.
        attention_mask = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]]).to(torch.int64)

        attn_mask_converter = AttentionMaskConverter(is_causal=True)
        past_key_values_length = 0
        key_value_length = attention_mask.shape[-1] + past_key_values_length

        expanded_mask = attn_mask_converter.to_4d(
            attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32
        )

        result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0)

        self.assertTrue(torch.equal(expanded_mask, result))


@require_torch
class TestAttentionImplementation(unittest.TestCase):
    # Checks the error messages raised when requesting an attention implementation
    # the model class (or the environment) does not support.

    @unittest.skip("Just a bit annoying")
    def test_error_no_sdpa_available(self):
        with self.assertRaises(ValueError) as cm:
            _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa")

        self.assertTrue(
            "does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention"
            in str(cm.exception)
        )

        _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel")

    def test_error_no_flash_available(self):
        with self.assertRaises(ValueError) as cm:
            _ = AutoModel.from_pretrained(
                "hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2"
            )

        self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))

    def test_error_no_flash_available_with_config(self):
        with self.assertRaises(ValueError) as cm:
            config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel")

            _ = AutoModel.from_pretrained(
                "hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2"
            )

        self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))

    def test_error_wrong_attn_implementation(self):
        with self.assertRaises(ValueError) as cm:
            _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo")

        self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception))

    def test_not_available_flash(self):
        if is_flash_attn_2_available():
            self.skipTest(reason="Please uninstall flash-attn package to run test_not_available_flash")

        if is_torch_npu_available():
            self.skipTest(
                reason="FlashAttention2 is supported on Ascend NPU without using package `flash-attn`, ignore this test case."
            )

        with self.assertRaises(ImportError) as cm:
            _ = AutoModel.from_pretrained(
                "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2"
            )

        self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))

    def test_not_available_flash_with_config(self):
        if is_flash_attn_2_available():
            self.skipTest(reason="Please uninstall flash-attn package to run test_not_available_flash")

        if is_torch_npu_available():
            self.skipTest(
                reason="FlashAttention2 is supported on Ascend NPU without using package `flash-attn`, ignore this test case."
            )

        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel")

        with self.assertRaises(ImportError) as cm:
            _ = AutoModel.from_pretrained(
                "hf-internal-testing/tiny-random-GPTBigCodeModel",
                config=config,
                attn_implementation="flash_attention_2",
            )

        self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))


@require_torch
class TestTensorSharing(TestCasePlus):
    def test_disjoint(self):
        # Two non-overlapping views of the same storage are "disjoint" (safe to save separately);
        # interleaved views share elements and must be reported as shared.
        main = torch.zeros(10)
        a = main[:5]
        b = main[5:]
        state_dict = {"a": a, "b": b}

        shared_names, disjoint_names = _find_disjoint([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [])
        self.assertEqual(disjoint_names, ["a", "b"])

        a = main[::2]
        b = main[1::2]
        state_dict = {"a": a, "b": b}

        shared_names, disjoint_names = _find_disjoint([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [{"a", "b"}])
        self.assertEqual(disjoint_names, [])

    def test_identical(self):
        # Two names bound to the exact same tensor are "identical"; a sub-view of the
        # same storage is merely shared, not identical.
        a = torch.zeros(10)
        b = a
        state_dict = {"a": a, "b": b}

        shared_names, identical_names = _find_identical([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [])
        self.assertEqual(identical_names, [{"a", "b"}])

        b = a[:5]
        state_dict = {"a": a, "b": b}

        shared_names, identical_names = _find_identical([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [{"a", "b"}])
        self.assertEqual(identical_names, [])


@require_torch
class TestSaveAndLoadModelWithExtraState(TestCasePlus):
    """
    This test checks
that a model can be saved and loaded that uses the torch extra state API. https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_extra_state. Currently, only tensor-valued extra_states are supported. """ def test_save_and_load_model_with_tensor_extra_state(self): class MyConfig(PretrainedConfig): def __init__(self, **kwargs): super().__init__(**kwargs) class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.some_counter = 0 self.linear = torch.nn.Linear(320, 320) def get_extra_state(self): return torch.tensor(self.some_counter) def set_extra_state(self, state): self.some_counter = state.item() class MyModel(PreTrainedModel): config_class = MyConfig def __init__(self, config: MyConfig): super().__init__(config) self.my_layer = MyModule() def forward(self, hidden_states, attention_mask): return self.my_layer(hidden_states, attention_mask) config = MyConfig() model = MyModel(config) model.my_layer.some_counter = 42 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = MyModel.from_pretrained(tmpdirname) self.assertEqual(model.my_layer.some_counter, 42) @mark.xfail(reason="save and from_pretrained currently only supports tensor extra_state") def test_save_and_load_model_with_dict_extra_state(self): class MyConfig(PretrainedConfig): def __init__(self, **kwargs): super().__init__(**kwargs) class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.some_counter = 0 self.linear = torch.nn.Linear(320, 320) def get_extra_state(self): return {"some_counter": self.some_counter} def set_extra_state(self, state): self.some_counter = state["some_counter"] class MyModel(PreTrainedModel): config_class = MyConfig def __init__(self, config: MyConfig): super().__init__(config) self.my_layer = MyModule() def forward(self, hidden_states, attention_mask): return self.my_layer(hidden_states, attention_mask) config = MyConfig() model = MyModel(config) model.my_layer.some_counter = 42 
with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = MyModel.from_pretrained(tmpdirname) self.assertEqual(model.my_layer.some_counter, 42)
transformers/tests/utils/test_modeling_utils.py/0
{ "file_path": "transformers/tests/utils/test_modeling_utils.py", "repo_id": "transformers", "token_count": 58665 }
620
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is responsible for cleaning the list of doctests by making sure the entries all exist and are in alphabetical order. Usage (from the root of the repo): Check that the doctest list is properly sorted and all files exist (used in `make repo-consistency`): ```bash python utils/check_doctest_list.py ``` Auto-sort the doctest list if it is not properly sorted (used in `make fix-copies`): ```bash python utils/check_doctest_list.py --fix_and_overwrite ``` """ import argparse import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py REPO_PATH = "." DOCTEST_FILE_PATHS = ["not_doctested.txt", "slow_documentation_tests.txt"] def clean_doctest_list(doctest_file: str, overwrite: bool = False): """ Cleans the doctest in a given file. Args: doctest_file (`str`): The path to the doctest file to check or clean. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix problems. If `False`, will error when the file is not clean. 
""" non_existent_paths = [] all_paths = [] with open(doctest_file, "r", encoding="utf-8") as f: for line in f: line = line.strip().split(" ")[0] path = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(line) if len(non_existent_paths) > 0: non_existent_paths = "\n".join([f"- {f}" for f in non_existent_paths]) raise ValueError(f"`{doctest_file}` contains non-existent paths:\n{non_existent_paths}") sorted_paths = sorted(all_paths) if all_paths != sorted_paths: if not overwrite: raise ValueError( f"Files in `{doctest_file}` are not in alphabetical order, run `make fix-copies` to fix " "this automatically." ) with open(doctest_file, "w", encoding="utf-8") as f: f.write("\n".join(sorted_paths) + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() for doctest_file in DOCTEST_FILE_PATHS: doctest_file = os.path.join(REPO_PATH, "utils", doctest_file) clean_doctest_list(doctest_file, args.fix_and_overwrite)
transformers/utils/check_doctest_list.py/0
{ "file_path": "transformers/utils/check_doctest_list.py", "repo_id": "transformers", "token_count": 1180 }
621
import argparse import os past_versions_testing = { "pytorch": { "1.13": { "torch": "1.13.1", "torchvision": "0.14.1", "torchaudio": "0.13.1", "python": 3.9, "cuda": "cu116", "install": ( "python3 -m pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1" " --extra-index-url https://download.pytorch.org/whl/cu116" ), "base_image": "nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04", }, "1.12": { "torch": "1.12.1", "torchvision": "0.13.1", "torchaudio": "0.12.1", "python": 3.9, "cuda": "cu113", "install": ( "python3 -m pip install --no-cache-dir -U torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1" " --extra-index-url https://download.pytorch.org/whl/cu113" ), "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "1.11": { "torch": "1.11.0", "torchvision": "0.12.0", "torchaudio": "0.11.0", "python": 3.9, "cuda": "cu113", "install": ( "python3 -m pip install --no-cache-dir -U torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0" " --extra-index-url https://download.pytorch.org/whl/cu113" ), "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "1.10": { "torch": "1.10.2", "torchvision": "0.11.3", "torchaudio": "0.10.2", "python": 3.9, "cuda": "cu113", "install": ( "python3 -m pip install --no-cache-dir -U torch==1.10.2 torchvision==0.11.3 torchaudio==0.10.2" " --extra-index-url https://download.pytorch.org/whl/cu113" ), "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, # torchaudio < 0.10 has no CUDA-enabled binary distributions "1.9": { "torch": "1.9.1", "torchvision": "0.10.1", "torchaudio": "0.9.1", "python": 3.9, "cuda": "cu111", "install": ( "python3 -m pip install --no-cache-dir -U torch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1" " --extra-index-url https://download.pytorch.org/whl/cu111" ), "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, }, "tensorflow": { "2.11": { "tensorflow": "2.11.1", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.11.1", "base_image": 
"nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "2.10": { "tensorflow": "2.10.1", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.10.1", "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "2.9": { "tensorflow": "2.9.3", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.9.3", "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "2.8": { "tensorflow": "2.8.2", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.8.2", "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "2.7": { "tensorflow": "2.7.3", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.7.3", "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "2.6": { "tensorflow": "2.6.5", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.6.5", "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, "2.5": { "tensorflow": "2.5.3", "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.5.3", "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04", }, }, } if __name__ == "__main__": parser = argparse.ArgumentParser("Choose the framework and version to install") parser.add_argument( "--framework", help="The framework to install. Should be `torch` or `tensorflow`", type=str, required=True ) parser.add_argument("--version", help="The version of the framework to install.", type=str, required=True) args = parser.parse_args() info = past_versions_testing[args.framework][args.version] os.system(f"echo \"export INSTALL_CMD='{info['install']}'\" >> ~/.profile") print(f"echo \"export INSTALL_CMD='{info['install']}'\" >> ~/.profile") cuda = "" if args.framework == "pytorch": cuda = info["cuda"] os.system(f"echo \"export CUDA='{cuda}'\" >> ~/.profile") print(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
transformers/utils/past_ci_versions.py/0
{ "file_path": "transformers/utils/past_ci_versions.py", "repo_id": "transformers", "token_count": 2774 }
622
from transformers import Wav2Vec2FeatureExtractor class CustomFeatureExtractor(Wav2Vec2FeatureExtractor): pass
transformers/utils/test_module/custom_feature_extraction.py/0
{ "file_path": "transformers/utils/test_module/custom_feature_extraction.py", "repo_id": "transformers", "token_count": 37 }
623
.PHONY: test precommit common_tests slow_tests test_examples tests_gpu check_dirs := examples tests trl ACCELERATE_CONFIG_PATH = `pwd`/examples/accelerate_configs COMMAND_FILES_PATH = `pwd`/commands test: pytest -n auto -m "not slow and not low-priority" -s -v --reruns 5 --reruns-delay 1 --only-rerun '(OSError|Timeout|HTTPError.*502|HTTPError.*504||not less than or equal to 0.01)' tests/ precommit: python scripts/add_copyrights.py pre-commit run --all-files doc-builder style trl tests docs/source --max_len 119 slow_tests: pytest -m "slow" tests/ $(if $(IS_GITHUB_CI),--report-log "slow_tests.log",) test_examples: touch temp_results_sft_tests.txt for file in $(ACCELERATE_CONFIG_PATH)/*.yaml; do \ TRL_ACCELERATE_CONFIG=$${file} bash $(COMMAND_FILES_PATH)/run_sft.sh; \ echo $$?','$${file} >> temp_results_sft_tests.txt; \ done touch temp_results_dpo_tests.txt for file in $(ACCELERATE_CONFIG_PATH)/*.yaml; do \ TRL_ACCELERATE_CONFIG=$${file} bash $(COMMAND_FILES_PATH)/run_dpo.sh; \ echo $$?','$${file} >> temp_results_dpo_tests.txt; \ done
trl/Makefile/0
{ "file_path": "trl/Makefile", "repo_id": "trl", "token_count": 434 }
624
# Dataset formats and types This guide provides an overview of the dataset formats and types supported by each trainer in TRL. ## Overview of the dataset formats and types - The *format* of a dataset refers to how the data is structured, typically categorized as either *standard* or *conversational*. - The *type* is associated with the specific task the dataset is designed for, such as *prompt-only* or *preference*. Each type is characterized by its columns, which vary according to the task, as shown in the table. <table> <tr> <th>Type \ Format</th> <th>Standard</th> <th>Conversational</th> </tr> <tr> <td>Language modeling</td> <td> <pre><code>{"text": "The sky is blue."}</code></pre> </td> <td> <pre><code>{"messages": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}]}</code></pre> </td> </tr> <tr> <td>Prompt-only</td> <td> <pre><code>{"prompt": "The sky is"}</code></pre> </td> <td> <pre><code>{"prompt": [{"role": "user", "content": "What color is the sky?"}]}</code></pre> </td> </tr> <tr> <td>Prompt-completion</td> <td> <pre><code>{"prompt": "The sky is", "completion": " blue."}</code></pre> </td> <td> <pre><code>{"prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is blue."}]}</code></pre> </td> </tr> </tr> <tr> <td>Preference</td> <td> <pre><code>{"prompt": "The sky is", "chosen": " blue.", "rejected": " green."}</code></pre> or, with implicit prompt: <pre><code>{"chosen": "The sky is blue.", "rejected": "The sky is green."}</code></pre> </td> <td> <pre><code>{"prompt": [{"role": "user", "content": "What color is the sky?"}], "chosen": [{"role": "assistant", "content": "It is blue."}], "rejected": [{"role": "assistant", "content": "It is green."}]}</code></pre> or, with implicit prompt: <pre><code>{"chosen": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], "rejected": [{"role": "user", 
"content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}]}</code></pre> </td> </tr> <td>Unpaired preference</td> <td> <pre><code>{"prompt": "The sky is", "completion": " blue.", "label": True}</code></pre> </td> <td> <pre><code>{"prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is green."}], "label": False}</code></pre> </td> </tr> </tr> <td>Stepwise supervision</td> <td> <pre><code>{"prompt": "Which number is larger, 9.8 or 9.11?", "completions": ["The fractional part of 9.8 is 0.8.", "The fractional part of 9.11 is 0.11.", "0.11 is greater than 0.8.", "Hence, 9.11 > 9.8."], "labels": [True, True, False, False]}</code></pre> </td> <td></td> </tr> </table> ### Formats #### Standard The standard dataset format typically consists of plain text strings. The columns in the dataset vary depending on the task. This is the format expected by TRL trainers. Below are examples of standard dataset formats for different tasks: ```python # Language modeling language_modeling_example = {"text": "The sky is blue."} # Preference preference_example = {"prompt": "The sky is", "chosen": " blue.", "rejected": " green."} # Unpaired preference unpaired_preference_example = {"prompt": "The sky is", "completion": " blue.", "label": True} ``` #### Conversational Conversational datasets are used for tasks involving dialogues or chat interactions between users and assistants. Unlike standard dataset formats, these contain sequences of messages where each message has a `role` (e.g., `"user"` or `"assistant"`) and `content` (the message text). ```python messages = [ {"role": "user", "content": "Hello, how are you?"}, {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, {"role": "user", "content": "I'd like to show off how chat templating works!"}, ] ``` Just like standard datasets, the columns in conversational datasets vary depending on the task. 
Below are examples of conversational dataset formats for different tasks: ```python # Prompt-completion prompt_completion_example = {"prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is blue."}]} # Preference preference_example = { "prompt": [{"role": "user", "content": "What color is the sky?"}], "chosen": [{"role": "assistant", "content": "It is blue."}], "rejected": [{"role": "assistant", "content": "It is green."}], } ``` Conversational datasets are useful for training chat models, but must be converted into a standard format before being used with TRL trainers. This is typically done using chat templates specific to the model being used. For more information, refer to the [Working with conversational datasets in TRL](#working-with-conversational-datasets-in-trl) section. #### Tool Calling Some chat templates support *tool calling*, which allows the model to interact with external functions—referred to as **tools**—during generation. This extends the conversational capabilities of the model by enabling it to output a `"tool_calls"` field instead of a standard `"content"` message whenever it decides to invoke a tool. After the assistant initiates a tool call, the tool executes and returns its output. The assistant can then process this output and continue the conversation accordingly. Here’s a simple example of a tool-calling interaction: ```python messages = [ {"role": "user", "content": "Turn on the living room lights."}, {"role": "assistant", "tool_calls": [ {"type": "function", "function": { "name": "control_light", "arguments": {"room": "living room", "state": "on"} }}] }, {"role": "tool", "name": "control_light", "content": "The lights in the living room are now on."}, {"role": "assistant", "content": "Done!"} ] ``` When preparing datasets for Supervised Fine-Tuning (SFT) with tool calling, it is important that your dataset includes an additional column named `tools`. 
This column contains the list of available tools for the model, which is usually used by the chat template to construct the system prompt. The tools must be specified in a codified JSON schema format. You can automatically generate this schema from Python function signatures using the [`~transformers.utils.get_json_schema`] utility: ```python from transformers.utils import get_json_schema def control_light(room: str, state: str) -> str: """ Controls the lights in a room. Args: room: The name of the room. state: The desired state of the light ("on" or "off"). Returns: str: A message indicating the new state of the lights. """ return f"The lights in {room} are now {state}." # Generate JSON schema json_schema = get_json_schema(control_light) ``` The generated schema would look like: ```python { "type": "function", "function": { "name": "control_light", "description": "Controls the lights in a room.", "parameters": { "type": "object", "properties": { "room": {"type": "string", "description": "The name of the room."}, "state": {"type": "string", "description": 'The desired state of the light ("on" or "off").'}, }, "required": ["room", "state"], }, "return": {"type": "string", "description": "str: A message indicating the new state of the lights."}, }, } ``` A complete dataset entry for SFT might look like: ```python {"messages": messages, "tools": [json_schema]} ``` For more detailed information on tool calling, refer to the [Tool Calling section in the `transformers` documentation](https://huggingface.co/docs/transformers/chat_extras#tools-and-rag) and the blog post [Tool Use, Unified](https://huggingface.co/blog/unified-tool-use). ### Harmony The [Harmony response format](https://cookbook.openai.com/articles/openai-harmony) was introduced with the [OpenAI GPT OSS models](https://huggingface.co/collections/openai/gpt-oss-68911959590a1634ba11c7a4). 
It extends the conversational format by adding richer structure for reasoning, function calls, and metadata about the model’s behavior. Key features include: - **Developer role** – Provides high level instructions (similar to a system prompt) and lists available tools. - **Channels** – Separate types of assistant output into distinct streams: - `analysis` – for internal reasoning, from the key `"thinking"` - `final` – for the user-facing answer, from the key `"content"` - `commentary` – for tool calls or meta notes - **Reasoning effort** – Signals how much thinking the model should show (e.g., `"low"`, `"medium"`, `"high"`). - **Model identity** – Explicitly defines the assistant’s persona. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b") messages = [ {"role": "developer", "content": "Use a friendly tone."}, {"role": "user", "content": "What is the meaning of life?"}, {"role": "assistant", "thinking": "Deep reflection...", "content": "The final answer is..."}, ] print( tokenizer.apply_chat_template( messages, tokenize=False, reasoning_effort="low", model_identity="You are HuggingGPT, a large language model trained by Hugging Face." ) ) ``` This produces: ```txt <|start|>system<|message|>You are HuggingGPT, a large language model trained by Hugging Face. Knowledge cutoff: 2024-06 Current date: 2025-08-03 Reasoning: low # Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions Use a friendly tone.<|end|><|start|>user<|message|>What is the meaning of life?<|end|><|start|>assistant<|channel|>analysis<|message|>Deep reflection...<|end|><|start|>assistant<|channel|>final<|message|>The final answer is...<|return|> ``` For full details on message structure, supported fields, and advanced usage, see the [Harmony documentation](https://cookbook.openai.com/articles/openai-harmony). 
### Types #### Language modeling A language modeling dataset consists of a column `"text"` (or `"messages"` for conversational datasets) containing a full sequence of text. ```python # Standard format language_modeling_example = {"text": "The sky is blue."} # Conversational format language_modeling_example = {"messages": [ {"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."} ]} ``` #### Prompt-only In a prompt-only dataset, only the initial prompt (the question or partial sentence) is provided under the key `"prompt"`. The training typically involves generating completion based on this prompt, where the model learns to continue or complete the given input. ```python # Standard format prompt_only_example = {"prompt": "The sky is"} # Conversational format prompt_only_example = {"prompt": [{"role": "user", "content": "What color is the sky?"}]} ``` For examples of prompt-only datasets, refer to the [Prompt-only datasets collection](https://huggingface.co/collections/trl-lib/prompt-only-datasets-677ea25245d20252cea00368). <Tip> While both the prompt-only and language modeling types are similar, they differ in how the input is handled. In the prompt-only type, the prompt represents a partial input that expects the model to complete or continue, while in the language modeling type, the input is treated as a complete sentence or sequence. These two types are processed differently by TRL. 
Below is an example showing the difference in the output of the `apply_chat_template` function for each type: ```python from transformers import AutoTokenizer from trl import apply_chat_template tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct") # Example for prompt-only type prompt_only_example = {"prompt": [{"role": "user", "content": "What color is the sky?"}]} apply_chat_template(prompt_only_example, tokenizer) # Output: {'prompt': '<|user|>\nWhat color is the sky?<|end|>\n<|assistant|>\n'} # Example for language modeling type lm_example = {"messages": [{"role": "user", "content": "What color is the sky?"}]} apply_chat_template(lm_example, tokenizer) # Output: {'text': '<|user|>\nWhat color is the sky?<|end|>\n<|endoftext|>'} ``` - The prompt-only output includes a `'<|assistant|>\n'`, indicating the beginning of the assistant’s turn and expecting the model to generate a completion. - In contrast, the language modeling output treats the input as a complete sequence and terminates it with `'<|endoftext|>'`, signaling the end of the text and not expecting any additional content. </Tip> #### Prompt-completion A prompt-completion dataset includes a `"prompt"` and a `"completion"`. ```python # Standard format prompt_completion_example = {"prompt": "The sky is", "completion": " blue."} # Conversational format prompt_completion_example = {"prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is blue."}]} ``` For examples of prompt-completion datasets, refer to the [Prompt-completion datasets collection](https://huggingface.co/collections/trl-lib/prompt-completion-datasets-677ea2bb20bbb6bdccada216). #### Preference A preference dataset is used for tasks where the model is trained to choose between two or more possible completions to the same prompt. This dataset includes a `"prompt"`, a `"chosen"` completion, and a `"rejected"` completion. 
The model is trained to select the `"chosen"` response over the `"rejected"` response. Some datasets may not include the `"prompt"` column, in which case the prompt is implicit and directly included in the `"chosen"` and `"rejected"` completions. We recommend using explicit prompts whenever possible. ```python # Standard format ## Explicit prompt (recommended) preference_example = {"prompt": "The sky is", "chosen": " blue.", "rejected": " green."} # Implicit prompt preference_example = {"chosen": "The sky is blue.", "rejected": "The sky is green."} # Conversational format ## Explicit prompt (recommended) preference_example = {"prompt": [{"role": "user", "content": "What color is the sky?"}], "chosen": [{"role": "assistant", "content": "It is blue."}], "rejected": [{"role": "assistant", "content": "It is green."}]} ## Implicit prompt preference_example = {"chosen": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], "rejected": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}]} ``` For examples of preference datasets, refer to the [Preference datasets collection](https://huggingface.co/collections/trl-lib/preference-datasets-677e99b581018fcad9abd82c). Some preference datasets can be found with [the tag `dpo` on Hugging Face Hub](https://huggingface.co/datasets?other=dpo). You can also explore the [librarian-bots' DPO Collections](https://huggingface.co/collections/librarian-bots/direct-preference-optimization-datasets-66964b12835f46289b6ef2fc) to identify preference datasets. #### Unpaired preference An unpaired preference dataset is similar to a preference dataset but instead of having `"chosen"` and `"rejected"` completions for the same prompt, it includes a single `"completion"` and a `"label"` indicating whether the completion is preferred or not. 
```python # Standard format unpaired_preference_example = {"prompt": "The sky is", "completion": " blue.", "label": True} # Conversational format unpaired_preference_example = {"prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is blue."}], "label": True} ``` For examples of unpaired preference datasets, refer to the [Unpaired preference datasets collection](https://huggingface.co/collections/trl-lib/unpaired-preference-datasets-677ea22bf5f528c125b0bcdf). #### Stepwise supervision A stepwise (or process) supervision dataset is similar to an [unpaired preference](#unpaired-preference) dataset but includes multiple steps of completions, each with its own label. This structure is useful for tasks that need detailed, step-by-step labeling, such as reasoning tasks. By evaluating each step separately and providing targeted labels, this approach helps identify precisely where the reasoning is correct and where errors occur, allowing for targeted feedback on each part of the reasoning process. ```python stepwise_example = { "prompt": "Which number is larger, 9.8 or 9.11?", "completions": ["The fractional part of 9.8 is 0.8, while the fractional part of 9.11 is 0.11.", "Since 0.11 is greater than 0.8, the number 9.11 is larger than 9.8."], "labels": [True, False] } ``` For examples of stepwise supervision datasets, refer to the [Stepwise supervision datasets collection](https://huggingface.co/collections/trl-lib/stepwise-supervision-datasets-677ea27fd4c5941beed7a96e). ## Which dataset type to use? Choosing the right dataset type depends on the task you are working on and the specific requirements of the TRL trainer you are using. Below is a brief overview of the dataset types supported by each TRL trainer. 
| Trainer | Expected dataset type | | ----------------------- | ------------------------------------------------------------------------------------------------------ | | [`BCOTrainer`] | [Unpaired preference](#unpaired-preference) or [Preference (explicit prompt recommended)](#preference) | | [`CPOTrainer`] | [Preference (explicit prompt recommended)](#preference) | | [`DPOTrainer`] | [Preference (explicit prompt recommended)](#preference) | | [`GKDTrainer`] | [Prompt-completion](#prompt-completion) | | [`GRPOTrainer`] | [Prompt-only](#prompt-only) | | [`IterativeSFTTrainer`] | [Unpaired preference](#unpaired-preference) | | [`KTOTrainer`] | [Unpaired preference](#unpaired-preference) or [Preference (explicit prompt recommended)](#preference) | | [`NashMDTrainer`] | [Prompt-only](#prompt-only) | | [`OnlineDPOTrainer`] | [Prompt-only](#prompt-only) | | [`ORPOTrainer`] | [Preference (explicit prompt recommended)](#preference) | | [`PPOTrainer`] | Tokenized language modeling | | [`PRMTrainer`] | [Stepwise supervision](#stepwise-supervision) | | [`RewardTrainer`] | [Preference (implicit prompt recommended)](#preference) | | [`SFTTrainer`] | [Language modeling](#language-modeling) or [Prompt-completion](#prompt-completion) | | [`XPOTrainer`] | [Prompt-only](#prompt-only) | <Tip> TRL trainers only support standard dataset formats, [for now](https://github.com/huggingface/trl/issues/2071). If you have a conversational dataset, you must first convert it into a standard format. For more information on how to work with conversational datasets, refer to the [Working with conversational datasets in TRL](#working-with-conversational-datasets-in-trl) section. </Tip> ## Working with conversational datasets in TRL Conversational datasets are increasingly common, especially for training chat models. However, some TRL trainers don't support conversational datasets in their raw format. (For more information, see [issue #2071](https://github.com/huggingface/trl/issues/2071).) 
These datasets must first be converted into a standard format. Fortunately, TRL offers tools to easily handle this conversion, which are detailed below. ### Converting a conversational dataset into a standard dataset To convert a conversational dataset into a standard dataset, you need to _apply a chat template_ to the dataset. A chat template is a predefined structure that typically includes placeholders for user and assistant messages. This template is provided by the tokenizer of the model you use. For detailed instructions on using chat templating, refer to the [Chat templating section in the `transformers` documentation](https://huggingface.co/docs/transformers/en/chat_templating). In TRL, the method you apply to convert the dataset will vary depending on the task. Fortunately, TRL provides a helper function called [`apply_chat_template`] to simplify this process. Here's an example of how to use it: ```python from transformers import AutoTokenizer from trl import apply_chat_template tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct") example = { "prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is blue."}] } apply_chat_template(example, tokenizer) # Output: # {'prompt': '<|user|>\nWhat color is the sky?<|end|>\n<|assistant|>\n', 'completion': 'It is blue.<|end|>\n<|endoftext|>'} ``` Alternatively, you can use the [`~datasets.Dataset.map`] method to apply the template across an entire dataset: ```python from datasets import Dataset from trl import apply_chat_template dataset_dict = { "prompt": [[{"role": "user", "content": "What color is the sky?"}], [{"role": "user", "content": "Where is the sun?"}]], "completion": [[{"role": "assistant", "content": "It is blue."}], [{"role": "assistant", "content": "In the sky."}]] } dataset = Dataset.from_dict(dataset_dict) dataset = dataset.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer}) # Output: # {'prompt': 
['<|user|>\nWhat color is the sky?<|end|>\n<|assistant|>\n', # '<|user|>\nWhere is the sun?<|end|>\n<|assistant|>\n'], # 'completion': ['It is blue.<|end|>\n<|endoftext|>', 'In the sky.<|end|>\n<|endoftext|>']} ``` <Tip warning={true}> We recommend using the [`apply_chat_template`] function instead of calling `tokenizer.apply_chat_template` directly. Handling chat templates for non-language modeling datasets can be tricky and may result in errors, such as mistakenly placing a system prompt in the middle of a conversation. For additional examples, see [#1930 (comment)](https://github.com/huggingface/trl/pull/1930#issuecomment-2292908614). The [`apply_chat_template`] is designed to handle these intricacies and ensure the correct application of chat templates for various tasks. </Tip> <Tip warning={true}> It's important to note that chat templates are model-specific. For example, if you use the chat template from [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) with the above example, you get a different output: ```python apply_chat_template(example, AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")) # Output: # {'prompt': '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nWhat color is the sky?<|im_end|>\n<|im_start|>assistant\n', # 'completion': 'It is blue.<|im_end|>\n'} ``` Always use the chat template associated with the model you're working with. Using the wrong template can lead to inaccurate or unexpected results. </Tip> ## Using any dataset with TRL: preprocessing and conversion Many datasets come in formats tailored to specific tasks, which might not be directly compatible with TRL. To use such datasets with TRL, you may need to preprocess and convert them into the required format. To make this easier, we provide a set of [example scripts](https://github.com/huggingface/trl/tree/main/examples/datasets) that cover common dataset conversions. 
### Example: UltraFeedback dataset Let’s take the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback) as an example. Here's a preview of the dataset: <iframe src="https://huggingface.co/datasets/openbmb/UltraFeedback/embed/viewer/default/train" frameborder="0" width="100%" height="560px" ></iframe> As shown above, the dataset format does not match the expected structure. It’s not in a conversational format, the column names differ, and the results pertain to different models (e.g., Bard, GPT-4) and aspects (e.g., "helpfulness", "honesty"). By using the provided conversion script [`examples/datasets/ultrafeedback.py`](https://github.com/huggingface/trl/tree/main/examples/datasets/ultrafeedback.py), you can transform this dataset into an unpaired preference type, and push it to the Hub: ```sh python examples/datasets/ultrafeedback.py --push_to_hub --repo_id trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness ``` Once converted, the dataset will look like this: <iframe src="https://huggingface.co/datasets/trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness/embed/viewer/default/train?row=0" frameborder="0" width="100%" height="560px" ></iframe> Now, you can use this dataset with TRL! By adapting the provided scripts or creating your own, you can convert any dataset into a format compatible with TRL. ## Utilities for converting dataset types This section provides example code to help you convert between different dataset types. While some conversions can be performed after applying the chat template (i.e., in the standard format), we recommend performing the conversion before applying the chat template to ensure it works consistently. For simplicity, some of the examples below do not follow this recommendation and use the standard format. However, the conversions can be applied directly to the conversational format without modification. 
| From \ To | Language modeling | Prompt-completion | Prompt-only | Preference with implicit prompt | Preference | Unpaired preference | Stepwise supervision | | ------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------- | --------------------------------------------------------- | --------------------------------------------------------- | ------------------------------------------------------------------------- | -------------------- | | Language modeling | N/A | N/A | N/A | N/A | N/A | N/A | N/A | | Prompt-completion | [🔗](#from-prompt-completion-to-language-modeling-dataset) | N/A | [🔗](#from-prompt-completion-to-prompt-only-dataset) | N/A | N/A | N/A | N/A | | Prompt-only | N/A | N/A | N/A | N/A | N/A | N/A | N/A | | Preference with implicit prompt | [🔗](#from-preference-with-implicit-prompt-to-language-modeling-dataset) | [🔗](#from-preference-with-implicit-prompt-to-prompt-completion-dataset) | [🔗](#from-preference-with-implicit-prompt-to-prompt-only-dataset) | N/A | [🔗](#from-implicit-to-explicit-prompt-preference-dataset) | [🔗](#from-preference-with-implicit-prompt-to-unpaired-preference-dataset) | N/A | | Preference | [🔗](#from-preference-to-language-modeling-dataset) | [🔗](#from-preference-to-prompt-completion-dataset) | [🔗](#from-preference-to-prompt-only-dataset) | [🔗](#from-explicit-to-implicit-prompt-preference-dataset) | N/A | [🔗](#from-preference-to-unpaired-preference-dataset) | N/A | | Unpaired preference | [🔗](#from-unpaired-preference-to-language-modeling-dataset) | [🔗](#from-unpaired-preference-to-prompt-completion-dataset) | [🔗](#from-unpaired-preference-to-prompt-only-dataset) | N/A | N/A | N/A | N/A | | Stepwise supervision | [🔗](#from-stepwise-supervision-to-language-modeling-dataset) | [🔗](#from-stepwise-supervision-to-prompt-completion-dataset) | 
[🔗](#from-stepwise-supervision-to-prompt-only-dataset) | N/A | N/A | [🔗](#from-stepwise-supervision-to-unpaired-preference-dataset) | N/A | ### From prompt-completion to language modeling dataset To convert a prompt-completion dataset into a language modeling dataset, concatenate the prompt and the completion. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is"], "completion": [" blue.", " in the sky."], }) def concat_prompt_completion(example): return {"text": example["prompt"] + example["completion"]} dataset = dataset.map(concat_prompt_completion, remove_columns=["prompt", "completion"]) ``` ```python >>> dataset[0] {'text': 'The sky is blue.'} ``` ### From prompt-completion to prompt-only dataset To convert a prompt-completion dataset into a prompt-only dataset, remove the completion. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is"], "completion": [" blue.", " in the sky."], }) dataset = dataset.remove_columns("completion") ``` ```python >>> dataset[0] {'prompt': 'The sky is'} ``` ### From preference with implicit prompt to language modeling dataset To convert a preference with implicit prompt dataset into a language modeling dataset, remove the rejected, and rename the column `"chosen"` to `"text"`. ```python from datasets import Dataset dataset = Dataset.from_dict({ "chosen": ["The sky is blue.", "The sun is in the sky."], "rejected": ["The sky is green.", "The sun is in the sea."], }) dataset = dataset.rename_column("chosen", "text").remove_columns("rejected") ``` ```python >>> dataset[0] {'text': 'The sky is blue.'} ``` ### From preference with implicit prompt to prompt-completion dataset To convert a preference dataset with implicit prompt into a prompt-completion dataset, extract the prompt with [`extract_prompt`], remove the rejected, and rename the column `"chosen"` to `"completion"`. 
```python from datasets import Dataset from trl import extract_prompt dataset = Dataset.from_dict({ "chosen": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sky."}], ], "rejected": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sea."}], ], }) dataset = dataset.map(extract_prompt).remove_columns("rejected").rename_column("chosen", "completion") ``` ```python >>> dataset[0] {'prompt': [{'role': 'user', 'content': 'What color is the sky?'}], 'completion': [{'role': 'assistant', 'content': 'It is blue.'}]} ``` ### From preference with implicit prompt to prompt-only dataset To convert a preference dataset with implicit prompt into a prompt-only dataset, extract the prompt with [`extract_prompt`], and remove the rejected and the chosen. ```python from datasets import Dataset from trl import extract_prompt dataset = Dataset.from_dict({ "chosen": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sky."}], ], "rejected": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sea."}], ], }) dataset = dataset.map(extract_prompt).remove_columns(["chosen", "rejected"]) ``` ```python >>> dataset[0] {'prompt': [{'role': 'user', 'content': 'What color is the sky?'}]} ``` ### From implicit to explicit prompt preference dataset To convert a preference dataset with implicit prompt into a preference dataset with explicit prompt, extract the prompt with [`extract_prompt`]. 
```python from datasets import Dataset from trl import extract_prompt dataset = Dataset.from_dict({ "chosen": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sky."}], ], "rejected": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sea."}], ], }) dataset = dataset.map(extract_prompt) ``` ```python >>> dataset[0] {'prompt': [{'role': 'user', 'content': 'What color is the sky?'}], 'chosen': [{'role': 'assistant', 'content': 'It is blue.'}], 'rejected': [{'role': 'assistant', 'content': 'It is green.'}]} ``` ### From preference with implicit prompt to unpaired preference dataset To convert a preference dataset with implicit prompt into an unpaired preference dataset, extract the prompt with [`extract_prompt`], and unpair the dataset with [`unpair_preference_dataset`]. 
```python from datasets import Dataset from trl import extract_prompt, unpair_preference_dataset dataset = Dataset.from_dict({ "chosen": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sky."}], ], "rejected": [ [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}], [{"role": "user", "content": "Where is the sun?"}, {"role": "assistant", "content": "In the sea."}], ], }) dataset = dataset.map(extract_prompt) dataset = unpair_preference_dataset(dataset) ``` ```python >>> dataset[0] {'prompt': [{'role': 'user', 'content': 'What color is the sky?'}], 'completion': [{'role': 'assistant', 'content': 'It is blue.'}], 'label': True} ``` <Tip warning={true}> Keep in mind that the `"chosen"` and `"rejected"` completions in a preference dataset can be both good or bad. Before applying [`unpair_preference_dataset`], please ensure that all `"chosen"` completions can be labeled as good and all `"rejected"` completions as bad. This can be ensured by checking absolute rating of each completion, e.g. from a reward model. </Tip> ### From preference to language modeling dataset To convert a preference dataset into a language modeling dataset, remove the rejected, concatenate the prompt and the chosen into the `"text"` column. 
```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is"], "chosen": [" blue.", " in the sky."], "rejected": [" green.", " in the sea."], }) def concat_prompt_chosen(example): return {"text": example["prompt"] + example["chosen"]} dataset = dataset.map(concat_prompt_chosen, remove_columns=["prompt", "chosen", "rejected"]) ``` ```python >>> dataset[0] {'text': 'The sky is blue.'} ``` ### From preference to prompt-completion dataset To convert a preference dataset into a prompt-completion dataset, remove the rejected, and rename the column `"chosen"` to `"completion"`. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is"], "chosen": [" blue.", " in the sky."], "rejected": [" green.", " in the sea."], }) dataset = dataset.remove_columns("rejected").rename_column("chosen", "completion") ``` ```python >>> dataset[0] {'prompt': 'The sky is', 'completion': ' blue.'} ``` ### From preference to prompt-only dataset To convert a preference dataset into a prompt-only dataset, remove the rejected and the chosen. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is"], "chosen": [" blue.", " in the sky."], "rejected": [" green.", " in the sea."], }) dataset = dataset.remove_columns(["chosen", "rejected"]) ``` ```python >>> dataset[0] {'prompt': 'The sky is'} ``` ### From explicit to implicit prompt preference dataset To convert a preference dataset with explicit prompt into a preference dataset with implicit prompt, concatenate the prompt to both chosen and rejected, and remove the prompt. 
```python
from datasets import Dataset

dataset = Dataset.from_dict({
    "prompt": [
        [{"role": "user", "content": "What color is the sky?"}],
        [{"role": "user", "content": "Where is the sun?"}],
    ],
    "chosen": [
        [{"role": "assistant", "content": "It is blue."}],
        [{"role": "assistant", "content": "In the sky."}],
    ],
    "rejected": [
        [{"role": "assistant", "content": "It is green."}],
        [{"role": "assistant", "content": "In the sea."}],
    ],
})

def concat_prompt_to_completions(example):
    return {"chosen": example["prompt"] + example["chosen"], "rejected": example["prompt"] + example["rejected"]}

dataset = dataset.map(concat_prompt_to_completions, remove_columns="prompt")
```

```python
>>> dataset[0]
{'chosen': [{'role': 'user', 'content': 'What color is the sky?'}, {'role': 'assistant', 'content': 'It is blue.'}],
 'rejected': [{'role': 'user', 'content': 'What color is the sky?'}, {'role': 'assistant', 'content': 'It is green.'}]}
```

### From preference to unpaired preference dataset

To convert a preference dataset into an unpaired preference dataset, unpair the dataset with [`unpair_preference_dataset`].

```python
from datasets import Dataset
from trl import unpair_preference_dataset

dataset = Dataset.from_dict({
    "prompt": [
        [{"role": "user", "content": "What color is the sky?"}],
        [{"role": "user", "content": "Where is the sun?"}],
    ],
    "chosen": [
        [{"role": "assistant", "content": "It is blue."}],
        [{"role": "assistant", "content": "In the sky."}],
    ],
    "rejected": [
        [{"role": "assistant", "content": "It is green."}],
        [{"role": "assistant", "content": "In the sea."}],
    ],
})

dataset = unpair_preference_dataset(dataset)
```

```python
>>> dataset[0]
{'prompt': [{'role': 'user', 'content': 'What color is the sky?'}],
 'completion': [{'role': 'assistant', 'content': 'It is blue.'}],
 'label': True}
```

<Tip warning={true}>

Keep in mind that the `"chosen"` and `"rejected"` completions in a preference dataset can be both good or bad.
Before applying [`unpair_preference_dataset`], please ensure that all `"chosen"` completions can be labeled as good and all `"rejected"` completions as bad. This can be ensured by checking absolute rating of each completion, e.g. from a reward model. </Tip> ### From unpaired preference to language modeling dataset To convert an unpaired preference dataset into a language modeling dataset, concatenate prompts with good completions into the `"text"` column, and remove the prompt, completion and label columns. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is", "The sky is", "The sun is"], "completion": [" blue.", " in the sky.", " green.", " in the sea."], "label": [True, True, False, False], }) def concatenate_prompt_completion(example): return {"text": example["prompt"] + example["completion"]} dataset = dataset.filter(lambda x: x["label"]).map(concatenate_prompt_completion).remove_columns(["prompt", "completion", "label"]) ``` ```python >>> dataset[0] {'text': 'The sky is blue.'} ``` ### From unpaired preference to prompt-completion dataset To convert an unpaired preference dataset into a prompt-completion dataset, filter for good labels, then remove the label columns. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is", "The sky is", "The sun is"], "completion": [" blue.", " in the sky.", " green.", " in the sea."], "label": [True, True, False, False], }) dataset = dataset.filter(lambda x: x["label"]).remove_columns(["label"]) ``` ```python >>> dataset[0] {'prompt': 'The sky is', 'completion': ' blue.'} ``` ### From unpaired preference to prompt-only dataset To convert an unpaired preference dataset into a prompt-only dataset, remove the completion and the label columns. 
```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["The sky is", "The sun is", "The sky is", "The sun is"], "completion": [" blue.", " in the sky.", " green.", " in the sea."], "label": [True, True, False, False], }) dataset = dataset.remove_columns(["completion", "label"]) ``` ```python >>> dataset[0] {'prompt': 'The sky is'} ``` ### From stepwise supervision to language modeling dataset To convert a stepwise supervision dataset into a language modeling dataset, concatenate prompts with good completions into the `"text"` column. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["Blue light", "Water"], "completions": [[" scatters more in the atmosphere,", " so the sky is green."], [" forms a less dense structure in ice,", " which causes it to expand when it freezes."]], "labels": [[True, False], [True, True]], }) def concatenate_prompt_completions(example): completion = "".join(example["completions"]) return {"text": example["prompt"] + completion} dataset = dataset.filter(lambda x: all(x["labels"])).map(concatenate_prompt_completions, remove_columns=["prompt", "completions", "labels"]) ``` ```python >>> dataset[0] {'text': 'Blue light scatters more in the atmosphere, so the sky is green.'} ``` ### From stepwise supervision to prompt-completion dataset To convert a stepwise supervision dataset into a prompt-completion dataset, join the good completions and remove the labels. 
```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["Blue light", "Water"], "completions": [[" scatters more in the atmosphere,", " so the sky is green."], [" forms a less dense structure in ice,", " which causes it to expand when it freezes."]], "labels": [[True, False], [True, True]], }) def join_completions(example): completion = "".join(example["completions"]) return {"completion": completion} dataset = dataset.filter(lambda x: all(x["labels"])).map(join_completions, remove_columns=["completions", "labels"]) ``` ```python >>> dataset[0] {'prompt': 'Blue light', 'completion': ' scatters more in the atmosphere, so the sky is green.'} ``` ### From stepwise supervision to prompt-only dataset To convert a stepwise supervision dataset into a prompt-only dataset, remove the completions and the labels. ```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["Blue light", "Water"], "completions": [[" scatters more in the atmosphere,", " so the sky is green."], [" forms a less dense structure in ice,", " which causes it to expand when it freezes."]], "labels": [[True, False], [True, True]], }) dataset = dataset.remove_columns(["completions", "labels"]) ``` ```python >>> dataset[0] {'prompt': 'Blue light'} ``` ### From stepwise supervision to unpaired preference dataset To convert a stepwise supervision dataset into an unpaired preference dataset, join the completions and merge the labels. The method for merging the labels depends on the specific task. In this example, we use the logical AND operation. This means that if the step labels indicate the correctness of individual steps, the resulting label will reflect the correctness of the entire sequence. 
```python from datasets import Dataset dataset = Dataset.from_dict({ "prompt": ["Blue light", "Water"], "completions": [[" scatters more in the atmosphere,", " so the sky is green."], [" forms a less dense structure in ice,", " which causes it to expand when it freezes."]], "labels": [[True, False], [True, True]], }) def merge_completions_and_labels(example): return {"prompt": example["prompt"], "completion": "".join(example["completions"]), "label": all(example["labels"])} dataset = dataset.map(merge_completions_and_labels, remove_columns=["completions", "labels"]) ``` ```python >>> dataset[0] {'prompt': 'Blue light', 'completion': ' scatters more in the atmosphere, so the sky is green.', 'label': False} ``` ## Vision datasets Some trainers also support fine-tuning vision-language models (VLMs) using image-text pairs. In this scenario, it's recommended to use a conversational format, as each model handles image placeholders in text differently. A conversational vision dataset differs from a standard conversational dataset in two key ways: 1. The dataset must contain the key `images` with the image data. 2. The `"content"` field in messages must be a list of dictionaries, where each dictionary specifies the type of data: `"image"` or `"text"`. Example: ```python # Textual dataset: "content": "What color is the sky?" # Vision dataset: "content": [ {"type": "image"}, {"type": "text", "text": "What color is the sky in the image?"} ] ``` An example of a conversational vision dataset is the [openbmb/RLAIF-V-Dataset](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset). Below is an embedded view of the dataset's training data, allowing you to explore it directly: <iframe src="https://huggingface.co/datasets/trl-lib/rlaif-v/embed/viewer/default/train" frameborder="0" width="100%" height="560px" ></iframe>
trl/docs/source/dataset_formats.md/0
{ "file_path": "trl/docs/source/dataset_formats.md", "repo_id": "trl", "token_count": 18062 }
625
# Logging

As reinforcement learning algorithms are historically challenging to debug, it's important to pay careful attention to logging. By default, TRL trainers like [`PPOTrainer`] and [`GRPOTrainer`] save a lot of relevant information to supported experiment trackers like Weights & Biases (wandb) or TensorBoard.

Upon initialization, pass the `report_to` argument to the respective configuration object (e.g., [`PPOConfig`] for `PPOTrainer`, or [`GRPOConfig`] for `GRPOTrainer`):

```python
# For PPOTrainer
ppo_config = PPOConfig(
    # ...,
    report_to="wandb"  # or "tensorboard"
)

# For GRPOTrainer
grpo_config = GRPOConfig(
    # ...,
    report_to="wandb"  # or "tensorboard"
)
```

If you want to log with TensorBoard, you might also need to specify logging directories, for example, by adding `logging_dir=PATH_TO_LOGS` to the configuration object (e.g., `PPOConfig` or `GRPOConfig`).

## PPO Logging

Here's a brief explanation for the logged metrics provided in the data:

* `eps`: Tracks the number of episodes per second.
* `objective/kl`: The mean Kullback-Leibler (KL) divergence between the current policy and reference policy.
* `objective/entropy`: The mean entropy of the policy, indicating the randomness of the actions chosen by the policy.
* `objective/non_score_reward`: The mean reward from non-score-related sources, basically `beta * kl.sum(1)`, where `beta` is the KL penalty coefficient and `kl` is the per-token KL divergence.
* `objective/rlhf_reward`: The mean RLHF reward, which is `score - non_score_reward`.
* `objective/scores`: The mean scores returned by the reward model / environment.
* `policy/approxkl_avg`: The average approximate KL divergence between consecutive PPO policies. Note that this is not the same as `objective/kl`.
* `policy/clipfrac_avg`: The average fraction of policy updates that are clipped, indicating how often the policy updates are constrained to prevent large changes.
* `loss/policy_avg`: The average policy loss, indicating how well the policy is performing. * `loss/value_avg`: The average value loss, indicating the difference between the predicted value and the actual reward. * `val/clipfrac_avg`: The average fraction of value function updates that are clipped, similar to `policy/clipfrac_avg` but for the value function. * `policy/entropy_avg`: The average entropy of the policy during training, indicating how diverse the policy's actions are. * `val/ratio`: The mean ratio of the current policy probability to the old policy probability, providing a measure of how much the policy has changed. * `val/ratio_var`: The variance of the `val/ratio`, indicating the variability in policy changes. * `val/num_eos_tokens`: The number of end-of-sequence (EOS) tokens generated, which can indicate the number of complete responses. * `lr`: The current learning rate used by the optimizer. * `episode`: The current episode count in the training process. ### Crucial values During training, many values are logged, here are the most important ones: 1. `objective/scores`: The mean scores returned by the reward model / environment. 1. `objective/rlhf_reward`: The mean RLHF reward. This is the ultimate objective of the RLHF training. If training works as intended, this metric should keep going up. 1. `objective/non_score_reward`: The mean reward from non-score-related sources (e.g., KL penalty). Here are some parameters that are useful to monitor for stability (when these diverge or collapse to 0, try tuning variables): 1. `loss/value_avg`: The average value loss. It will spike / NaN when not going well. 1. `val/ratio`: The mean ratio of the current policy probability to the old policy probability. This number should float around 1.0. If this `ratio` is too high (e.g., 2.0 or 1000.0) or too small (e.g., 0.1), it means the updates between consecutive policies are too drastic. 1. 
`policy/clipfrac_avg` and `policy/approxkl_avg`: If `val/ratio` is too high, the `ratio` is going to get clipped, resulting in high `policy/clipfrac_avg` and high `policy/approxkl_avg` as well. 1. `objective/kl`: The mean KL divergence. It should stay positive and ideally not too large, so that the policy is not too far away from the reference policy. ## GRPO Logging Here's a brief explanation for the logged metrics provided in the data for the GRPO trainer: * `num_tokens`: Total number of input tokens processed during training so far. #### Completions * `completions/mean_length`: Mean length of all generated completions (including those not ending with an EOS token). * `completions/min_length`: Minimum length among all generated completions. * `completions/max_length`: Maximum length among all generated completions. * `completions/clipped_ratio`: The ratio of completions that did not end with an EOS token before reaching the maximum generation length (i.e., they were truncated). * `completions/mean_terminated_length`: Mean length of only those completions that successfully ended with an EOS token. * `completions/min_terminated_length`: Minimum length among completions that ended with an EOS token. * `completions/max_terminated_length`: Maximum length among completions that ended with an EOS token. #### Rewards * `rewards/{reward_func_name}/mean`: The mean reward obtained from a specific, named reward function (e.g., `rewards/my_custom_reward/mean`). This is logged for each reward function used. * `rewards/{reward_func_name}/std`: The standard deviation of rewards from a specific, named reward function. * `reward`: The overall mean of the (potentially weighted and, if `args.scale_rewards` is true, normalized) rewards, after group-wise normalization (advantages). * `reward_std`: The standard deviation of the (potentially weighted) rewards *before* group-wise normalization for advantages. 
#### Policy and Loss Metrics * `kl`: The mean Kullback-Leibler (KL) divergence between the current policy and the reference policy. This is logged only if `beta` (the KL coefficient in `GRPOConfig`) is non-zero. * `entropy`: Average entropy of token predictions across generated completions. * If Liger GRPOLoss is used (`use_liger_loss: True` in `GRPOConfig`): * `clip_ratio`: The fraction of policy updates where the probability ratio was clipped according to the GRPO loss's epsilon bounds. * If standard GRPOLoss is used (`use_liger_loss: False`): * `clip_ratio/low_mean`: The mean fraction of instances where the probability ratio `r_t(θ)` was clipped at the lower bound `1 - epsilon_low` (occurs when advantage is negative and ratio is below the bound). * `clip_ratio/low_min`: The minimum observed fraction for `clip_ratio/low_mean` across batches/processes. * `clip_ratio/high_mean`: The mean fraction of instances where the probability ratio `r_t(θ)` was clipped at the upper bound `1 + epsilon_high` (occurs when advantage is positive and ratio is above the bound). * `clip_ratio/high_max`: The maximum observed fraction for `clip_ratio/high_mean` across batches/processes. * `clip_ratio/region_mean`: The mean fraction of instances where the probability ratio was clipped at either the lower or upper bound. ### Crucial GRPO values During GRPO training, monitor these values for insights into performance and stability: 1. `reward`: This is the primary objective. It reflects the (group-wise normalized) rewards the policy is achieving. It should generally increase during successful training. 1. `kl`: If `beta > 0`, this tracks the divergence from the reference model. Keep an eye on it to ensure the policy doesn't stray too far, which can lead to instability. 1. `clip_ratio/*` (either `clip_ratio` for Liger loss or the more detailed `clip_ratio/...` metrics for standard loss): These indicate how often the policy updates are being constrained by the GRPO clipping mechanism. 
Very high values might suggest that the policy is trying to change too drastically (potentially due to large advantages or a learning rate that's too high) or that the epsilon clipping range is too restrictive. 1. `completions/clipped_ratio`: A high ratio here indicates that the model is frequently generating completions that are cut off by `max_completion_length` rather than naturally ending with an EOS token. This might suggest issues with learning sequence termination or that `max_completion_length` is too short. 1. `rewards/{reward_func_name}/mean`: Monitoring the mean of individual reward functions can help diagnose which aspects of the desired behavior the model is learning or struggling with, especially when using multiple reward sources. 1. `entropy`: Measures how uncertain the policy is in its action choices, higher entropy suggests more exploration. A collapse in entropy means the policy is becoming overconfident and deterministic, often too early. This can stall learning by reducing exploration and making updates overly biased. Stable but non-zero entropy is usually a sign that the policy retains flexibility and continues to explore.
trl/docs/source/logging.md/0
{ "file_path": "trl/docs/source/logging.md", "repo_id": "trl", "token_count": 2445 }
626
# RLOO Trainer [![](https://img.shields.io/badge/All_models-RLOO-blue)](https://huggingface.co/models?other=rloo,trl) TRL supports training LLMs with REINFORCE Leave-One-Out (RLOO). The idea is that instead of using a value function, RLOO generates K completions for each prompt. For each completion, RLOO uses the mean scores from the other K-1 completions as a baseline to calculate the advantage. RLOO also models the entire completion as a single action, whereas PPO models each token as an action. Note that REINFORCE / A2C is a special case of PPO, when the number of PPO epochs is 1 and the number of mini-batches is 1, which is how we implement RLOO in TRL. References: - [Back to Basics: Revisiting REINFORCE Style Optimization for Learning from Human Feedback in LLMs](https://huggingface.co/papers/2402.14740) - [A2C is a special case of PPO](https://huggingface.co/papers/2205.09123) - [Fine-Tuning Language Models from Human Preferences](https://github.com/openai/lm-human-preferences) - [Learning to Summarize from Human Feedback](https://github.com/openai/summarize-from-feedback) - [The N Implementation Details of RLHF with PPO](https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo) - [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031) ## Get started To just run a RLOO script to make sure the trainer can run, you can run the following command to train a RLOO model with a dummy reward model. 
```bash python examples/scripts/rloo/rloo.py \ --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ --dataset_train_split descriptiveness \ --learning_rate 3e-6 \ --output_dir models/minimal/rloo \ --per_device_train_batch_size 64 \ --gradient_accumulation_steps 1 \ --total_episodes 10000 \ --model_name_or_path EleutherAI/pythia-14m \ --reward_model_path EleutherAI/pythia-14m \ --missing_eos_penalty 1.0 ``` ## Explanation of the logged metrics The logged metrics are as follows. Here is an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/u2sqci34) <!-- * `rlhf_reward_var_per_prompt`: calculated by `rlhf_reward.var(0).mean()`. This is the variance of the rewards estimated across the `args.rloo_k` samples. Usually we expect it to go down (cause policy entropy goes down). --> * `eps`: Tracks the number of episodes per second. * `objective/kl`: The mean Kullback-Leibler (KL) divergence between the current policy and reference policy. * `objective/entropy`: The mean entropy of the policy, indicating the randomness of the actions chosen by the policy. * `objective/non_score_reward`: The mean reward from non-score-related sources, basically `beta * kl.sum(1)`, where `beta` is the KL penalty coefficient and `kl` is the per-token KL divergence. * `objective/rlhf_reward`: The mean RLHF reward, which is `score - non_score_reward`. * `objective/scores`: The mean scores returned by the reward model / environment. * `policy/approxkl_avg`: The average approximate KL divergence between consecutive PPO policies. Note that this is not the same as `objective/kl`. * `policy/clipfrac_avg`: The average fraction of policy updates that are clipped, indicating how often the policy updates are constrained to prevent large changes. * `loss/policy_avg`: The average policy loss, indicating how well the policy is performing. 
* `val/clipfrac_avg`: The average fraction of value function updates that are clipped, similar to policy/clipfrac_avg but for the value function.
* `policy/entropy_avg`: The average entropy of the policy during training, indicating how diverse the policy's actions are.
* `val/ratio`: The mean ratio of the current policy probability to the old policy probability, providing a measure of how much the policy has changed.
* `val/ratio_var`: The variance of the `val/ratio`, indicating the variability in policy changes.
* `val/num_eos_tokens`: The number of end-of-sequence (EOS) tokens generated, which can indicate the number of complete responses.
* `lr`: The current learning rate used by the optimizer.
* `episode`: The current global step or episode count in the training process.

## Cookbook

* Debugging TIP: `objective/rlhf_reward`: this is the ultimate objective of the RLHF training. If training works as intended, this metric should keep going up.
* Debugging TIP: `val/ratio`: this number should float around 1.0, and it gets clipped by `--cliprange 0.2` with PPO's surrogate loss. So if this `ratio` is too high like 2.0 or 1000.0 or too small like 0.1, it means the updates between consecutive policies are too drastic. You should try to understand why this is happening and try to fix it.
* Memory TIP: If you are running out of memory, you can try to reduce the `--per_device_train_batch_size` or increase the `--gradient_accumulation_steps` to reduce the memory footprint.
* Memory TIP: If you have multiple GPUs, you can also run training with DeepSpeed stage 3 to reduce the memory footprint `accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml`.
* Usage TIP: We recommend using the "EOS trick" via `--missing_eos_penalty`, which subtracts a static scalar penalty from the score of completions that do not end with an EOS token. This can help the model learn to generate more coherent completions.

## What is my model doing exactly?
To help you understand what your model is doing, we periodically log some sample completions from the model. Here is an example of a completion. In an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/u2sqci34), it looks like the following, allowing you to see the model's response at different stages of training. By default we generate `--num_sample_generations 10` during training, but you can customize the number of generations. ![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/ppov2_completions.gif) In the logs the sampled generations look like ``` ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓ ┃ query ┃ model response ┃ score ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩ │ SUBREDDIT: r/AskReddit │ I'm in love with a friend, and │ 3.921875 │ │ │ I don't know how to get rid of │ │ │ TITLE: How do you get someone │ those feelings. I'm │ │ │ out of your head? │ desperate.<|endoftext|>[PAD][P… │ │ │ │ │ │ │ POST: Hi, │ │ │ │ I'm 22, and I have been with my │ │ │ │ girlfriend for 5 years now. We │ │ │ │ recently moved together. We've │ │ │ │ always loved each other │ │ │ │ intensely. │ │ │ │ │ │ │ │ Problem, I recently started to │ │ │ │ have feelings for an other │ │ │ │ person (a friend). This person │ │ │ │ has had a boyfriend for now 3 │ │ │ │ years, and has absolutely no │ │ │ │ ideas. Those feelings were so │ │ │ │ strong, it was hard to hide │ │ │ │ them. After 2 months of me │ │ │ │ being distant and really sad, │ │ │ │ my girlfriend forced me to say │ │ │ │ what was bothering me. I'm not │ │ │ │ a good liar, and now she knows. │ │ │ │ │ │ │ │ We decided to give us a week │ │ │ │ alone, I went to my parents. │ │ │ │ │ │ │ │ Now, I'm completely lost. I │ │ │ │ keep on thinking about this │ │ │ │ person, and I hate that. I │ │ │ │ would like for those feelings │ │ │ │ to go away, to leave me alone. │ │ │ │ But I can't. 
│ │ │ │ │ │ │ │ What do I do? It's been 3 │ │ │ │ months now, and I'm just │ │ │ │ desperate. │ │ │ │ │ │ │ │ TL;DR: │ │ │ ├─────────────────────────────────┼─────────────────────────────────┼──────────┤ │ SUBREDDIT: r/pettyrevenge │ My mom woke me up with a loud │ 6.84375 │ │ │ TV. I blasted Gangnam Style on │ │ │ TITLE: So, my mom woke me up │ repeat, with the bass cranked │ │ │ with a loud TV. │ up as high as it could │ │ │ │ go.<|endoftext|>[PAD][PAD][PAD… │ │ │ POST: She was in her living │ │ │ │ room, watching TV. This was at │ │ │ │ about 8:30 in the morning, and │ │ │ │ she was exercising. She turned │ │ │ │ the TV up extra loud to hear it │ │ │ │ over her excercycle, and woke │ │ │ │ me up. I went in there asking │ │ │ │ for her to turn it down. She │ │ │ │ said she didn't have to; I │ │ │ │ explained that I always used │ │ │ │ headphones so she didn't have │ │ │ │ to deal with my noise and that │ │ │ │ she should give me a little │ │ │ │ more respect, given that I paid │ │ │ │ rent at the time. │ │ │ │ │ │ │ │ She disagreed. I went back to │ │ │ │ my room, rather pissed off at │ │ │ │ the lack of equality. I had no │ │ │ │ lock on my door; but I had a │ │ │ │ dresser right next to it, so I │ │ │ │ pulled one of the drawers out │ │ │ │ enough so that it caused the │ │ │ │ door to not be openable. Then, │ │ │ │ I turned my speakers up really │ │ │ │ loud and blasted Gangnam Style │ │ │ │ on repeat, with the bass │ │ │ │ cranked up as high as it could │ │ │ │ go. │ │ │ │ │ │ │ │ If you hate Gangnam Style for │ │ │ │ being overplayed, you will see │ │ │ │ why I chose that particular │ │ │ │ song. I personally don't mind │ │ │ │ it. But here's the thing about │ │ │ │ my bass; it vibrates the walls, │ │ │ │ making one hell of a lot of │ │ │ │ noise. Needless to say, my mom │ │ │ │ was not pleased and shut off │ │ │ │ the internet. But it was oh so │ │ │ │ worth it. 
│ │ │ │ │ │ │ │ TL;DR: │ │ │ └─────────────────────────────────┴─────────────────────────────────┴──────────┘ ``` ## Implementation details The bulk of RLOOTrainer is based on the PPO implementation, which is based on the [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). Below is a vectorized advantage calculation for RLOO: ```python def test_rloo_reward(): local_batch_size = 3 rloo_k = 4 rlhf_reward = torch.tensor([ 1, 2, 3, # first rlhf reward for three prompts 2, 3, 4, # second rlhf reward for three prompts 5, 6, 7, # third rlhf reward for three prompts 8, 9, 10, # fourth rlhf reward for three prompts ]).float() # here we have 3 prompts which have 4 completions each baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) advantages = torch.zeros_like(rlhf_reward) for i in range(0, len(advantages), local_batch_size): other_response_rlhf_rewards = [] for j in range(0, len(advantages), local_batch_size): if i != j: other_response_rlhf_rewards.append(rlhf_reward[j : j + local_batch_size]) advantages[i : i + local_batch_size] = rlhf_reward[i : i + local_batch_size] - torch.stack(other_response_rlhf_rewards).mean(0) assert (1 - (2 + 5 + 8) / 3 - advantages[0].item()) < 1e-6 # First rlhf reward for the first prompt assert (6 - (3 + 2 + 9) / 3 - advantages[7].item()) < 1e-6 # Third rlhf reward for the second prompt # Vectorized implementation rlhf_reward = rlhf_reward.reshape(rloo_k, local_batch_size) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) vec_advantages = rlhf_reward - baseline torch.testing.assert_close(vec_advantages.flatten(), advantages) ``` ## Benchmark experiments To validate the RLOO implementation works, we ran experiment on the 1B model. Here are the command we used to run the experiment. We take the SFT / RM models directly from [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). 
``` accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ --output_dir models/minimal/rloo_tldr \ --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ --dataset_test_split validation \ --num_ppo_epochs 2 \ --num_mini_batches 2 \ --learning_rate 3e-6 \ --per_device_train_batch_size 16 \ --gradient_accumulation_steps 16 \ --total_episodes 1000000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ --local_rollout_forward_batch_size 16 \ --missing_eos_penalty 1.0 \ --stop_token eos \ --kl_coef 0.03 ``` Checkpoints and experiment tracking are available at: - [🤗 Model checkpoint](https://huggingface.co/vwxyzjn/rloo_tldr) - [🐝 Tracked experiment](https://wandb.ai/huggingface/trl/runs/u2sqci34) To evaluate, we use [vLLM](https://github.com/vllm-project/vllm) to load the checkpoints and GPT-4o mini as a judge model to evaluate the generated TL;DR against the reference TL;DR. For more information on how to use judges, see [Judges](judges). ```bash $ python examples/scripts/evals/judge_tldr.py --model_name_or_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr --judge_model gpt-4o-mini --num_examples 1000 Model win rate: 33.00% $ python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-4o-mini --num_examples 1000 Model win rate: 51.20% ``` The RLOO checkpoint gets a 51.2% preferred rate vs the 33.0% preference rate of the SFT checkpoint. This is a good sign that the RLOO training is working as intended. 
Metrics: ![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/rloo.png) ```bash # pip install openrlbenchmark==0.2.1a5 # see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation # to use it, change `?we=huggingface&wpn=trl` to your own project and `?tag=pr-1540` to your own tag python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=train/episode&ceik=output_dir&cen=sft_model_path&metrics=train/objective/rlhf_reward&metrics=train/objective/scores&metrics=train/objective/kl&metrics=train/objective/non_score_reward&metrics=train/objective/entropy&metrics=train/policy/approxkl_avg&metrics=train/policy/clipfrac_avg&metrics=train/loss/policy_avg&metrics=train/policy/entropy_avg&metrics=train/val/ratio&metrics=train/val/ratio_var&metrics=train/val/num_eos_tokens&metrics=train/lr&metrics=train/eps' \ "cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr?tag=pr-1540" \ --env-ids models/minimal/rloo_tldr \ --pc.ncols 4 \ --pc.ncols-legend 1 \ --pc.xlabel "Episode" \ --output-filename benchmark/trl/pr-1540/rloo \ --scan-history ``` ## Reinforce++ The [Reinforce++](https://hijkzzz.notion.site/reinforce-plus-plus) report by Jian Hu suggests several optimization tricks to enhance performance and stability of RLHF. They include: - Clipping rewards: limiting reward values within a specific range to mitigate the impact of extreme rewards on model updates, thus preventing gradient explosion - Normalizing rewards: scaling rewards to have a mean of 0 and a standard deviation of 1, which helps in stabilizing the training process - Normalizing advantages: scaling advantages to have a mean of 0 and a standard deviation of 1, which helps in stabilizing the training process - Using token-level KL penalty that is defined as equation (1) of the report vs. sequence-level KL penalty (default) These options are available via the appropriate arguments in the [`RLOOConfig`] class. 
## RLOOTrainer [[autodoc]] RLOOTrainer - train - save_model - push_to_hub ## RLOOConfig [[autodoc]] RLOOConfig
trl/docs/source/rloo_trainer.md/0
{ "file_path": "trl/docs/source/rloo_trainer.md", "repo_id": "trl", "token_count": 9483 }
627
<jupyter_start><jupyter_text>Tune GPT2 to generate controlled sentiment reviews> Optimise GPT2 to produce IMDB movie reviews with controlled sentiment using a BERT sentiment classifier for rewards.**WARNING:** We often experienced loss spikes in this examples which caused model training to fail or slow down. There is a [GitHub issue](https://github.com/lvwerra/trl/issues/101) to track the issue. Figure: Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face. The experiment setup is very similar to the positive sentiment notebook. However, in this notebook we fine-tune GPT2 (small) to generate **controlled** movie reviews based on the IMDB dataset. The model gets the target sentiment and 5 tokens from a real review and is tasked to produce continuations with the targeted sentiment. The reward for the continuations is calculated with the logits of a BERT sentiment classifier. That reward is then used for PPO training. Setup experiment Import dependencies<jupyter_code>%load_ext autoreload %autoreload 2 import random import torch import wandb import time import os from tqdm import tqdm import numpy as np import pandas as pd from random import choices import matplotlib.pyplot as plt tqdm.pandas() from datasets import load_dataset from transformers import AutoTokenizer, pipeline from trl import ( PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model, )<jupyter_output>/home/leandro_huggingface_co/miniconda3/envs/trl/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Configuration<jupyter_code>sentiment_pipe_kwargs = {"top_k": None, "function_to_apply": "none"} config = PPOConfig( model_name="lvwerra/gpt2-imdb", steps=51200, learning_rate=1.41e-5, remove_unused_columns=False, log_with="wandb", ) txt_in_len = 5 txt_out_len = 20 seed = 1 np.random.seed(seed)<jupyter_output><empty_output><jupyter_text>You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the huggingface [script](https://github.com/huggingface/transformers/blob/master/examples/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper ["Fine-Tuning Language Models from Human Preferences"](https://huggingface.co/papers/1909.08593). This model as well as the BERT model is available in the Huggingface model zoo [here](https://huggingface.co/models). The following code should automatically download the models. Load data and models Load pre-trained GPT2 language models We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model.<jupyter_code>gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) gpt2_ref_model = create_reference_model(gpt2_model) gpt2_tokenizer = AutoTokenizer.from_pretrained(config.model_name) gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token<jupyter_output><empty_output><jupyter_text>Load IMDB datasetThe IMDB dataset contains 50k movie review annotated with "positive"/"negative" feedback indicating the sentiment. 
We load the IMDB dataset into a DataFrame and filter for comments that are at least 500 characters long and take the first 1000 characters of each comment. The first filter we apply to avoid comments that are less than `txt_in_len` token long and the second to avoid tokenizing way more text than we actually need.<jupyter_code># create the dataset
dataset = load_dataset("stanfordnlp/imdb", split="train")
dataset = dataset.rename_columns({"text": "review", "label": "sentiment"})
# make sure the comments are at least 500 and trim to 1000
dataset = dataset.filter(lambda x: len(x["review"]) > 500, batched=False)
dataset = dataset.map(lambda x: {"review": x["review"][:1000]}, batched=False)
dataset<jupyter_output>Found cached dataset imdb (/home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1)
Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-d314b4c14499bf03.arrow
Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-0d5fcb05c95b1186.arrow<jupyter_text>Tokenize IMDB reviews We tokenize all IMDB in advance to avoid tokenizing twice. In the first step we encode the queries and slice the first `txt_in_len` tokens.
In a second step we decode these tokens back to text for later display.<jupyter_code>dataset = dataset.map( lambda x: { "input_ids": gpt2_tokenizer.encode(" " + x["review"], return_tensors="pt")[ 0, :txt_in_len ] }, batched=False, ) dataset = dataset.map( lambda x: {"query": gpt2_tokenizer.decode(x["input_ids"])}, batched=False ) dataset = dataset[:20480] from datasets import Dataset dataset = Dataset.from_dict(dataset) dataset.set_format("pytorch") dataset[3]["input_ids"] def collator(data): return dict((key, [d[key] for d in data]) for key in data[0]) ppo_trainer = PPOTrainer( config, gpt2_model, gpt2_ref_model, gpt2_tokenizer, dataset, data_collator=collator )<jupyter_output>Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving. wandb: Currently logged in as: lvwerra. Use `wandb login --relogin` to force relogin<jupyter_text>Load BERT classifierWe load a BERT classifier fine-tuned on the IMDB dataset.<jupyter_code>if ppo_trainer.accelerator.num_processes == 1: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a `pipeline` bug else: device = ppo_trainer.accelerator.device sentiment_pipe = pipeline( "sentiment-analysis", "lvwerra/distilbert-imdb", device=device )<jupyter_output><empty_output><jupyter_text>The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model.<jupyter_code>text = "this movie was really bad!!" output = sentiment_pipe(text, **sentiment_pipe_kwargs) output text = "this movie was really good!!" 
output = sentiment_pipe(text, **sentiment_pipe_kwargs) output text = "this movie was a documentary" output = sentiment_pipe(text, **sentiment_pipe_kwargs) output<jupyter_output><empty_output><jupyter_text>The resulting reward signal:<jupyter_code>def extract_pipe_output(outputs): positive_logits = [] for out in outputs: for element in out: if element["label"] == "POSITIVE": positive_logits.append(torch.tensor(element["score"])) return positive_logits output[1]["score"]<jupyter_output><empty_output><jupyter_text>Control token dictWe will append the control token at the beginning of each query to signal the model what the target sentiment is. Each control sequence consists of three tokens:<jupyter_code>ctrl_str = ["[negative]", "[neutral]", "[positive]"] device = torch.device( "cuda" if torch.cuda.is_available() else "cpu" ) # this should be handled by accelerate ctrl_tokens = dict( (s, gpt2_tokenizer.encode(s, return_tensors="pt").squeeze().to(device)) for s in ctrl_str ) ctrl_tokens<jupyter_output><empty_output><jupyter_text>Reward function<jupyter_code>def pos_logit_to_reward(logit, task): """ Take the positive sentiment logit and scale it for the task. task [negative]: reward = -logit task [neutral]: reward = -2*abs(logit)+4 task [positive]: reward = logit """ for i in range(len(logit)): if task[i] == "[negative]": logit[i] = -logit[i] elif task[i] == "[neutral]": logit[i] = -2 * torch.abs(logit[i]) + 4 elif task[i] == "[positive]": pass else: raise ValueError("task has to be in [0, 1, 2]!") return logit<jupyter_output><empty_output><jupyter_text>The following examples show the rewards for the cases where the classifier logit is 4, -4 and 0 for the three targets `['negative]`, `['neutral]` and `['positive']`. The scaling is not perfect as it differs between neutral and the other two classes. This is something to further investigate in the future. 
Ideally, one would use the logit output for each class individually, but since there is no dedicated class for neutral this is a workaround.<jupyter_code>print(ctrl_str) pos_logit_to_reward(torch.Tensor([4, 4, 4]), ctrl_str) pos_logit_to_reward(torch.Tensor([-4, -4, -4]), ctrl_str) pos_logit_to_reward(torch.Tensor([0, 0, 0]), ctrl_str)<jupyter_output><empty_output><jupyter_text>Generation settings<jupyter_code>generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": gpt2_tokenizer.eos_token_id, "max_new_tokens": txt_out_len, "eos_token_id": -1, }<jupyter_output><empty_output><jupyter_text>Optimize model **Steps**The training loop consists of the following steps:1. Get a batch of queries and create random controls2. Get the query responses from the policy3. Join query and responses and tokenize for BERT analysis4. Get sentiments for query/responses from BERT5. Optimize policy with PPO using the (query, response, reward) triplet6. Log all the training statistics**Training time**This step takes **~2h** on a P6000 GPU with the above specified settings.<jupyter_code>for epoch in range(2): for batch in tqdm(ppo_trainer.dataloader): ( logs, game_data, ) = ( dict(), dict(), ) #### prepend a random control token task_list = choices(ctrl_str, k=config.batch_size) game_data["query"] = [t + q for t, q in zip(task_list, batch["query"])] query_tensors = [ torch.cat((ctrl_tokens[t], input_ids)) for t, input_ids in zip(task_list, batch["input_ids"]) ] #### get response from gpt2 response_tensors = [] for query in query_tensors: response = ppo_trainer.generate(query, **generation_kwargs) response_tensors.append(response.squeeze()[-txt_out_len:]) game_data["response"] = [ gpt2_tokenizer.decode(r.squeeze()) for r in response_tensors ] #### sentiment analysis texts = [q + r for q, r in zip(batch["query"], game_data["response"])] logits = extract_pipe_output(sentiment_pipe(texts, **sentiment_pipe_kwargs)) rewards = 
pos_logit_to_reward(logits, task_list) #### Run PPO training t = time.time() stats = ppo_trainer.step(query_tensors, response_tensors, rewards) for cs in ctrl_str: key = "env/reward_" + cs.strip("[]") stats[key] = np.mean( [r.cpu().numpy() for r, t in zip(rewards, task_list) if t == cs] ) ppo_trainer.log_stats(stats, game_data, rewards)<jupyter_output>8%|▊ | 6/80 [12:44<2:37:54, 128.03s/it]/home/leandro_huggingface_co/miniconda3/envs/trl/lib/python3.9/site-packages/transformers/pipelines/base.py:1045: UserWarning: You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a dataset warnings.warn( 100%|██████████| 80/80 [2:46:39<00:00, 124.99s/it] 91%|█████████▏| 73/80 [2:30:39<14:35, 125.03s/it]<jupyter_text>Training progressIf you are tracking the training progress with Weights&Biases you should see a plot similar to the following: Figure: Reward mean and distribution evolution during training. One can observe how the model starts to generate more positive outputs after a few optimisation steps.> Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher initial coefficient. Model inspection Reward distributionFirst, we can have a look at the reward distribution. Both the negative and positive rewards are clearly shifted to high rewards. The neutral rewards, however, are still centered around zero. There are a few possible explanations for this. There could be a bug in the code and the way the neutral rewards are calculated. 
Another problem could be that sentences sometimes start with a strong sentiment and it is hard for the model to shift the sentiment towards neutral.<jupyter_code>for ctrl_s in ctrl_str:
    plt.hist(
        [r for r, t in zip(logs["env/reward_dist"], task_list) if t == ctrl_s],
        density=True,
        alpha=0.5,
        label=ctrl_s,
    )
plt.legend(loc="best")
plt.title("reward distribution")
plt.grid(True)
plt.show()<jupyter_output><empty_output><jupyter_text>Save modelFinally, we save the model to disk for later usage.<jupyter_code>gpt2_model.save_pretrained("gpt2-imdb-ctrl")
gpt2_tokenizer.save_pretrained("gpt2-imdb-ctrl")<jupyter_output><empty_output>
trl/examples/notebooks/gpt2-sentiment-control.ipynb/0
{ "file_path": "trl/examples/notebooks/gpt2-sentiment-control.ipynb", "repo_id": "trl", "token_count": 4850 }
628
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Fine-Tune Llama2-7b on SE paired dataset import os from dataclasses import dataclass, field from typing import Optional import torch from accelerate import Accelerator from datasets import load_dataset from peft import AutoPeftModelForCausalLM, LoraConfig from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, is_torch_npu_available, is_torch_xpu_available, set_seed, ) from trl import SFTConfig, SFTTrainer from trl.trainer import ConstantLengthDataset @dataclass class ScriptArguments: model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"}) dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"}) subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"}) split: Optional[str] = field(default="train", metadata={"help": "the split to use"}) size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"}) streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"}) shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"}) seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"}) num_workers: Optional[int] = 
field(default=4, metadata={"help": "the number of workers"}) use_bnb: Optional[bool] = field(default=True, metadata={"help": "whether to use BitsAndBytes"}) # LoraConfig lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) parser = HfArgumentParser((ScriptArguments, SFTConfig)) script_args, training_args = parser.parse_args_into_dataclasses() peft_config = LoraConfig( r=script_args.lora_r, lora_alpha=script_args.lora_alpha, lora_dropout=script_args.lora_dropout, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM", ) if training_args.group_by_length and training_args.packing: raise ValueError("Cannot use both packing and group by length") # `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used. # `gradient_checkpointing=True` will cause `Variable._execution_engine.run_backward`. if training_args.gradient_checkpointing: raise ValueError("gradient_checkpointing not supported") set_seed(training_args.seed) def chars_token_ratio(dataset, tokenizer, nb_examples=400): """ Estimate the average number of characters per token in the dataset. """ total_characters, total_tokens = 0, 0 for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples): text = prepare_sample_text(example) total_characters += len(text) if tokenizer.is_fast: total_tokens += len(tokenizer(text).tokens()) else: total_tokens += len(tokenizer.tokenize(text)) return total_characters / total_tokens def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def prepare_sample_text(example): """Prepare the text from a sample of the dataset.""" text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}" return text def create_datasets(tokenizer, args, seed=None): dataset = load_dataset( args.dataset_name, data_dir=args.subset, split=args.split, use_auth_token=True, num_proc=args.num_workers if not args.streaming else None, streaming=args.streaming, ) if args.streaming: print("Loading the dataset in streaming mode") valid_data = dataset.take(args.size_valid_set) train_data = dataset.skip(args.size_valid_set) train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=seed) else: dataset = dataset.train_test_split(test_size=0.005, seed=seed) train_data = dataset["train"] valid_data = dataset["test"] print(f"Size of the train set: {len(train_data)}. 
Size of the validation set: {len(valid_data)}") chars_per_token = chars_token_ratio(train_data, tokenizer) print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}") train_dataset = ConstantLengthDataset( tokenizer, train_data, formatting_func=prepare_sample_text, infinite=True, seq_length=args.seq_length, chars_per_token=chars_per_token, ) valid_dataset = ConstantLengthDataset( tokenizer, valid_data, formatting_func=prepare_sample_text, infinite=False, seq_length=args.seq_length, chars_per_token=chars_per_token, ) return train_dataset, valid_dataset bnb_config = None if script_args.use_bnb: bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, ) base_model = AutoModelForCausalLM.from_pretrained( script_args.model_name, quantization_config=bnb_config, device_map={"": Accelerator().local_process_index}, trust_remote_code=True, use_auth_token=True, ) base_model.config.use_cache = False tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training train_dataset, eval_dataset = create_datasets(tokenizer, script_args, seed=training_args.seed) trainer = SFTTrainer( model=base_model, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, max_length=None, formatting_func=prepare_sample_text, processing_class=tokenizer, args=training_args, ) trainer.train() trainer.save_model(training_args.output_dir) output_dir = os.path.join(training_args.output_dir, "final_checkpoint") trainer.model.save_pretrained(output_dir) # Free memory for merging weights del base_model if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16) model = 
model.merge_and_unload() output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint") model.save_pretrained(output_merged_dir, safe_serialization=True)
trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py", "repo_id": "trl", "token_count": 2894 }
629
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # "peft", # ] # /// """ Run the KTO training script with the commands below. In general, the optimal configuration for KTO will be similar to that of DPO. # Full training: python trl/scripts/kto.py \ --dataset_name trl-lib/kto-mix-14k \ --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \ --per_device_train_batch_size 16 \ --num_train_epochs 1 \ --learning_rate 5e-7 \ --lr_scheduler_type=cosine \ --gradient_accumulation_steps 1 \ --eval_steps 500 \ --output_dir=kto-aligned-model \ --warmup_ratio 0.1 \ --report_to wandb \ --logging_first_step # QLoRA: python trl/scripts/kto.py \ --dataset_name trl-lib/kto-mix-14k \ --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \ --per_device_train_batch_size 8 \ --num_train_epochs 1 \ --learning_rate 5e-7 \ --lr_scheduler_type=cosine \ --gradient_accumulation_steps 1 \ --eval_steps 500 \ --output_dir=kto-aligned-model-lora \ --warmup_ratio 0.1 \ --report_to wandb \ --logging_first_step \ --use_peft \ --load_in_4bit \ --lora_target_modules=all-linear \ --lora_r=16 \ --lora_alpha=16 """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser from trl import KTOConfig, KTOTrainer, ModelConfig, ScriptArguments, get_peft_config if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, 
KTOConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_into_dataclasses() # Load a pretrained model model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) ref_model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token # Load the dataset dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) # Initialize the KTO trainer trainer = KTOTrainer( model, ref_model, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, peft_config=get_peft_config(model_args), ) # Train and push the model to the Hub trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name)
trl/examples/scripts/kto.py/0
{ "file_path": "trl/examples/scripts/kto.py", "repo_id": "trl", "token_count": 1384 }
630
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # ] # /// """ Usage: python examples/scripts/xpo.py \ --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ --dataset_name trl-lib/tldr \ --learning_rate 5.0e-7 \ --output_dir pythia-1b-tldr-xpo \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 32 \ --num_train_epochs 3 \ --max_new_tokens 64 \ --warmup_ratio 0.1 \ --missing_eos_penalty 1.0 \ --push_to_hub """ import torch from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, GenerationConfig from trl import ( HfPairwiseJudge, LogCompletionsCallback, ModelConfig, OpenAIPairwiseJudge, PairRMJudge, ScriptArguments, TrlParser, XPOConfig, XPOTrainer, get_kbit_device_map, get_quantization_config, ) from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE JUDGES = {"pair_rm": PairRMJudge, "openai": OpenAIPairwiseJudge, "hf": HfPairwiseJudge} if __name__ == "__main__": parser = TrlParser((ScriptArguments, XPOConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_and_config() training_args.gradient_checkpointing_kwargs = {"use_reentrant": True} torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) 
quantization_config = get_quantization_config(model_args) model_kwargs = dict( revision=model_args.model_revision, attn_implementation=model_args.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs ) ref_model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs ) if training_args.reward_model_path is not None: reward_model = AutoModelForSequenceClassification.from_pretrained( training_args.reward_model_path, num_labels=1, trust_remote_code=model_args.trust_remote_code, **model_kwargs, ) else: reward_model = None if training_args.judge is not None: judge_cls = JUDGES[training_args.judge] judge = judge_cls() else: judge = None tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, padding_side="left", trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) trainer = XPOTrainer( model=model, ref_model=ref_model, reward_model=reward_model, judge=judge, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, ) if training_args.eval_strategy != "no": generation_config = GenerationConfig( max_new_tokens=training_args.max_new_tokens, do_sample=True, temperature=training_args.temperature ) completions_callback = LogCompletionsCallback(trainer, generation_config, num_prompts=8) 
trainer.add_callback(completions_callback) trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name)
trl/examples/scripts/xpo.py/0
{ "file_path": "trl/examples/scripts/xpo.py", "repo_id": "trl", "token_count": 1910 }
631
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable from datasets import Dataset, load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer from trl.extras.dataset_formatting import get_formatting_func_from_dataset from trl.models.utils import ChatMlSpecialTokens, clone_chat_template, setup_chat_format from .testing_utils import TrlTestCase class DatasetFormattingTestCase(TrlTestCase): def setUp(self): super().setUp() self.llama_tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-MistralForCausalLM-0.1") self.chatml_tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") def test_get_formatting_func_from_dataset_with_chatml_messages(self): dataset = Dataset.from_dict( { "messages": [ [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, ] ] } ) # Llama tokenizer formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) self.assertIsInstance(formatting_func, Callable) formatted_text = formatting_func(dataset[0]) expected = "<s> [INST] You are helpful\n\nHello [/INST] Hi, how can I help you?</s>" self.assertEqual(formatted_text, expected) formatted_text = formatting_func(dataset[0:1]) self.assertListEqual(formatted_text, [expected]) # ChatML tokenizer formatting_func = 
get_formatting_func_from_dataset(dataset, self.chatml_tokenizer) formatted_text = formatting_func(dataset[0]) expected = "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n" self.assertEqual(formatted_text, expected) formatted_text = formatting_func(dataset[0:1]) self.assertListEqual(formatted_text, [expected]) def test_get_formatting_func_from_dataset_with_chatml_conversations(self): dataset = Dataset.from_dict( { "conversations": [ [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, ] ] } ) # Llama tokenizer formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) self.assertIsInstance(formatting_func, Callable) formatted_text = formatting_func(dataset[0]) expected = "<s> [INST] You are helpful\n\nHello [/INST] Hi, how can I help you?</s>" self.assertEqual(formatted_text, expected) formatted_text = formatting_func(dataset[0:1]) self.assertListEqual(formatted_text, [expected]) # ChatML tokenizer formatting_func = get_formatting_func_from_dataset(dataset, self.chatml_tokenizer) formatted_text = formatting_func(dataset[0]) expected = "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n" self.assertEqual(formatted_text, expected) formatted_text = formatting_func(dataset[0:1]) self.assertListEqual(formatted_text, [expected]) def test_get_formatting_func_from_dataset_with_instruction(self): dataset = Dataset.from_list( [{"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}] ) formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) self.assertIsNotNone(formatting_func) self.assertIsInstance(formatting_func, Callable) formatted_text = formatting_func(dataset[0]) self.assertEqual(formatted_text, "<s> [INST] 
What is 2+2? [/INST] 4</s>") formatted_text = formatting_func(dataset[0:1]) self.assertListEqual(formatted_text, ["<s> [INST] What is 2+2? [/INST] 4</s>"]) def test_get_formatting_func_from_dataset_from_hub(self): ds_1 = load_dataset("philschmid/trl-test-instruction", split="train") ds_2 = load_dataset("philschmid/dolly-15k-oai-style", split="train") for ds in [ds_1, ds_2]: formatting_func = get_formatting_func_from_dataset(ds, self.llama_tokenizer) self.assertIsNotNone(formatting_func) self.assertIsInstance(formatting_func, Callable) ds_3 = load_dataset("philschmid/guanaco-sharegpt-style", split="train") formatting_func = get_formatting_func_from_dataset(ds_3, self.llama_tokenizer) self.assertIsNone(formatting_func) def test_get_formatting_func_from_dataset_with_unknown_format(self): dataset = Dataset.from_dict({"text": "test"}) formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) self.assertIsNone(formatting_func) class SetupChatFormatTestCase(TrlTestCase): def setUp(self): super().setUp() self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") # remove built-in chat_template to simulate a model having no chat_template self.tokenizer.chat_template = None def test_setup_chat_format(self): modified_model, modified_tokenizer = setup_chat_format( self.model, self.tokenizer, format="chatml", resize_to_multiple_of=123 ) _chatml = ChatMlSpecialTokens() # Check if special tokens are correctly set self.assertEqual(modified_tokenizer.eos_token, "<|im_end|>") self.assertEqual(modified_tokenizer.pad_token, "<|im_end|>") self.assertEqual(modified_tokenizer.bos_token, "<|im_start|>") self.assertEqual(modified_tokenizer.eos_token, _chatml.eos_token) self.assertEqual(modified_tokenizer.pad_token, _chatml.pad_token) self.assertEqual(modified_tokenizer.bos_token, _chatml.bos_token) 
self.assertEqual((modified_model.vocab_size % 123), 0) def test_example_with_setup_model(self): modified_model, modified_tokenizer = setup_chat_format( self.model, self.tokenizer, ) messages = [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, ] prompt = modified_tokenizer.apply_chat_template(messages, tokenize=False) self.assertEqual( prompt, "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n", ) class CloneChatTemplateTestCase(TrlTestCase): def setUp(self): super().setUp() # This tokenizer doesn't have a chat_template by default self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-BloomForCausalLM") self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-BloomForCausalLM") # This one has a chat_template by default self.source = "trl-internal-testing/tiny-Qwen3ForCausalLM" def test_clone(self): _, modified_tokenizer, _ = clone_chat_template(self.model, self.tokenizer, self.source) # Check if special tokens are correctly set self.assertEqual(modified_tokenizer.eos_token, "<|im_end|>") def test_clone_with_resize(self): modified_model, modified_tokenizer, _ = clone_chat_template( self.model, self.tokenizer, self.source, resize_to_multiple_of=123 ) # Check that the input embeddings have been resized to a multiple of 123 self.assertEqual((modified_model.vocab_size % 123), 0) # Check that the input embeddings size matches the tokenizer vocabulary size self.assertEqual(self.model.vocab_size, len(modified_tokenizer.vocab)) def test_clone_with_resize_and_extra_tokens_already_in_vocab(self): # This will add <extra_id_0>, <extra_id_1>, ... 
to the tokenizer modified_model, modified_tokenizer, _ = clone_chat_template( self.model, self.tokenizer, self.source, resize_to_multiple_of=123 ) # Try if we can resize a tokenizer that already has extra these extra tokens modified_model, modified_tokenizer, _ = clone_chat_template( modified_model, modified_tokenizer, self.source, resize_to_multiple_of=124 ) # Check that the input embeddings have been resized to a multiple of 123 self.assertEqual((modified_model.vocab_size % 124), 0) # Check that the input embeddings size matches the tokenizer vocabulary size self.assertEqual(self.model.vocab_size, len(modified_tokenizer.vocab)) def test_apply_new_chat_template(self): _, modified_tokenizer, _ = clone_chat_template(self.model, self.tokenizer, self.source) messages = [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, ] prompt = modified_tokenizer.apply_chat_template(messages, tokenize=False) self.assertEqual( prompt, "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\nHi, how can I help you?<|im_end|>\n", )
trl/tests/test_dataset_formatting.py/0
{ "file_path": "trl/tests/test_dataset_formatting.py", "repo_id": "trl", "token_count": 4465 }
632
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import Dataset, load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer from transformers.testing_utils import require_peft from transformers.utils import is_peft_available from trl import RewardConfig, RewardTrainer, maybe_apply_chat_template from trl.trainer.reward_trainer import _tokenize from .testing_utils import TrlTestCase if is_peft_available(): from peft import LoraConfig, TaskType class RewardTrainerTester(TrlTestCase): def setUp(self): super().setUp() self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.model = AutoModelForSequenceClassification.from_pretrained(self.model_id) self.model.config.pad_token_id = self.tokenizer.pad_token_id def test_preprocessing_conversational(self): dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=self.tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) dummy_dataset = dummy_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer}) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": self.tokenizer}) self.assertDictEqual(trainer.train_dataset[:], 
dummy_dataset[:]) def test_preprocessing_standard(self): # No chat template, so we load a fresh tokenizer tokenizer = AutoTokenizer.from_pretrained(self.model_id) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train") training_args = RewardConfig(output_dir=self.tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset ) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": tokenizer}) self.assertDictEqual(trainer.train_dataset[:], dummy_dataset[:]) def test_train_full(self): dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=self.tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) def test_train_full_pretokenized(self): dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") dummy_dataset = dummy_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer}) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": self.tokenizer}) training_args = RewardConfig(output_dir=self.tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) previous_trainable_params = {n: param.clone() for n, param in 
trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) @require_peft def test_train_lora(self): peft_config = LoraConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, ) dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=self.tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset, peft_config=peft_config, ) previous_trainable_params = {} previous_non_trainable_params = {} # due to a change in the way the modules to save are dealt in PEFT. trainable_params_name = ["lora", "modules_to_save"] # check gradients are not None for n, param in trainer.model.named_parameters(): if any(t in n for t in trainable_params_name): previous_trainable_params[n] = param.clone() else: previous_non_trainable_params[n] = param.clone() trainer.train() self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertFalse(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) # Check that the non trainable parameters have not changed for n, param in previous_non_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) @require_peft def test_train_lora_pretokenized(self): peft_config = LoraConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, ) dummy_dataset = 
load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") dummy_dataset = dummy_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer}) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": self.tokenizer}) training_args = RewardConfig(output_dir=self.tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset, peft_config=peft_config, ) previous_trainable_params = {} previous_non_trainable_params = {} # due to a change in the way the modules to save are dealt in PEFT. trainable_params_name = ["lora", "modules_to_save"] # check gradients are not None for n, param in trainer.model.named_parameters(): if any(t in n for t in trainable_params_name): previous_trainable_params[n] = param.clone() else: previous_non_trainable_params[n] = param.clone() trainer.train() self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertFalse(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) # Check that the non trainable parameters have not changed for n, param in previous_non_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) def test_margin(self): dummy_dataset_dict = { "input_ids_chosen": [ torch.LongTensor([0, 1, 2]), ], "attention_mask_chosen": [ torch.LongTensor([1, 1, 1]), ], "input_ids_rejected": [ torch.LongTensor([0, 2]), ], "attention_mask_rejected": [ torch.LongTensor([1, 1]), ], "margin": [ torch.FloatTensor([1.0]), ], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) training_args = RewardConfig(output_dir=self.tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, 
train_dataset=dummy_dataset ) batch = [dummy_dataset[0]] batch = trainer.data_collator(batch) batch = {k: v.to(trainer.model.device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()} loss, outputs = trainer.compute_loss(trainer.model, batch, return_outputs=True) l_val = -torch.nn.functional.logsigmoid( outputs["rewards_chosen"] - outputs["rewards_rejected"] - batch["margin"] ).mean() self.assertLess(abs(loss - l_val), 1e-6) def test_tags(self): dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=self.tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) self.assertEqual(trainer.model.model_tags, trainer._tag_names)
trl/tests/test_reward_trainer.py/0
{ "file_path": "trl/tests/test_reward_trainer.py", "repo_id": "trl", "token_count": 4510 }
633
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os from copy import deepcopy from typing import Optional import torch import torch.nn as nn from accelerate import PartialState from huggingface_hub import hf_hub_download from huggingface_hub.utils import ( EntryNotFoundError, HFValidationError, LocalEntryNotFoundError, RepositoryNotFoundError, ) from safetensors.torch import load_file as safe_load_file from transformers import GenerationMixin, PreTrainedModel, is_torch_npu_available, is_torch_xpu_available from transformers.utils import is_peft_available if is_peft_available(): from peft import ( PeftConfig, PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PromptLearningConfig, get_peft_model, prepare_model_for_kbit_training, ) from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled LAYER_PATTERNS = [ "transformer.h.{layer}", "model.decoder.layers.{layer}", "gpt_neox.layers.{layer}", "model.layers.{layer}", ] class PreTrainedModelWrapper(nn.Module): r""" A wrapper class around a (`transformers.PreTrainedModel`) to be compatible with the (`~transformers.PreTrained`) class in order to keep some attributes and methods of the (`~transformers.PreTrainedModel`) class. Attributes: pretrained_model (`transformers.PreTrainedModel`): The model to be wrapped. parent_class (`transformers.PreTrainedModel`): The parent class of the model to be wrapped. 
supported_args (`list`): The list of arguments that are supported by the wrapper class. """ transformers_parent_class = None supported_args = None supported_modules = ("v_head",) supported_rm_modules = ("score",) supported_pretrained_model_architectures = ( (PreTrainedModel) if not is_peft_available() else (PreTrainedModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM) ) def __init__( self, pretrained_model=None, score_module=None, supports_rm_adapter=False, rm_adapter_name=None, **kwargs ): super().__init__() self.pretrained_model = pretrained_model self.config = pretrained_model.config self.prepare_inputs_for_generation = pretrained_model.prepare_inputs_for_generation self.is_loaded_in_8bit = getattr(pretrained_model, "is_loaded_in_8bit", False) self.is_loaded_in_4bit = getattr(pretrained_model, "is_loaded_in_4bit", False) self.is_sequential_parallel = False if hasattr(pretrained_model, "gradient_checkpointing_disable"): self.gradient_checkpointing_disable = pretrained_model.gradient_checkpointing_disable if hasattr(pretrained_model, "gradient_checkpointing_enable"): self.gradient_checkpointing_enable = pretrained_model.gradient_checkpointing_enable if hasattr(pretrained_model, "enable_input_require_grads"): self.enable_input_require_grads = pretrained_model.enable_input_require_grads self.supports_rm_adapter = supports_rm_adapter self.rm_adapter_name = rm_adapter_name self.policy_adapter_name = "default" if score_module is not None: self.score = score_module @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates a new model from a pretrained model from `transformers`. The pretrained model is loaded using the `from_pretrained` method of the `transformers.PreTrainedModel` class. The arguments that are specific to the `transformers.PreTrainedModel` class are passed along this method and filtered out from the `kwargs` argument. 
Args: pretrained_model_name_or_path (`str` or `transformers.PreTrainedModel`): The path to the pretrained model or its name. *model_args (`list`, *optional*)): Additional positional arguments passed along to the underlying model's `from_pretrained` method. **kwargs (`dict`, *optional*): Additional keyword arguments passed along to the underlying model's `from_pretrained` method. We also pre-process the kwargs to extract the arguments that are specific to the `transformers.PreTrainedModel` class and the arguments that are specific to trl models. The kwargs also support `prepare_model_for_kbit_training` arguments from `peft` library. """ if kwargs is not None: peft_config = kwargs.pop("peft_config", None) reward_adapter = kwargs.pop("reward_adapter", None) reward_adapter_name = kwargs.pop("reward_adapter_name", "reward_adapter") is_trainable = kwargs.pop("is_trainable", False) trl_model_args, pretrained_kwargs, peft_quantization_kwargs = cls._split_kwargs(kwargs) token = pretrained_kwargs.get("token", None) else: peft_config = None is_trainable = False trl_model_args = {} pretrained_kwargs = {} peft_quantization_kwargs = {} token = None if reward_adapter is not None and not isinstance(reward_adapter, str): raise ValueError( "The `reward_adapter` argument should be a string representing the name of local path or the Hub id to the Reward Modeling adapter." 
) is_peft_model = False current_device = cls._get_current_device() if isinstance(pretrained_model_name_or_path, str): is_loaded_in_8bit = pretrained_kwargs["load_in_8bit"] if "load_in_8bit" in pretrained_kwargs else False is_loaded_in_4bit = pretrained_kwargs["load_in_4bit"] if "load_in_4bit" in pretrained_kwargs else False else: is_loaded_in_8bit = getattr(pretrained_model_name_or_path, "is_loaded_in_8bit", False) is_loaded_in_4bit = getattr(pretrained_model_name_or_path, "is_loaded_in_4bit", False) if (is_loaded_in_8bit or is_loaded_in_4bit) and "device_map" not in pretrained_kwargs: # warn users logging.warning( "The `device_map` argument is not provided. We will override the device_map argument." " to set the entire" " model on the current device. If you want to set the model on multiple devices, please provide" " a custom `device_map` argument." ) pretrained_kwargs["device_map"] = {"": current_device} if is_peft_available() and peft_config is not None and not isinstance(peft_config, PeftConfig): raise ValueError("The `peft_config` argument should be an instance of `peft.PeftConfig` class.") # First, load the pre-trained model using the parent-class # either `AutoModelForCausalLM` or `AutoModelForSeq2SeqLM` if isinstance(pretrained_model_name_or_path, str): if is_peft_available(): try: # If there is a trained peft adapter in the hub, load its config. 
remote_adapter_config = hf_hub_download( pretrained_model_name_or_path, "adapter_config.json", token=token, ) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): remote_adapter_config = None else: remote_adapter_config = None local_adapter_present = os.path.exists(os.path.join(pretrained_model_name_or_path, "adapter_config.json")) if (local_adapter_present or remote_adapter_config is not None) and is_peft_available(): if peft_config is not None: logging.warning( "`peft_config` argument ignored since a peft config file was found in " f"{pretrained_model_name_or_path}" ) # Load the trained peft adapter config if local_adapter_present: trained_adapter_config = PeftConfig.from_pretrained(pretrained_model_name_or_path) else: remote_adapter_dir = os.path.dirname(remote_adapter_config) trained_adapter_config = PeftConfig.from_pretrained(remote_adapter_dir) # Load the pretrained base model pretrained_model = cls.transformers_parent_class.from_pretrained( trained_adapter_config.base_model_name_or_path, *model_args, **pretrained_kwargs ) # Wrap the pretrained model with the trained peft adapter pretrained_model = PeftModel.from_pretrained( pretrained_model, pretrained_model_name_or_path, is_trainable=is_trainable, token=token ) logging.info("Trained peft adapter loaded") else: pretrained_model = cls.transformers_parent_class.from_pretrained( pretrained_model_name_or_path, *model_args, **pretrained_kwargs ) if peft_config is not None: # Initialize a new peft adapter with the given config if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training( pretrained_model, **peft_quantization_kwargs, ) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info("peft adapter initialised") elif isinstance(pretrained_model_name_or_path, cls.supported_pretrained_model_architectures): pretrained_model = pretrained_model_name_or_path if peft_config is not None and isinstance(pretrained_model, 
PreTrainedModel): # Initialize a new peft adapter with the given config if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training( pretrained_model, **peft_quantization_kwargs, ) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info("peft adapter initialised") else: raise ValueError( "pretrained_model_name_or_path should be a string or a PreTrainedModel, " f"but is {type(pretrained_model_name_or_path)}" ) if is_peft_available(): if isinstance(pretrained_model, PeftModel): is_peft_model = True # for backward compatibility if hasattr(pretrained_model, "active_peft_config") and isinstance( pretrained_model.active_peft_config, PromptLearningConfig ): raise ValueError("PromptLearningConfig is not supported for PPO training.") # Add reward modeling adapter if specified if not is_peft_model and reward_adapter is not None: raise ValueError("reward_adapter can only be used with a PeftModel. ") elif is_peft_model and reward_adapter is not None: score_module = cls.add_and_load_reward_modeling_adapter( pretrained_model, reward_adapter, reward_adapter_name, token=token ) multi_adapter_args = { "score_module": score_module, "supports_rm_adapter": True, "rm_adapter_name": reward_adapter_name, } else: multi_adapter_args = {"supports_rm_adapter": False} # Then, create the full model by instantiating the wrapper class model = cls(pretrained_model, **multi_adapter_args, **trl_model_args) # if resume_training, load the state_dict again - this is ok since the # state_dict is removed from the model after loading it. 
is_resuming_training = True if isinstance(pretrained_model_name_or_path, str): safe_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors") filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin") sharded_index_filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin.index.json") safe_sharded_index_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors.index.json") is_sharded = False use_safe = os.path.exists(safe_filename) if not (os.path.exists(filename) or os.path.exists(safe_filename)): # Try with `pytorch_model.bin` filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub( pretrained_model, pretrained_model_name_or_path, sharded_index_filename, token=token, ) # Try with safetensors if filename is None and files_to_download is None: safe_filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub( pretrained_model, pretrained_model_name_or_path, safe_sharded_index_filename, token=token, model_name="model.safetensors", model_index_name="model.safetensors.index.json", ) use_safe = True else: use_safe = False loading_func = safe_load_file if use_safe else torch.load load_kwargs = {} if use_safe else {"map_location": "cpu", "weights_only": True} if is_resuming_training: if is_sharded: # download each file and add it to the state_dict state_dict = {} for shard_file in files_to_download: filename = hf_hub_download( pretrained_model_name_or_path, shard_file, token=token, ) state_dict.update(loading_func(filename, **load_kwargs)) else: state_dict = loading_func(filename if not use_safe else safe_filename, **load_kwargs) else: state_dict = pretrained_model_name_or_path.state_dict() model.is_peft_model = is_peft_model model.current_device = current_device if is_resuming_training: model.post_init(state_dict=state_dict) return model @classmethod def _get_checkpoint_from_hub( cls, pretrained_model, pretrained_model_name_or_path, 
index_filename, token=None, model_name="pytorch_model.bin", model_index_name="pytorch_model.bin.index.json", ): files_to_download = None filename = None is_resuming_training = True is_sharded = False try: filename = hf_hub_download( pretrained_model_name_or_path, model_name, token=token, ) # sharded except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): if os.path.exists(index_filename): index_file_name = index_filename else: try: index_file_name = hf_hub_download( pretrained_model_name_or_path, model_index_name, token=token, ) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): # not continue training, do not have v_head weight is_resuming_training = False logging.warning( f"A {type(pretrained_model)} model is loaded from '{pretrained_model_name_or_path}', " f"and no v_head weight is found. This IS expected if you are not resuming PPO training." ) # load json if is_resuming_training: with open(index_file_name) as f: index = json.load(f) # check filename with `v_head` or any known extra module: files_to_download = set() for k, v in index["weight_map"].items(): if any(module in k for module in cls.supported_modules): files_to_download.add(v) is_sharded = True return filename, files_to_download, is_sharded, is_resuming_training @classmethod def _get_current_device(cls): r""" Get the current device. For GPU & XPU, we return the local process index using the `accelerate.PartialState` object to handle corner cases when running scripts in distributed environments. Returns: current_device (`Union[int, str]`): The current device. 
""" state = PartialState() if torch.cuda.is_available() or is_torch_xpu_available(): return state.local_process_index elif is_torch_npu_available(): return f"npu:{state.local_process_index}" else: return "cpu" @classmethod def _split_kwargs(cls, kwargs): """ Separate the kwargs from the arguments that we support inside `supported_args` and the ones that we don't. """ check_peft_kwargs = False if is_peft_available(): from peft import prepare_model_for_kbit_training check_peft_kwargs = True supported_kwargs = {} unsupported_kwargs = {} peft_kwargs = {} for key, value in kwargs.items(): if key in cls.supported_args: supported_kwargs[key] = value else: unsupported_kwargs[key] = value if check_peft_kwargs: if key in prepare_model_for_kbit_training.__code__.co_varnames: peft_kwargs[key] = value if key in unsupported_kwargs: unsupported_kwargs.pop(key) return supported_kwargs, unsupported_kwargs, peft_kwargs @classmethod def add_and_load_reward_modeling_adapter( cls, pretrained_model, adapter_model_id, adapter_name="reward_model_adapter", token=None ): r""" Add and load a reward modeling adapter. This method can only be used if the model is a `PeftModel` and if you have initialized the model with the `reward_modeling_adapter_id` argument, pointing to the id of the reward modeling adapter. The latest needs also to contain the score head in order to produce the reward. 
""" pretrained_model.load_adapter(adapter_model_id, adapter_name, is_trainable=False) pretrained_model.train() filename = os.path.join(adapter_model_id, "adapter_model.bin") safe_loading = False if not os.path.exists(filename): try: local_filename = hf_hub_download( adapter_model_id, "adapter_model.bin", token=token, ) except Exception: filename = os.path.join(adapter_model_id, "adapter_model.safetensors") safe_loading = True if not os.path.exists(filename): try: local_filename = hf_hub_download( adapter_model_id, "adapter_model.safetensors", token=token, ) except Exception as exc: raise ValueError( "Could not find adapter model in the Hub, make sure you have the correct adapter model id." ) from exc else: local_filename = filename else: local_filename = filename loading_func = safe_load_file if safe_loading else torch.load load_kwargs = {} if safe_loading else {"map_location": "cpu", "weights_only": True} adapter_state_dict = loading_func(local_filename, **load_kwargs) for score_name_candidate in cls.supported_rm_modules: if any(score_name_candidate in name for name in adapter_state_dict.keys()): score_name = score_name_candidate # we have found the correct head name and can break break score_dict = {} for name, param in adapter_state_dict.items(): if score_name in name: key_name = ".".join(name.split(".")[-1:]) score_dict[key_name] = param.to(cls._get_current_device()) num_labels, hidden_dim = score_dict["weight"].shape has_bias = any("bias" in name for name in adapter_state_dict.keys()) score = nn.Linear(hidden_dim, num_labels, bias=has_bias).to( device=cls._get_current_device(), dtype=pretrained_model.dtype, ) score.load_state_dict(score_dict) for param in score.parameters(): param.requires_grad = False return score def push_to_hub(self, *args, **kwargs): r""" Push the pretrained model to the hub. This method is a wrapper around `transformers.PreTrainedModel.push_to_hub`. 
Please refer to the documentation of `transformers.PreTrainedModel.push_to_hub` for more information. Args: *args (`list`, *optional*): Positional arguments passed along to the underlying model's `push_to_hub` method. **kwargs (`dict`, *optional*): Keyword arguments passed along to the underlying model's `push_to_hub` method. """ raise NotImplementedError def save_pretrained(self, *args, **kwargs): r""" Save the pretrained model to a directory. This method is a wrapper around `transformers.PreTrainedModel.save_pretrained`. Please refer to the documentation of `transformers.PreTrainedModel.save_pretrained` for more information. Args: *args (`list`, *optional*): Positional arguments passed along to the underlying model's `save_pretrained` method. **kwargs (`dict`, *optional*): Keyword arguments passed along to the underlying model's `save_pretrained` method. """ state_dict = kwargs.get("state_dict") if state_dict is None: state_dict = self.state_dict() kwargs["state_dict"] = state_dict # if it is a peft model only save the `v_head` state_dict and # pop the `state_dict` from the kwargs to avoid silent bugs with `peft` if self.is_peft_model: save_path = args[0] save_path = os.path.join(save_path, "pytorch_model.bin") torch.save(state_dict, save_path) _ = kwargs.pop("state_dict", None) return self.pretrained_model.save_pretrained(*args, **kwargs) def state_dict(self, *args, **kwargs): r""" Return the state_dict of the pretrained model. """ raise NotImplementedError def post_init(self, *args, **kwargs): r""" Post initialization method. This method is called after the model is instantiated and loaded from a checkpoint. It can be used to perform additional operations such as loading the state_dict. """ raise NotImplementedError def compute_reward_score(self, input_ids, attention_mask=None, **kwargs): r""" Computes the reward score for a given input. The method has first to enable the adapter and then compute the reward score. 
After that the model disables the reward modeling adapter and enables the default ppo adapter again. """ if not self.supports_rm_adapter: raise ValueError("This model does not support reward modeling adapter.") # enable rm adapter self.pretrained_model.set_adapter(self.rm_adapter_name) self.pretrained_model.eval() with torch.no_grad(): base_model_output = self.pretrained_model( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, return_dict=True, **kwargs, ) last_hidden_states = base_model_output.hidden_states[-1] scores = self.score(last_hidden_states) self.pretrained_model.set_adapter(self.policy_adapter_name) self.pretrained_model.eval() return scores def create_reference_model( model: PreTrainedModelWrapper, num_shared_layers: Optional[int] = None, pattern: Optional[str] = None ) -> PreTrainedModelWrapper: """ Creates a static reference copy of a model. Note that model will be in `.eval()` mode. Args: model (`PreTrainedModelWrapper`): The model to be copied. num_shared_layers (`int`, *optional*): The number of initial layers that are shared between both models and kept frozen. pattern (`str`, *optional*): The shared layers are selected with a string pattern (e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here. Returns: `PreTrainedModelWrapper` """ if is_deepspeed_zero3_enabled(): raise ValueError( "DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoModelForCausalLM.from_pretrained()`." 
) parameter_names = [n for n, _ in model.named_parameters()] ref_model = deepcopy(model) # if no layers are shared, return copy of model if num_shared_layers is None: for param_name in parameter_names: param = ref_model.get_parameter(param_name) param.requires_grad = False return ref_model.eval() # identify layer name pattern if pattern is not None: pattern = pattern.format(layer=num_shared_layers) else: for pattern_candidate in LAYER_PATTERNS: pattern_candidate = pattern_candidate.format(layer=num_shared_layers) if any(pattern_candidate in name for name in parameter_names): pattern = pattern_candidate break if pattern is None: raise ValueError("Layer pattern could not be matched.") # divide parameters in shared and unshared parameter lists shared_param_list = [] unshared_param_list = [] shared_parameter = True for name, _param in model.named_parameters(): if pattern in name: shared_parameter = False if shared_parameter: shared_param_list.append(name) else: unshared_param_list.append(name) # create reference of the original parameter if they are shared for param_name in shared_param_list: param = model.get_parameter(param_name) param.requires_grad = False _ref_param = ref_model.get_parameter(param_name) # for all other parameters just make sure they don't use gradients for param_name in unshared_param_list: param = ref_model.get_parameter(param_name) param.requires_grad = False if pattern is not None and len(unshared_param_list) == 0: logging.warning("Pattern passed or found, but no layers matched in the model. Check for a typo.") return ref_model.eval() class GeometricMixtureWrapper(GenerationMixin): r""" Geometric Mixture generation wrapper that samples from the logits of two model's geometric mixture. Args: model (`PreTrainedModel`): The model to be wrapped. ref_model (`PreTrainedModel`): The reference model. generation_config (`GenerationConfig`): The generation config. mixture_coef (`float`, *optional* - default: 0.5): The mixture coefficient. 
""" main_input_name = "input_ids" _supports_cache_class = False _supports_static_cache = False _is_stateful = False def __init__(self, model, ref_model, generation_config, mixture_coef=0.5, device=None): super().__init__() self.model = model self.config = model.config self.ref_model = ref_model self.generation_config = generation_config self.mixture_coef = mixture_coef self.device = device if hasattr(self.model, "_is_stateful"): self._is_stateful = self.model._is_stateful def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) @torch.inference_mode() def forward(self, *args, **kwargs): model_outputs = self.model(*args, **kwargs) model_logits = model_outputs.logits ref_model_logits = self.ref_model(*args, **kwargs).logits model_outputs.logits = torch.nn.functional.log_softmax( self.mixture_coef * ref_model_logits + (1 - self.mixture_coef) * model_logits, dim=-1 ) return model_outputs def prepare_inputs_for_generation(self, *args, **kwargs): # turn off cache in the generation config kwargs["use_cache"] = False model_inputs = self.model.prepare_inputs_for_generation(*args, **kwargs) _ = self.ref_model.prepare_inputs_for_generation(*args, **kwargs) return model_inputs def _validate_model_class(self): self.model._validate_model_class() def _validate_model_kwargs(self, model_kwargs): return self.model._validate_model_kwargs(model_kwargs)
trl/trl/models/modeling_base.py/0
{ "file_path": "trl/trl/models/modeling_base.py", "repo_id": "trl", "token_count": 13829 }
634
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import base64 import logging import os from collections.abc import Sequence from contextlib import asynccontextmanager from dataclasses import dataclass, field from io import BytesIO from itertools import chain from multiprocessing import Pipe, Process from multiprocessing.connection import Connection from typing import Optional import torch from transformers import is_vision_available from trl import TrlParser from trl.import_utils import ( is_fastapi_available, is_pydantic_available, is_uvicorn_available, is_vllm_ascend_available, is_vllm_available, ) if is_fastapi_available(): from fastapi import FastAPI if is_pydantic_available(): from pydantic import BaseModel if is_uvicorn_available(): import uvicorn if is_vision_available(): from PIL import Image if is_vllm_available(): from vllm import LLM, SamplingParams from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator from vllm.distributed.parallel_state import get_world_group from vllm.distributed.utils import StatelessProcessGroup from vllm.sampling_params import GuidedDecodingParams from vllm.utils import get_open_port if is_vllm_ascend_available(): from vllm_ascend.distributed.device_communicators.pyhccl import PyHcclCommunicator as PyNcclCommunicator logger = logging.getLogger(__name__) # We use CUDA with multiprocessing, so we must use the 'spawn' start method. 
Otherwise, we will get the following # error: RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use # the 'spawn' start method os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" class WeightSyncWorkerExtension: """ A vLLM worker extension that enables weight synchronization between a client and multiple server workers. This worker uses a `StatelessProcessGroup` to establish communication and a `PyNcclCommunicator` to handle efficient GPU-based communication using NCCL. The primary purpose of this class is to receive updated model weights from a client process and distribute them to all worker processes participating in model inference. """ # The following attributes are initialized when `init_communicator` method is called. pynccl_comm = None # Communicator for weight updates client_rank = None # Source rank for broadcasting updated weights def init_communicator(self, host: str, port: int, world_size: int, client_device_uuid: str) -> None: """ Initializes the weight update communicator using a stateless process group. This method creates a `StatelessProcessGroup` that allows external training processes to communicate with vLLM workers without interfering with the global torch distributed group. Args: host (`str`): Hostname or IP address of the master node. port (`int`): Port number to be used for communication. world_size (`int`): Total number of participating processes in the update group. client_device_uuid (`str`): UUID of the device of client main process. Used to assert that devices are different from vllm workers devices. """ if self.pynccl_comm is not None: raise RuntimeError("Weight update group already initialized. Call close_communicator first.") if client_device_uuid == str(torch.cuda.get_device_properties(self.device).uuid): raise RuntimeError( f"Attempting to use the same CUDA device (UUID: {client_device_uuid}) for multiple distinct " "roles/ranks within the same communicator. 
This setup is unsupported and will likely lead to program " "hangs or incorrect behavior. Ensure that trainer is using different devices than vLLM server." ) # Get the rank of the current worker in the global world group. rank = get_world_group().rank # Create a stateless process group to manage communication between training processes and vLLM workers. pg = StatelessProcessGroup.create(host=host, port=port, rank=rank, world_size=world_size) # Initialize the NCCL-based communicator for weight synchronization. self.pynccl_comm = PyNcclCommunicator(pg, device=self.device) # The client process that sends updated weights has the highest rank (world_size - 1). self.client_rank = world_size - 1 def update_named_param(self, name: str, dtype: str, shape: Sequence[int]) -> None: """ Receives updated weights from the client process and updates the named parameter in the model. Args: name (`str`): Name of the weight tensor being updated. dtype (`str`): Data type of the weight tensor as a string (e.g., `"torch.float32"`). shape (`Sequence[int]`): Shape of the weight tensor. """ if self.pynccl_comm is None: raise RuntimeError("Communicator not initialized. Call `init_communicator` first.") dtype = getattr(torch, dtype.split(".")[-1]) # Allocate memory for the incoming weight tensor on the correct device. weight = torch.empty(shape, dtype=dtype, device=self.device) # Use NCCL to broadcast the updated weights from the client (src) to all workers. self.pynccl_comm.broadcast(weight, src=self.client_rank) self.pynccl_comm.group.barrier() # Load the received weights into the model. self.model_runner.model.load_weights(weights=[(name, weight)]) def close_communicator(self) -> None: """ Closes the communicator when weight synchronization is no longer needed. This method deletes the NCCL communicator to release associated resources. 
""" if self.pynccl_comm is not None: del self.pynccl_comm self.pynccl_comm = None # Ensure attribute is reset to None self.client_rank = None # Ensure attribute is reset to None @dataclass class ScriptArguments: r""" Arguments for the script. Args: model (`str`): Model name or path to load the model from. revision (`str` or `None`, *optional*, defaults to `None`): Revision to use for the model. If not specified, the default branch will be used. tensor_parallel_size (`int`, *optional*, defaults to `1`): Number of tensor parallel workers to use. data_parallel_size (`int`, *optional*, defaults to `1`): Number of data parallel workers to use. host (`str`, *optional*, defaults to `"0.0.0.0"`): Host address to run the server on. port (`int`, *optional*, defaults to `8000`): Port to run the server on. gpu_memory_utilization (`float`, *optional*, defaults to `0.9`): Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache on the device dedicated to generation powered by vLLM. Higher values will increase the KV cache size and thus improve the model's throughput. However, if the value is too high, it may cause out-of-memory (OOM) errors during initialization. dtype (`str`, *optional*, defaults to `"auto"`): Data type to use for vLLM generation. If set to `"auto"`, the data type will be automatically determined based on the model configuration. Find the supported values in the vLLM documentation. max_model_len (`int` or `None`, *optional*, defaults to `None`): If set, the `max_model_len` to use for vLLM. This can be useful when running with reduced `vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model context size, which might be much larger than the KV cache, leading to inefficiencies. enable_prefix_caching (`bool` or `None`, *optional*, defaults to `None`): Whether to enable prefix caching in vLLM. If set to `True`, ensure that the model and the hardware support this feature. 
enforce_eager (`bool`, *optional*, defaults to `False`): Whether to enforce eager execution. If set to `True`, we will disable CUDA graph and always execute the model in eager mode. If `False` (default behavior), we will use CUDA graph and eager execution in hybrid. vllm_model_impl (`str`, *optional*, defaults to `"vllm"`): Model implementation to use for vLLM. Must be one of `"transformers"` or `"vllm"`. `"transformers"`: Use the `transformers` backend for model implementation. `"vllm"`: Use the `vllm` library for model implementation. kv_cache_dtype (`str`, *optional*, defaults to `"auto"`): Data type to use for KV cache. If set to `"auto"`, the dtype will default to the model data type. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether to trust remote code when loading models. Set to `True` to allow executing code from model repositories. This is required for some custom models but introduces security risks. log_level (`str`, *optional*, defaults to `"info"`): Log level for uvicorn. Possible choices: `"critical"`, `"error"`, `"warning"`, `"info"`, `"debug"`, `"trace"`. """ model: str = field( metadata={"help": "Model name or path to load the model from."}, ) revision: Optional[str] = field( default=None, metadata={"help": "Revision to use for the model. If not specified, the default branch will be used."}, ) tensor_parallel_size: int = field( default=1, metadata={"help": "Number of tensor parallel workers to use."}, ) data_parallel_size: int = field( default=1, metadata={"help": "Number of data parallel workers to use."}, ) host: str = field( default="0.0.0.0", metadata={"help": "Host address to run the server on."}, ) port: int = field( default=8000, metadata={"help": "Port to run the server on."}, ) gpu_memory_utilization: float = field( default=0.9, metadata={ "help": "Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV " "cache on the device dedicated to generation powered by vLLM. 
Higher values will increase the KV cache " "size and thus improve the model's throughput. However, if the value is too high, it may cause " "out-of-memory (OOM) errors during initialization." }, ) dtype: str = field( default="auto", metadata={ "help": "Data type to use for vLLM generation. If set to 'auto', the data type will be automatically " "determined based on the model configuration. Find the supported values in the vLLM documentation." }, ) max_model_len: Optional[int] = field( default=None, metadata={ "help": "If set, the `max_model_len` to use for vLLM. This can be useful when running with reduced " "`vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model " "context size, which might be much larger than the KV cache, leading to inefficiencies." }, ) enable_prefix_caching: Optional[bool] = field( default=None, metadata={ "help": "Whether to enable prefix caching in vLLM. If set to `True`, ensure that the model and the " "hardware support this feature." }, ) enforce_eager: Optional[bool] = field( default=False, metadata={ "help": "Whether to enforce eager execution. If set to `True`, we will disable CUDA graph and always " "execute the model in eager mode. If `False` (default behavior), we will use CUDA graph and eager " "execution in hybrid." }, ) kv_cache_dtype: str = field( default="auto", metadata={ "help": "Data type to use for KV cache. If set to 'auto', the dtype will default to the model data type." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": "Whether to trust remote code when loading models. Set to True to allow executing code from model " "repositories. This is required for some custom models but introduces security risks." }, ) log_level: str = field( default="info", metadata={ "help": "Log level for uvicorn. Possible choices: 'critical', 'error', 'warning', 'info', 'debug', " "'trace'." 
}, ) vllm_model_impl: str = field( default="vllm", metadata={ "help": "Model implementation to use for vLLM. Must be one of `transformers` or `vllm`. `transformers`: " "Use the `transformers` backend for model implementation. `vllm`: Use the `vllm` library for " "model implementation." }, ) def llm_worker( script_args: ScriptArguments, data_parallel_rank: int, master_port: int, connection: Connection ) -> None: # Set required environment variables for DP to work with vLLM os.environ["VLLM_DP_RANK"] = str(data_parallel_rank) os.environ["VLLM_DP_RANK_LOCAL"] = str(data_parallel_rank) os.environ["VLLM_DP_SIZE"] = str(script_args.data_parallel_size) os.environ["VLLM_DP_MASTER_PORT"] = str(master_port) llm = LLM( model=script_args.model, revision=script_args.revision, tensor_parallel_size=script_args.tensor_parallel_size, gpu_memory_utilization=script_args.gpu_memory_utilization, enforce_eager=script_args.enforce_eager, dtype=script_args.dtype, # Automatic Prefix Caching caches the KV cache of existing queries, so that a new query can # directly reuse the KV cache if it shares the same prefix with one of the existing queries. # This is particularly useful here because we generate completions from the same prompts. 
        enable_prefix_caching=script_args.enable_prefix_caching,
        kv_cache_dtype=script_args.kv_cache_dtype,
        max_model_len=script_args.max_model_len,
        worker_extension_cls="trl.scripts.vllm_serve.WeightSyncWorkerExtension",
        trust_remote_code=script_args.trust_remote_code,
        model_impl=script_args.vllm_model_impl,
    )

    # Send ready signal to parent process
    connection.send({"status": "ready"})

    while True:
        # Wait for commands from the parent process
        try:
            command = connection.recv()
        except KeyboardInterrupt:
            # Best-effort cleanup of the weight-sync communicator before exiting.
            llm.collective_rpc(method="close_communicator")
            break

        # Handle commands
        if command["type"] in ["call", "fire_and_forget"]:
            # Dispatch an arbitrary LLM method by name with the provided args/kwargs.
            method_name = command["method"]
            args, kwargs = command.get("args", ()), command.get("kwargs", {})
            method = getattr(llm, method_name)
            result = method(*args, **kwargs)
            if command["type"] == "call":
                # Only "call" commands are synchronous: the parent waits for the result.
                connection.send(result)
        elif command["type"] == "shutdown":
            break


def chunk_list(lst: list, n: int) -> list[list]:
    """
    Split list `lst` into `n` evenly distributed sublists.

    Example:
    ```python
    >>> chunk_list([1, 2, 3, 4, 5, 6], 2)
    [[1, 2, 3], [4, 5, 6]]
    >>> chunk_list([1, 2, 3, 4, 5, 6], 4)
    [[1, 2], [3, 4], [5], [6]]
    >>> chunk_list([1, 2, 3, 4, 5, 6], 8)
    [[1], [2], [3], [4], [5], [6], [], []]
    ```
    """
    # `k` is the base chunk size; the first `r` chunks each receive one extra element,
    # which the min(i, r) offsets account for.
    k, r = divmod(len(lst), n)
    return [lst[i * k + min(i, r) : (i + 1) * k + min(i + 1, r)] for i in range(n)]


def main(script_args: ScriptArguments):
    # Fail fast with actionable messages if any optional serving dependency is missing.
    if not is_fastapi_available():
        raise ImportError(
            "FastAPI is required to run the vLLM serve script. Please install it using `pip install fastapi`."
        )

    if not is_pydantic_available():
        raise ImportError(
            "Pydantic is required to run the vLLM serve script. Please install it using `pip install pydantic`."
        )

    if not is_uvicorn_available():
        raise ImportError(
            "Uvicorn is required to run the vLLM serve script. Please install it using `pip install uvicorn`."
        )

    if not is_vllm_available():
        raise ImportError("vLLM is required to run the vLLM serve script. 
Please install it using `pip install vllm`.")

    # Spawn dp workers, and setup pipes for communication
    master_port = get_open_port()
    connections = []
    processes = []
    for data_parallel_rank in range(script_args.data_parallel_size):
        parent_connection, child_connection = Pipe()
        process = Process(target=llm_worker, args=(script_args, data_parallel_rank, master_port, child_connection))
        process.start()
        connections.append(parent_connection)
        processes.append(process)

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # Wait for all workers to send "ready"
        ready_connections = set()
        while len(ready_connections) < script_args.data_parallel_size:
            for connection in connections:
                msg = connection.recv()
                if isinstance(msg, dict) and msg.get("status") == "ready":
                    ready_connections.add(connection)

        yield

        # Wait for processes to terminate
        for process in processes:
            process.join(timeout=10)  # Wait for 10 seconds for the process to terminate
            if process.is_alive():
                logger.warning(f"Process {process} is still alive after 10 seconds, attempting to terminate...")
                process.terminate()
                process.join()  # ensure process termination after calling terminate()

    app = FastAPI(lifespan=lifespan)

    # Define the endpoints for the model server
    @app.get("/health/")
    async def health():
        """
        Health check endpoint to verify that the server is running.
        """
        return {"status": "ok"}

    @app.get("/get_world_size/")
    async def get_world_size():
        """
        Retrieves the world size of the LLM engine, which is `tensor_parallel_size * data_parallel_size`.

        Returns:
            `dict`: A dictionary containing the world size.

        Example response:
        ```json
        {"world_size": 8}
        ```
        """
        return {"world_size": script_args.tensor_parallel_size * script_args.data_parallel_size}

    class GenerateRequest(BaseModel):
        # Request schema for /generate/. Defaults mirror vLLM's SamplingParams defaults.
        prompts: list[str]
        images: Optional[list[str]] = None
        n: int = 1
        repetition_penalty: float = 1.0
        temperature: float = 1.0
        top_p: float = 1.0
        top_k: int = -1
        min_p: float = 0.0
        max_tokens: int = 16
        guided_decoding_regex: Optional[str] = None
        # NOTE(review): dataclasses.field inside a pydantic BaseModel — appears to rely on
        # pydantic honoring `default_factory`; confirm against the pydantic version in use.
        generation_kwargs: dict = field(default_factory=dict)

    class GenerateResponse(BaseModel):
        # Response schema for /generate/: one token-id list per generated completion.
        completion_ids: list[list[int]]

    @app.post("/generate/", response_model=GenerateResponse)
    async def generate(request: GenerateRequest):
        """
        Generates completions for the provided prompts.

        Args:
            request (`GenerateRequest`):
                - `prompts` (list of `str`): A list of prompts (text strings) for the model to generate completions.
                - `images` (list of `str`, *optional*, default to `None`): A list of base64 encoded images to process
                    along with prompts.
                - `n` (`int`, *optional*, defaults to `1`): Number of completions to generate for each prompt.
                - `repetition_penalty` (`float`, *optional*, defaults to `1.0`): Repetition penalty to apply during
                    generation.
                - `temperature` (`float`, *optional*, defaults to `1.0`): Temperature for sampling. Higher values lead
                    to more random outputs.
                - `top_p` (`float`, *optional*, defaults to `1.0`): Top-p (nucleus) sampling parameter. It controls the
                    diversity of the generated text.
                - `top_k` (`int`, *optional*, defaults to `-1`): Top-k sampling parameter. If set to `-1`, it disables
                    top-k sampling.
                - `min_p` (`float`, *optional*, defaults to `0.0`): Minimum probability threshold for sampling.
                - `max_tokens` (`int`, *optional*, defaults to `16`): Maximum number of tokens to generate for each
                    completion.
                - `guided_decoding_regex` (`str`, *optional*): A regex pattern for guided decoding. If provided, the
                    model will only generate tokens that match this regex pattern.
                - `generation_kwargs` (`dict`, *optional*): Additional generation parameters to pass to the vLLM
                    `SamplingParams`. This can include parameters like `seed`, `frequency_penalty`, etc. If it
                    contains keys that conflict with the other parameters, they will override them.

        Returns:
            `GenerateResponse`:
                - `completion_ids` (list of list of `int`): A list of lists of token IDs for each generated completion.

        Example request:
        ```json
        {"prompts": ["Hello world", "What is AI?"]}
        ```

        Example response:
        ```json
        {"completion_ids": [[101, 102, 103], [201, 202, 203]]}
        ```
        """
        # Pad the images list with None so prompts and images zip one-to-one.
        request.images = request.images or [None] * len(request.prompts)
        prompts = []
        for prompt, image in zip(request.prompts, request.images):
            row = {"prompt": prompt}
            if image is not None:
                # Images arrive base64-encoded; decode into a PIL image for vLLM multi-modal input.
                row["multi_modal_data"] = {"image": Image.open(BytesIO(base64.b64decode(image)))}
            prompts.append(row)

        # Guided decoding, if enabled
        if request.guided_decoding_regex is not None:
            guided_decoding = GuidedDecodingParams(backend="outlines", regex=request.guided_decoding_regex)
        else:
            guided_decoding = None

        generation_kwargs = {
            "n": request.n,
            "repetition_penalty": request.repetition_penalty,
            "temperature": request.temperature,
            "top_p": request.top_p,
            "top_k": request.top_k,
            "min_p": request.min_p,
            "max_tokens": request.max_tokens,
            "guided_decoding": guided_decoding,
        }
        # Caller-supplied kwargs intentionally win over the explicit fields above.
        generation_kwargs.update(request.generation_kwargs)
        sampling_params = SamplingParams(**generation_kwargs)

        # Evenly distribute prompts across DP ranks
        chunked_prompts = chunk_list(prompts, script_args.data_parallel_size)

        # Send the prompts to each worker
        for connection, prompts in zip(connections, chunked_prompts):
            # When the number of prompts is less than data_parallel_size, some workers will receive empty prompts.
            # However, vLLM requires that we always send at least one prompt. So we send a placeholder prompt to comply
            # with vLLM's requirement, and we later ignore the result.
            if not prompts:
                prompts = ["<placeholder>"]
            kwargs = {"prompts": prompts, "sampling_params": sampling_params}
            connection.send({"type": "call", "method": "generate", "kwargs": kwargs})

        # Receive results
        all_outputs = [connection.recv() for connection in connections]

        # Handle empty prompts (see above)
        # Drop results from ranks that only received the placeholder prompt.
        all_outputs = [output for output, prompts in zip(all_outputs, chunked_prompts) if prompts]

        # Flatten and combine all results
        all_outputs = list(chain.from_iterable(all_outputs))  # from list of list to single list
        completion_ids = [list(output.token_ids) for outputs in all_outputs for output in outputs.outputs]
        return {"completion_ids": completion_ids}

    class InitCommunicatorRequest(BaseModel):
        # Request schema for /init_communicator/.
        host: str
        port: int
        world_size: int
        client_device_uuid: str

    @app.post("/init_communicator/")
    async def init_communicator(request: InitCommunicatorRequest):
        """
        Initializes the communicator for synchronizing model weights between a client and multiple server workers.

        Args:
            request (`InitCommunicatorRequest`):
                - `host` (`str`): Hostname or IP address of the master node.
                - `port` (`int`): Port number to be used for communication.
                - `world_size` (`int`): Total number of participating processes in the group.
                - `client_device_uuid` (`str`): UUID of the device of client main process. Used to assert that devices
                    are different from vLLM workers devices.
        """
        # +1 accounts for the client process joining the weight-sync group alongside all workers.
        world_size = script_args.tensor_parallel_size * script_args.data_parallel_size + 1

        # The function init_communicator is called this way: init_communicator(host, port, world_size)
        # So with collective_rpc we need to call it this way:
        # llm.collective_rpc(method="init_communicator", args=(host, port, world_size))
        kwargs = {
            "method": "init_communicator",
            "args": (request.host, request.port, world_size, request.client_device_uuid),
        }
        for connection in connections:
            connection.send({"type": "fire_and_forget", "method": "collective_rpc", "kwargs": kwargs})

        return {"message": "Request received, initializing communicator"}

    class UpdateWeightsRequest(BaseModel):
        # Request schema for /update_named_param/.
        name: str
        dtype: str
        shape: list[int]

    @app.post("/update_named_param/")
    async def update_named_param(request: UpdateWeightsRequest):
        """
        Updates the model weights with the provided tensor.

        Once this endpoint is called, the client process should broadcast the updated weights to all server workers.

        Args:
            request (`UpdateWeightsRequest`):
                - `name` (`str`): Name of the weight tensor being updated.
                - `dtype` (`str`): Data type of the weight tensor (e.g., `"torch.float32"`).
                - `shape` (list of `int`): Shape of the weight

        """
        # The function update_named_param is called this way: update_named_param("name", "torch.float32", (10, 10))
        # So with collective_rpc we need to call it this way:
        # llm.collective_rpc("update_named_param", args=("name", "torch.float32", (10, 10)))
        kwargs = {"method": "update_named_param", "args": (request.name, request.dtype, tuple(request.shape))}
        for connection in connections:
            connection.send({"type": "fire_and_forget", "method": "collective_rpc", "kwargs": kwargs})

        return {"message": "Request received, updating named parameter"}

    @app.post("/reset_prefix_cache/")
    async def reset_prefix_cache():
        """
        Resets the prefix cache for the model.
        """
        for connection in connections:
            connection.send({"type": "call", "method": "reset_prefix_cache"})
        # Wait for and collect all results
        all_outputs = [connection.recv() for connection in connections]
        # Overall success requires every worker to report a truthy result.
        success = all(output for output in all_outputs)
        return {"message": "Request received, resetting prefix cache status: " + str(success)}

    @app.post("/close_communicator/")
    async def close_communicator():
        """
        Closes the weight update group and cleans up associated resources.
        """
        kwargs = {"method": "close_communicator"}
        for connection in connections:
            connection.send({"type": "fire_and_forget", "method": "collective_rpc", "kwargs": kwargs})
        return {"message": "Request received, closing communicator"}

    # Start the server
    # uvicorn.run blocks until shutdown; worker teardown happens in `lifespan` above.
    uvicorn.run(app, host=script_args.host, port=script_args.port, log_level=script_args.log_level)


def make_parser(subparsers: argparse._SubParsersAction = None):
    """
    Build the argument parser for the vllm-serve script.

    If `subparsers` is provided, registers a `vllm-serve` subcommand on it; otherwise
    returns a standalone `TrlParser` over `ScriptArguments`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("vllm-serve", help="Run the vLLM serve script", dataclass_types=ScriptArguments)
    else:
        parser = TrlParser(ScriptArguments)
    return parser


if __name__ == "__main__":
    parser = make_parser()
    (script_args,) = parser.parse_args_and_config()
    main(script_args)
trl/trl/scripts/vllm_serve.py/0
{ "file_path": "trl/trl/scripts/vllm_serve.py", "repo_id": "trl", "token_count": 11453 }
635
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional, Union from transformers import TrainingArguments @dataclass class GRPOConfig(TrainingArguments): r""" Configuration class for the [`GRPOTrainer`]. This class includes only the parameters that are specific to GRPO training. For a full list of training arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may differ from those in [`~transformers.TrainingArguments`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: > Parameters that control the model and reference model model_init_kwargs (`str`, `dict[str, Any]` or `None`, *optional*, defaults to `None`): Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model` argument of the [`GRPOTrainer`] is provided as a string. disable_dropout (`bool`, *optional*, defaults to `False`): Whether to disable dropout in the model. This is useful for training with a reference model, as it prevents the model from generating different logprobs for the same input. 
> Parameters that control the data preprocessing remove_unused_columns (`bool`, *optional*, defaults to `False`): Whether to only keep the column `"prompt"` in the dataset. If you use a custom reward function that requires any column other than `"prompts"` and `"completions"`, you should keep this to `False`. max_prompt_length (`int` or `None`, *optional*, defaults to `512`): Maximum length of the prompt. If the prompt is longer than this value, it will be truncated left. num_generations (`int` or `None`, *optional*, defaults to `8`): Number of generations per prompt to sample. The effective batch size (num_processes * per_device_batch_size * gradient_accumulation_steps) must be evenly divisible by this value. max_completion_length (`int` or `None`, *optional*, defaults to `256`): Maximum length of the generated completion. ds3_gather_for_generation (`bool`, *optional*, defaults to `True`): This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation, improving generation speed. However, disabling this option allows training models that exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. Disabling this option is not compatible with vLLM generation. shuffle_dataset (`bool`, *optional*, defaults to `True`): Whether to shuffle the training dataset. > Parameters that control generation generation_batch_size: (`int` or `None`, *optional*, defaults to `None`): Batch size to use for generation. If `None`, it defaults to the effective training batch size: `per_device_train_batch_size * num_processes * steps_per_generation`. In other words, there is one generation batch processed per optimization step. Mutually exclusive with `steps_per_generation`. steps_per_generation: (`int` or `None`, *optional*, defaults to `None`): Number of steps per generation. If `None`, it defaults to `gradient_accumulation_steps`. Mutually exclusive with `generation_batch_size`. 
temperature (`float`, defaults to `1.0`): Temperature for sampling. The higher the temperature, the more random the completions. top_p (`float`, *optional*, defaults to `1.0`): Float that controls the cumulative probability of the top tokens to consider. Must be in (0, 1]. Set to `1.0` to consider all tokens. top_k (`int` or `None`, *optional*, defaults to `None`): Number of highest probability vocabulary tokens to keep for top-k-filtering. If `None`, top-k-filtering is disabled and all tokens are considered. min_p (`float` or `None`, *optional*, defaults to `None`): Minimum token probability, which will be scaled by the probability of the most likely token. It must be a value between `0.0` and `1.0`. Typical values are in the `0.01-0.2` range. repetition_penalty (`float`, *optional*, defaults to `1.0`): Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > `1.0` encourage the model to use new tokens, while values < `1.0` encourage the model to repeat tokens. use_transformers_paged (`bool`, *optional*, defaults to `False`): Whether to use the `transformers` paged implementation for generation. If set to `True`, the `transformers` paged implementation will be used for generation instead of the default padded implementation. This parameter is only effective when `use_vllm` is set to `False`. cache_implementation (`str` or `None`, *optional*, defaults to `None`): Implementation of the cache method for faster generation when `use_vllm` is set to `False`. generation_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): Additional keyword arguments to pass to `GenerationConfig` (if using transformers) or `SamplingParams` (if using vLLM) when sampling completions. This can be used to further customize the generation behavior, such as setting `suppress_tokens`, `num_beams`, etc. 
If it contains keys that conflict with the other generation parameters (like `min_p`, `top_p`, etc.), they will override them. > Parameters that control generation acceleration powered by vLLM use_vllm (`bool`, *optional*, defaults to `False`): Whether to use vLLM for generating completions. If set to `True`, the trainer will use vLLM for generation instead of the default model.generate(). Requires `vllm` to be installed. vllm_mode (`str`, *optional*, defaults to `"server"`): Mode to use for vLLM integration when `use_vllm` is set to `True`. Must be one of `"server"` or `"colocate"`. - `"server"`: The trainer will send generation requests to a separate vLLM server. Make sure a TRL vLLM server is running (start with `trl vllm-serve`). - `"colocate"`: vLLM will run in the same process and share the training GPUs. This avoids the need for a separate server but may cause resource contention with training. vllm_guided_decoding_regex (`str` or `None`, *optional*, defaults to `None`): Regex for vLLM guided decoding. If `None` (default), guided decoding is disabled. > Parameters that control the vLLM server (only used when `vllm_mode` is `"server"`) vllm_server_base_url (`str` or `None`, *optional*, defaults to `None`): Base URL for the vLLM server (e.g., `"http://localhost:8000"`). If provided, `vllm_server_host` and `vllm_server_port` are ignored. vllm_server_host (`str`, *optional*, defaults to `"0.0.0.0"`): Host of the vLLM server to connect to. Ignored if `vllm_server_base_url` is provided. vllm_server_port (`int`, *optional*, defaults to `8000`): Port of the vLLM server to connect to. Ignored if `vllm_server_base_url` is provided. vllm_server_timeout (`float`, *optional*, defaults to `240.0`): Total timeout duration in seconds to wait for the vLLM server to be up. If the server is not up after the timeout, a `ConnectionError` is raised. 
> Parameters that control colocated vLLM execution (only used when `vllm_mode` is `"colocate"`) vllm_gpu_memory_utilization (`float`, *optional*, defaults to `0.3`): Control the GPU memory utilization for vLLM. This setting only applies when `vllm_mode` is set to `"colocate"`. If you are using `vllm_mode="server"`, this parameter must be passed separately when launching the vLLM server via the `--vllm_gpu_memory_utilization` flag. vllm_tensor_parallel_size (`int`, *optional*, defaults to `1`): Control the tensor parallel size for vLLM. This setting only applies when `vllm_mode` is set to `"colocate"`. If you are using `vllm_mode="server"`, this parameter must be passed separately when launching the vLLM server via the `--vllm_tensor_parallel_size` flag. vllm_model_impl (`str`, *optional*, defaults to `"vllm"`): Model implementation to use for vLLM. Must be one of `"transformers"` or `"vllm"`. `"transformers"`: Use the `transformers` backend for model implementation. `"vllm"`: Use the `vllm` library for model implementation. > Parameters that control the training beta (`float`, *optional*, defaults to `0.0`): KL coefficient. If `0.0` (default), the reference model is not loaded, reducing memory usage and improving training speed. num_iterations (`int`, *optional*, defaults to `1`): Number of iterations per batch (denoted as μ in the algorithm). epsilon (`float`, *optional*, defaults to `0.2`): Epsilon value for clipping. delta (`float` or `None`, *optional*, defaults to `None`): Enables the upper clipping bound in two-sided GRPO loss when set to a float. If `None` (default), standard GRPO clipping is used. Recommended to be greater than `1 + ε` when enabled. This method is introduced in the [INTELLECT-2 tech report](https://huggingface.co/papers/2505.07291). epsilon_high (`float` or `None`, *optional*, defaults to `None`): Upper-bound epsilon value for clipping. If not specified, it defaults to the same value as the lower-bound specified in argument `epsilon`. 
Paper [DAPO](https://huggingface.co/papers/2503.14476) recommends `0.28`. importance_sampling_level (`str`, *optional*, defaults to `"token"`): Controls whether importance sampling ratios are computed at the `"token"` or `"sequence"` level. `"token"` keeps the raw per-token log-probability ratios (one weight per token). `"sequence"` averages the log-probability ratios across valid tokens to produce a single ratio per sequence. The [GSPO paper](https://huggingface.co/papers/2507.18071) shows that sequence-level sampling often yields more stable training and better alignment with sequence-level rewards. reward_weights (`list[float]` or `None`, *optional*, defaults to `None`): Weights for each reward function. Must match the number of reward functions. If `None`, all rewards are weighted equally with weight `1.0`. scale_rewards (`str` or `bool`, *optional*, defaults to `"group"`): Specifies the scaling strategy for rewards. Supported values are: - `True` or `"group"` (default): rewards are scaled by the standard deviation within each group, ensuring unit variance within a group. - `"batch"`: rewards are scaled by the standard deviation across the entire batch, as recommended in the [PPO Lite paper](https://huggingface.co/papers/2508.08221). - `False` or `"none"`: no scaling is applied. The [Dr. GRPO paper](https://huggingface.co/papers/2503.20783) recommends not scaling rewards, as scaling by the standard deviation introduces a question-level difficulty bias. loss_type (`str`, *optional*, defaults to `"dapo"`): Specifies the loss formulation to use. Supported values are: - `"grpo"`: Aggregates token-level losses by normalizing over sequence length. Not recommended due to length bias—this approach tends to prefer shorter completions with positive advantages and longer ones with negative advantages. - `"dr_grpo"`: Aggregates token-level losses by normalizing with a global constant. This method was introduced in the [Dr. 
GRPO paper](https://huggingface.co/papers/2503.20783) to eliminate length bias. The value of the constant corresponds to `max_completion_length`. - `"dapo"` (default): Aggregates token-level losses by normalizing with the number of active token in the global accumulated batch. This method was introduced in the [DAPO paper](https://huggingface.co/papers/2503.14476) to eliminate length bias. - `"bnpo"`: Aggregates token-level losses by normalizing with the number of active token in the local batch. Note that normalization is performed over the local batch only, so results may slightly vary depending on the local batch size, despite a constant effective batch size. When using `per_device_train_batch_size==1`, the loss is equivalent to the GRPO loss. mask_truncated_completions (`bool`, *optional*, defaults to `False`): When enabled, truncated completions are excluded from the loss calculation, preventing them from being incorrectly penalized and introducing noise during training. According to the [DAPO](https://huggingface.co/papers/2503.14476) paper, this is a good practice for training stability. sync_ref_model (`bool`, *optional*, defaults to `False`): Whether to synchronize the reference model with the active model every `ref_model_sync_steps` steps, using the `ref_model_mixup_alpha` parameter. This synchronization originates from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper. ref_model_mixup_alpha (`float`, *optional*, defaults to `0.6`): α parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which controls the mix between the current policy and the previous reference policy during updates. The reference policy is updated according to the equation: `π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you must set `sync_ref_model=True`. 
ref_model_sync_steps (`int`, *optional*, defaults to `512`): τ parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which determines how frequently the current policy is synchronized with the reference policy. To use this parameter, you must set `sync_ref_model=True`. top_entropy_quantile (`float`, *optional*, defaults to `1.0`): ρ parameter from [Beyond the 80/20 Rule](https://huggingface.co/papers/2506.01939). Keeps in the policy loss term only the top-ρ quantile of tokens by entropy of the probability distribution at each sequence position, improving results. Range: `[0.0-1.0]`. A value of `0.0` masks all but the highest entropy token; `1.0` keeps all tokens. The paper recommends a value of `0.2`. If used with `mask_truncated_completions=True`, only tokens from non-truncated completions are considered. use_liger_loss (`bool`, *optional*, defaults to `False`): Whether to use the Liger GRPO loss. > Parameters that control the logging log_completions (`bool`, *optional*, defaults to `False`): Whether to log a sample of (prompt, completion) pairs every `logging_steps` steps. If `rich` is installed, it prints the sample. If `wandb` logging is enabled, it logs it to `wandb`. num_completions_to_print (`int` or `None`, *optional*, defaults to `None`): Number of completions to print with `rich`. If `None`, all completions are logged. wandb_log_unique_prompts (`bool`, *optional*, defaults to `False`): Whether to log unique prompts in wandb. If `True`, only unique prompts are logged. If `False`, all prompts are logged. """ _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs"] # Parameters whose default values are overridden from TrainingArguments learning_rate: float = field( default=1e-6, metadata={"help": "The initial learning rate for AdamW."}, ) logging_steps: float = field( default=10, metadata={ "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. 
If smaller than 1, " "will be interpreted as ratio of total training steps." }, ) gradient_checkpointing: bool = field( default=True, metadata={ "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." }, ) bf16: Optional[bool] = field( default=None, metadata={ "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA " "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if " "`fp16` is not set." }, ) # Parameters that control the model and reference model model_init_kwargs: Optional[Union[dict, str]] = field( default=None, metadata={ "help": "Keyword arguments for `transformers.AutoModelForCausalLM.from_pretrained`, used when the `model` " "argument of the `GRPOTrainer` is provided as a string." }, ) disable_dropout: bool = field( default=False, metadata={ "help": "Whether to disable dropout in the model. This is useful for training with a reference model, as " "it prevents the model from generating different logprobs for the same input." }, ) # Parameters that control the data preprocessing # The default value remove_unused_columns is overwritten from the parent class, because in GRPO we usually rely on # additional columns to compute the reward remove_unused_columns: Optional[bool] = field( default=False, metadata={ "help": "Whether to only keep the column 'prompt' in the dataset. If you use a custom reward function " "that requires any column other than 'prompts' and 'completions', you should keep this to `False`." }, ) max_prompt_length: Optional[int] = field( default=512, metadata={ "help": "Maximum length of the prompt. If the prompt is longer than this value, it will be truncated left." }, ) num_generations: Optional[int] = field( default=8, metadata={ "help": "Number of generations to sample. 
The effective batch size (num_processes * per_device_batch_size " "* gradient_accumulation_steps) must be evenly divisible by this value." }, ) max_completion_length: Optional[int] = field( default=256, metadata={"help": "Maximum length of the generated completion."}, ) ds3_gather_for_generation: bool = field( default=True, metadata={ "help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for " "generation, improving generation speed. However, disabling this option allows training models that " "exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. Disabling this option " "is not compatible with vLLM generation." }, ) shuffle_dataset: Optional[bool] = field( default=True, metadata={"help": "Whether to shuffle the training dataset."}, ) # Parameters that control generation generation_batch_size: Optional[int] = field( default=None, metadata={ "help": "Batch size to use for generation. If `None`, it defaults to the effective training batch size: " "`per_device_train_batch_size * num_processes * steps_per_generation`." }, ) steps_per_generation: Optional[int] = field( default=None, metadata={"help": "Number of steps per generation. If `None`, it defaults to `gradient_accumulation_steps`."}, ) temperature: float = field( default=1.0, metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."}, ) top_p: float = field( default=1.0, metadata={ "help": "Float that controls the cumulative probability of the top tokens to consider. Must be in (0, 1]. " "Set to 1.0 to consider all tokens." }, ) top_k: Optional[int] = field( default=None, metadata={ "help": "Number of highest probability vocabulary tokens to keep for top-k-filtering. If `None`, " "top-k-filtering is disabled and all tokens are considered." 
}, ) min_p: Optional[float] = field( default=None, metadata={ "help": "Minimum token probability, which will be scaled by the probability of the most likely token. It " "must be a value between 0.0 and 1.0. Typical values are in the 0.01-0.2 range." }, ) generation_kwargs: Optional[dict] = field( default=None, metadata={ "help": "Additional keyword arguments to pass to `GenerationConfig` (if using transformers) or " "`SamplingParams` (if using vLLM) when sampling completions. This can be used to further customize the " "generation behavior, such as setting `suppress_tokens`, `num_beams`, etc. If it contains keys that " "conflict with the other generation parameters (like `min_p`, `top_p`, etc.), they will override them." }, ) repetition_penalty: float = field( default=1.0, metadata={ "help": "Float that penalizes new tokens based on whether they appear in the prompt and the generated " "text so far. Values > 1.0 encourage the model to use new tokens, while values < 1.0 encourage the model " "to repeat tokens." }, ) use_transformers_paged: bool = field( default=False, metadata={ "help": "Whether to use the `transformers` paged implementation for generation. If set to `True`, the " "`transformers` paged implementation will be used for generation instead of the default padded " "implementation. This parameter is only effective when `use_vllm` is set to `False`." }, ) cache_implementation: Optional[str] = field( default=None, metadata={"help": "Implementation of the cache method for faster generation when use_vllm is set to False."}, ) # Parameters that control generation acceleration powered by vLLM use_vllm: bool = field( default=False, metadata={ "help": "Whether to use vLLM for generating completions. If set to `True`, the trainer will use vLLM for " "generation instead of the default model.generate(). Requires `vllm` to be installed." 
}, ) vllm_server_base_url: Optional[str] = field( default=None, metadata={ "help": "Base URL for the vLLM server (e.g., 'http://localhost:8000'). If provided, `vllm_server_host` " "and `vllm_server_port` are ignored." }, ) vllm_mode: str = field( default="server", metadata={ "help": "Mode to use for vLLM integration when `use_vllm` is set to `True`. Must be one of `server` or " "`'colocate'`. `'server'`: The trainer will send generation requests to a separate vLLM server. Make sure " "a TRL vLLM server is running (start with `trl vllm-serve`). `'colocate'`: vLLM will run in the same " "process and share the training GPUs. This avoids the need for a separate server but may cause resource " "contention with training." }, ) vllm_model_impl: str = field( default="vllm", metadata={ "help": "Model implementation to use for vLLM. Must be one of `transformers` or `vllm`. `transformers`: " "Use the `transformers` backend for model implementation. `vllm`: Use the `vllm` library for " "model implementation." }, ) vllm_guided_decoding_regex: Optional[str] = field( default=None, metadata={"help": "Regex for vLLM guided decoding. If `None` (default), guided decoding is disabled."}, ) # Parameters that control the vLLM server (only used when `vllm_mode` is `"server"`) vllm_server_host: str = field( default="0.0.0.0", metadata={"help": "Host of the vLLM server to connect to. Ignored if vllm_server_base_url is provided."}, ) vllm_server_port: int = field( default=8000, metadata={"help": "Port of the vLLM server to connect to. Ignored if vllm_server_base_url is provided."}, ) vllm_server_timeout: float = field( default=240.0, metadata={ "help": "Total timeout duration in seconds to wait for the vLLM server to be up. If the server is not up " "after the timeout, a `ConnectionError` is raised." 
}, ) # Parameters that control colocated vLLM execution (only used when `vllm_mode` is `"colocate"`) vllm_gpu_memory_utilization: float = field( default=0.3, metadata={ "help": "Control the GPU memory utilization for vLLM. This setting only applies when `vllm_mode` is set " "to `'colocate'`. If you are using `vllm_mode='server'`, this parameter must be passed separately when " "launching the vLLM server via the `--vllm_gpu_memory_utilization` flag." }, ) vllm_tensor_parallel_size: int = field( default=1, metadata={ "help": "Control the tensor parallel size for vLLM. This setting only applies when `vllm_mode` is set " "to `'colocate'`. If you are using `vllm_mode='server'`, this parameter must be passed separately when " "launching the vLLM server via the `--vllm_tensor_parallel_size` flag." }, ) # Parameters that control the training beta: float = field( default=0.0, metadata={ "help": "KL coefficient. If `0.0` (default), the reference model is not loaded, reducing memory usage and " "improving training speed." }, ) num_iterations: int = field( default=1, metadata={"help": "Number of iterations per batch (denoted as μ in the algorithm)."}, ) epsilon: float = field( default=0.2, metadata={"help": "Epsilon value for clipping."}, ) delta: Optional[float] = field( default=None, metadata={ "help": "Enables the upper clipping bound in two-sided GRPO loss when set to a float. If `None` " "(default), standard GRPO clipping is used. Recommended to be greater than `1 + ε` when enabled. This " "method is introduced in the [INTELLECT-2 tech report](https://huggingface.co/papers/2505.07291)." }, ) epsilon_high: Optional[float] = field( default=None, metadata={ "help": "Upper-bound epsilon value for clipping. If not specified, it defaults to the same value as the " "lower-bound specified in argument `epsilon`. Paper DAPO recommends `0.28`." 
}, ) importance_sampling_level: str = field( default="token", metadata={ "help": "Controls whether importance sampling ratios are computed at the `'token'` or `'sequence'` level. " "`'token'` keeps the raw per-token log-probability ratios (one weight per token). `'sequence'` averages " "the log-probability ratios across valid tokens to produce a single ratio per sequence. The GSPO paper " "shows that sequence-level sampling often yields more stable training and better alignment with " "sequence-level rewards." }, ) reward_weights: Optional[list[float]] = field( default=None, metadata={ "help": "Weights for each reward function. Must match the number of reward functions. If `None`, all " "rewards are weighted equally with weight `1.0`." }, ) scale_rewards: str = field( default="group", metadata={ "help": "Specifies the scaling strategy for rewards. Supported values are: " "`True` or `group'` (default): rewards are scaled by the standard deviation within each group, ensuring " "unit variance within a group. " "`'batch'`: rewards are scaled by the standard deviation across the entire batch, as recommended in the " "PPO Lite paper. " "`False` or `'none'`: no scaling is applied. The Dr. GRPO paper recommends not scaling rewards, as " "scaling by the standard deviation introduces a question-level difficulty bias." }, ) loss_type: str = field( default="dapo", metadata={ "help": "Specifies the loss formulation to use. Supported values are 'grpo', 'dapo', 'bnpo', and " "'dr_grpo'. " "'grpo': Aggregates token-level losses by normalizing over sequence length. Not recommended due to length " "bias—this approach tends to prefer shorter completions with positive advantages and longer ones with " "negative advantages. " "'dapo' (default): Aggregates token-level losses by normalizing with the number of active token in the " "global accumulated batch. This method was introduced in the DAPO paper to eliminate length bias. 
" "'dr_grpo': Aggregates token-level losses by normalizing with a global constant. This method was " "introduced in the Dr. GRPO paper to eliminate length bias. The value of the constant corresponds to " "`max_completion_length`. " "'bnpo': Aggregates token-level losses by normalizing with the number of active token in the local batch. " "Note that normalization is performed over the local batch only, so results may slightly vary depending " "on the local batch size, despite a constant effective batch size. When using " "`per_device_train_batch_size==1`, the loss is equivalent to the GRPO loss." }, ) mask_truncated_completions: bool = field( default=False, metadata={ "help": "When enabled, truncated completions are excluded from the loss calculation, preventing them from " "being incorrectly penalized and introducing noise during training. According to the DAPO paper, this is " "a good practice for training stability." }, ) sync_ref_model: bool = field( default=False, metadata={ "help": "Whether to synchronize the reference model with the active model every `ref_model_sync_steps` " "steps, using the `ref_model_mixup_alpha` parameter." }, ) ref_model_mixup_alpha: float = field( default=0.6, metadata={ "help": "α parameter from the TR-DPO paper, which controls the mix between the current policy and the " "previous reference policy during updates. The reference policy is updated according to the equation: " "`π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you must set `sync_ref_model=True`." }, ) ref_model_sync_steps: int = field( default=512, metadata={ "help": "τ parameter from the TR-DPO paper, which determines how frequently the current policy is " "synchronized with the reference policy. To use this parameter, you must set `sync_ref_model=True`." }, ) top_entropy_quantile: float = field( default=1.0, metadata={ "help": "ρ parameter from Beyond the 80/20 Rule. 
Keeps in the policy loss term only the top-ρ quantile of " "tokens by entropy of the probability distribution at each sequence position, improving results. Range: " "[0.0-1.0]. A value of `0.0` masks all but the highest entropy token; `1.0` keeps all tokens. The paper " "recommends a value of `0.2`. If used with `mask_truncated_completions=True`, only tokens from " "non-truncated completions are considered." }, ) use_liger_loss: bool = field( default=False, metadata={"help": "Whether to use the Liger GRPO loss."}, ) # Parameters that control the logging log_completions: bool = field( default=False, metadata={ "help": "Whether to log a sample of (prompt, completion) pairs every `logging_steps` steps. If `rich` is " "installed, it prints the sample. If `wandb` logging is enabled, it logs it to `wandb`." }, ) num_completions_to_print: Optional[int] = field( default=None, metadata={"help": "Number of completions to print with `rich`. If `None`, all completions are logged."}, ) wandb_log_unique_prompts: Optional[bool] = field( default=False, metadata={ "help": "Whether to log unique prompts in wandb. If `True`, only unique prompts are logged. If `False`, " "all prompts are logged." 
        },
    )

    def __post_init__(self):
        # If neither bf16 nor fp16 was requested explicitly, default to bf16
        # (bf16 is None means "unset"; fp16=True implies bf16=False).
        self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16
        super().__post_init__()

        num_processes = self.world_size

        # Reconcile `generation_batch_size` and `steps_per_generation`: exactly one of
        # them may be set by the user; the other is derived so that
        # generation_batch_size == per_device_train_batch_size * num_processes * steps_per_generation.
        # The current default effective batch size
        if self.generation_batch_size is None and self.steps_per_generation is None:
            # Neither set: default steps_per_generation to gradient_accumulation_steps.
            self.steps_per_generation = self.gradient_accumulation_steps
            self.generation_batch_size = self.per_device_train_batch_size * num_processes * self.steps_per_generation
        elif self.generation_batch_size is not None and self.steps_per_generation is None:
            # Just ensure the value is divisible by the global batch size
            if self.generation_batch_size % (self.per_device_train_batch_size * num_processes) != 0:
                raise ValueError(
                    f"generation_batch_size ({self.generation_batch_size}) must be divisible by the global batch size "
                    f"({self.per_device_train_batch_size * num_processes})."
                )
            self.steps_per_generation = self.generation_batch_size // (
                self.per_device_train_batch_size * num_processes
            )
        elif self.generation_batch_size is None and self.steps_per_generation is not None:
            self.generation_batch_size = self.per_device_train_batch_size * num_processes * self.steps_per_generation
        else:
            # Both set: ambiguous configuration, refuse it.
            raise ValueError(
                "'generation_batch_size' and 'steps_per_generation' can not be both configured at the same time"
            )

        if self.do_eval and self.eval_strategy != "no":
            # Just ensure the value is divisible by the global batch size
            if (self.per_device_eval_batch_size * num_processes) % self.num_generations != 0:
                raise ValueError(
                    f"The global eval batch size ({self.per_device_eval_batch_size} * {num_processes}) must be "
                    f"divisible by num_generations ({self.num_generations})."
                )

        # The generation batch must contain full prompt groups (no partials), so it must be divisible by
        # num_generations.
        if self.generation_batch_size % self.num_generations != 0:
            raise ValueError(
                f"generation_batch_size ({self.generation_batch_size}) must be divisible by num_generations "
                f"({self.num_generations})."
            )

        # Advantages are computed relative to other completions of the same prompt,
        # so a group of size 1 would make every advantage zero.
        if self.num_generations < 2:
            raise ValueError(
                "GRPO requires at least 2 generations per prompt to calculate the advantages. You provided "
                f"{self.num_generations}, which is less than the minimum required."
            )

        # The Liger GRPO loss implementation does not support the two-sided
        # (delta-clipped) loss variant yet.
        if self.delta is not None and self.use_liger_loss:
            raise ValueError("Liger loss does not support two-sided GRPO loss yet.")
trl/trl/trainer/grpo_config.py/0
{ "file_path": "trl/trl/trainer/grpo_config.py", "repo_id": "trl", "token_count": 14062 }
636
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Optional

from transformers import TrainingArguments


@dataclass
class PRMConfig(TrainingArguments):
    r"""
    Configuration class for the [`PRMTrainer`].

    This class includes only the parameters that are specific to PRM training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) used for truncation.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt used for truncation.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion used for truncation. The completion is the concatenation of the steps.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        step_separator (`str`, *optional*, defaults to `"\n"`):
            Separator used to separate each step of the reasoning process.
        train_on_last_step_only (`bool`, *optional*, defaults to `False`):
            Whether to train only on the last step.
        dataset_num_proc (`int`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
    """

    # Parameters whose default values are overridden from TrainingArguments
    learning_rate: float = field(
        default=1e-5,
        metadata={"help": "The initial learning rate for AdamW."},
    )
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
            "will be interpreted as ratio of total training steps."
        },
    )
    gradient_checkpointing: bool = field(
        default=True,
        metadata={
            "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
        },
    )
    # None means "unset": resolved in __post_init__ to `not fp16`.
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )
    # Note: In transformers>=4.54.0, `average_tokens_across_devices` defaults to True. Overriding this setting is only
    # needed for earlier versions. Once we require transformers>=4.54.0, this line can be safely removed.
    # See https://github.com/huggingface/transformers/pull/39395
    average_tokens_across_devices: bool = field(
        default=True,
        metadata={
            "help": "Whether or not to average tokens across devices. If enabled, will use all_reduce to synchronize "
            "num_tokens_in_batch for precise loss calculation. Reference: "
            "https://github.com/huggingface/transformers/issues/34242"
        },
    )

    # Parameters that control truncation of the tokenized inputs
    max_length: Optional[int] = field(
        default=1024,
        metadata={"help": "Maximum length of the sequences (prompt + completion) used for truncation."},
    )
    max_prompt_length: Optional[int] = field(
        default=512,
        metadata={"help": "Maximum length of the prompt used for truncation."},
    )
    max_completion_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "Maximum length of the completion used for truncation. The completion is the concatenation of the "
            "steps."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model and reference model."},
    )
    # Inserted between reasoning steps when concatenating them into the completion.
    step_separator: str = field(
        default="\n",
        metadata={"help": "Separator used to separate each step of the reasoning process."},
    )
    train_on_last_step_only: bool = field(
        default=False,
        metadata={"help": "Whether to train only on the last step."},
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )

    def __post_init__(self):
        # If neither bf16 nor fp16 was requested explicitly, default to bf16
        # (bf16 is None means "unset"; fp16=True implies bf16=False).
        self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16
        super().__post_init__()
trl/trl/trainer/prm_config.py/0
{ "file_path": "trl/trl/trainer/prm_config.py", "repo_id": "trl", "token_count": 1932 }
637
## Об организации процесса перевода в команде: 0. У нас Холакратия. Координатор команды - не менеджер! Он координирует работу команды. Чтобы все участники команды имели представление об общей картине, все ключевые моменты обсуждаются открыто в Issue. 1. Ставьте реальные сроки (если это возможно). Если это не возможно - не ставьте. Взятый вами блок - ваша ответственность, но вас никто не бросит. Не стесняйтесь спрашивать! 2. Если есть вопросы по применяемым инструментам, особенностям перевода - спрашивайте в Issue. 3. Старайтесь брать блоки для перевода последовательно. Тогда список эквивалентных слов будет максимально полезен. 4. Пополняйте список эквивалентных слов. Это упростит процесс перевода и существенно его ускорит. 5. Сообщения в commits и Pull Request в основной репозиторий курса пишите на английском языке. Это важно для последующей проверки PR. 6. Старайтесь не переводить по ночам) Хороша та работа, которую не нужно переделовать) ## Об использование вспомогательных средств (ИИ, нейронные сети, переводчики): 0. Можно и нужно! По другому сделать быстро и качественно - никак. К тому же будет забавно смотреться со стороны - перевод курса по ИИ без использования ИИ. Но обязательно проверьте свой перевод! Иногда там такая ахинея, ревьювер это сразу заметит))) 1. Если сомневаетесь в качестве, используйте альтернативный источник (например можно проверить перевод сделанный одним переводчиком с помощью другого, обсудить с командой). 2. Можно вообще ничего не использовать, если вы уверенны в своих знаниях английского и русского языка. Но в этом случае постарайтесь пожалуйста вначале выбирайте небольшие блоки для перевода. ## Соглашение по переводу: 1. Общепринятые в отрасли сокращения или аббревиатуры не переводится. За исключением случая введения нового термина и соответствующего ему сокращения. Как правило делается в определении. AI - ИИ LLM - БЯМ VLM - ЯМЗ 2. 
Нет смысла переводить текст, используемый в примерах (в том числе на картинках, так как картинки мы не переводим).
п.п 1 соглашения по переводу | | Environment | Окружение | | | Intentional | Намеренными | | | Reasoning | Reasoning | | | Task | Задача (не задание!) | | | Processing | Предобработка (предварительная обработка, препроцессинг) | | | transformer | трансформер | | | dense representation | плотное векторное представление | | | Named Entity Recognition | Распознавание Именованных Сущностей (Named Entity Recognition, NER)| | | interactive playground | интерактивная демонстрация | | | input prompts | инструкции для ввода | Так как они определяют то, как будет обработанны входные данные (текст) | | wording | формулировка | | | self-supervised | самообучение | | | masked language modeling | маскированное языковое моделирование | | | unseen data | ранее не встречавщиеся данные | Звучит лучше, чем ранее не виденные данные) | | unsupervised learning | обучение без учителя | Стандарт | | supervised learning | обучение с учителем | Стандарт | | notebook | блокнот | Стандарт | | checkout | изучить | Зависит от контекста | | Instruct Model | Инструктивная модель | Более благозвучный вариант предложен @tixonsit | | tokenizer | Токенизатор | | | Generic | Универсальный | Зависит от контекста | | What You’ll Learn | Что вы узнаете | | | Implement | Имплементировать | | | instruct-tuned | инструктивно дообученная | Более благозвучный вариант предложен @tixonsit | | aligned | выровнена | Более благозвучный вариант предложен @tixonsit | | Low-Rank Adaptation of Large Language Models | Низкоранговая адаптация Больших Языковых Моделей | | | share | распространять | | | state-of-the-art | передовые | | | feedback | обратная связь | | ## Ревью перевода (список часто встречающихся проблем, за чем важно следить!): (в разработке, на основе предыдущих работ с Марией Халюсовой) 1. Качество перевода, обоснованное отклонение от оригинала. 2. Лишний пробел между концом предложения и точкой. (Данная ошибка часто возникает при использовании машинного перевода, либо работы над переводом ночью). 3. 
Часто забываем переводить текст в картинках (в примере ниже это `alt="Visual Gif of Attention"`). Пример: ``` <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AttentionSceneFinal.gif" alt="Visual Gif of Attention" width="60%"> ```
agents-course/translation_agreements/ru/TRANSLATION_AGREEMENTS.md/0
{ "file_path": "agents-course/translation_agreements/ru/TRANSLATION_AGREEMENTS.md", "repo_id": "agents-course", "token_count": 5294 }
0
# Live 1: How the Course Works and First Q&A In this first live stream of the Agents Course, we explained how the course **works** (scope, units, challenges, and more) and answered your questions. <iframe width="560" height="315" src="https://www.youtube.com/embed/iLVyYDbdSmM?si=TCX5Ai3uZuKLXq45" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> To know when the next live session is scheduled, check our **Discord server**. We will also send you an email. If you can’t participate, don’t worry, we **record all live sessions**.
agents-course/units/en/communication/live1.mdx/0
{ "file_path": "agents-course/units/en/communication/live1.mdx", "repo_id": "agents-course", "token_count": 211 }
1
# What are Tools? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-2.jpg" alt="Unit 1 planning"/> One crucial aspect of AI Agents is their ability to take **actions**. As we saw, this happens through the use of **Tools**. In this section, we’ll learn what Tools are, how to design them effectively, and how to integrate them into your Agent via the System Message. By giving your Agent the right Tools—and clearly describing how those Tools work—you can dramatically increase what your AI can accomplish. Let’s dive in! ## What are AI Tools? A **Tool is a function given to the LLM**. This function should fulfill a **clear objective**. Here are some commonly used tools in AI agents: | Tool | Description | |----------------|---------------------------------------------------------------| | Web Search | Allows the agent to fetch up-to-date information from the internet. | | Image Generation | Creates images based on text descriptions. | | Retrieval | Retrieves information from an external source. | | API Interface | Interacts with an external API (GitHub, YouTube, Spotify, etc.). | Those are only examples, as you can in fact create a tool for any use case! A good tool should be something that **complements the power of an LLM**. For instance, if you need to perform arithmetic, giving a **calculator tool** to your LLM will provide better results than relying on the native capabilities of the model. Furthermore, **LLMs predict the completion of a prompt based on their training data**, which means that their internal knowledge only includes events prior to their training. Therefore, if your agent needs up-to-date data you must provide it through some tool. For instance, if you ask an LLM directly (without a search tool) for today's weather, the LLM will potentially hallucinate random weather. 
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/weather.jpg" alt="Weather"/> - A Tool should contain: - A **textual description of what the function does**. - A *Callable* (something to perform an action). - *Arguments* with typings. - (Optional) Outputs with typings. ## How do tools work? LLMs, as we saw, can only receive text inputs and generate text outputs. They have no way to call tools on their own. When we talk about providing tools to an Agent, we mean teaching the LLM about the existence of these tools and instructing it to generate text-based invocations when needed. For example, if we provide a tool to check the weather at a location from the internet and then ask the LLM about the weather in Paris, the LLM will recognize that this is an opportunity to use the “weather” tool. Instead of retrieving the weather data itself, the LLM will generate text that represents a tool call, such as call weather_tool('Paris'). The **Agent** then reads this response, identifies that a tool call is required, executes the tool on the LLM’s behalf, and retrieves the actual weather data. The Tool-calling steps are typically not shown to the user: the Agent appends them as a new message before passing the updated conversation to the LLM again. The LLM then processes this additional context and generates a natural-sounding response for the user. From the user’s perspective, it appears as if the LLM directly interacted with the tool, but in reality, it was the Agent that handled the entire execution process in the background. We'll talk a lot more about this process in future sessions. ## How do we give tools to an LLM? 
The complete answer may seem overwhelming, but we essentially use the system prompt to provide textual descriptions of available tools to the model: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt.png" alt="System prompt for tools"/> For this to work, we have to be very precise and accurate about: 1. **What the tool does** 2. **What exact inputs it expects** This is the reason why tool descriptions are usually provided using expressive but precise structures, such as computer languages or JSON. It's not _necessary_ to do it like that, any precise and coherent format would work. If this seems too theoretical, let's understand it through a concrete example. We will implement a simplified **calculator** tool that will just multiply two integers. This could be our Python implementation: ```python def calculator(a: int, b: int) -> int: """Multiply two integers.""" return a * b ``` So our tool is called `calculator`, it **multiplies two integers**, and it requires the following inputs: - **`a`** (*int*): An integer. - **`b`** (*int*): An integer. The output of the tool is another integer number that we can describe like this: - (*int*): The product of `a` and `b`. All of these details are important. Let's put them together in a text string that describes our tool for the LLM to understand. ```text Tool Name: calculator, Description: Multiply two integers., Arguments: a: int, b: int, Outputs: int ``` > **Reminder:** This textual description is *what we want the LLM to know about the tool*. When we pass the previous string as part of the input to the LLM, the model will recognize it as a tool, and will know what it needs to pass as inputs and what to expect from the output. If we want to provide additional tools, we must be consistent and always use the same format. This process can be fragile, and we might accidentally overlook some details. Is there a better way? 
### Auto-formatting Tool sections Our tool was written in Python, and the implementation already provides everything we need: - A descriptive name of what it does: `calculator` - A longer description, provided by the function's docstring comment: `Multiply two integers.` - The inputs and their type: the function clearly expects two `int`s. - The type of the output. There's a reason people use programming languages: they are expressive, concise, and precise. We could provide the Python source code as the _specification_ of the tool for the LLM, but the way the tool is implemented does not matter. All that matters is its name, what it does, the inputs it expects and the output it provides. We will leverage Python's introspection features to leverage the source code and build a tool description automatically for us. All we need is that the tool implementation uses type hints, docstrings, and sensible function names. We will write some code to extract the relevant portions from the source code. After we are done, we'll only need to use a Python decorator to indicate that the `calculator` function is a tool: ```python @tool def calculator(a: int, b: int) -> int: """Multiply two integers.""" return a * b print(calculator.to_string()) ``` Note the `@tool` decorator before the function definition. With the implementation we'll see next, we will be able to retrieve the following text automatically from the source code via the `to_string()` function provided by the decorator: ```text Tool Name: calculator, Description: Multiply two integers., Arguments: a: int, b: int, Outputs: int ``` As you can see, it's the same thing we wrote manually before! ### Generic Tool implementation We create a generic `Tool` class that we can reuse whenever we need to use a tool. > **Disclaimer:** This example implementation is fictional but closely resembles real implementations in most libraries. 
```python from typing import Callable class Tool: """ A class representing a reusable piece of code (Tool). Attributes: name (str): Name of the tool. description (str): A textual description of what the tool does. func (callable): The function this tool wraps. arguments (list): A list of arguments. outputs (str or list): The return type(s) of the wrapped function. """ def __init__(self, name: str, description: str, func: Callable, arguments: list, outputs: str): self.name = name self.description = description self.func = func self.arguments = arguments self.outputs = outputs def to_string(self) -> str: """ Return a string representation of the tool, including its name, description, arguments, and outputs. """ args_str = ", ".join([ f"{arg_name}: {arg_type}" for arg_name, arg_type in self.arguments ]) return ( f"Tool Name: {self.name}," f" Description: {self.description}," f" Arguments: {args_str}," f" Outputs: {self.outputs}" ) def __call__(self, *args, **kwargs): """ Invoke the underlying function (callable) with provided arguments. """ return self.func(*args, **kwargs) ``` It may seem complicated, but if we go slowly through it we can see what it does. We define a **`Tool`** class that includes: - **`name`** (*str*): The name of the tool. - **`description`** (*str*): A brief description of what the tool does. - **`function`** (*callable*): The function the tool executes. - **`arguments`** (*list*): The expected input parameters. - **`outputs`** (*str* or *list*): The expected outputs of the tool. - **`__call__()`**: Calls the function when the tool instance is invoked. - **`to_string()`**: Converts the tool's attributes into a textual representation. 
We could create a Tool with this class using code like the following: ```python calculator_tool = Tool( "calculator", # name "Multiply two integers.", # description calculator, # function to call [("a", "int"), ("b", "int")], # inputs (names and types) "int", # output ) ``` But we can also use Python's `inspect` module to retrieve all the information for us! This is what the `@tool` decorator does. > If you are interested, you can disclose the following section to look at the decorator implementation. <details> <summary> decorator code</summary> ```python import inspect def tool(func): """ A decorator that creates a Tool instance from the given function. """ # Get the function signature signature = inspect.signature(func) # Extract (param_name, param_annotation) pairs for inputs arguments = [] for param in signature.parameters.values(): annotation_name = ( param.annotation.__name__ if hasattr(param.annotation, '__name__') else str(param.annotation) ) arguments.append((param.name, annotation_name)) # Determine the return annotation return_annotation = signature.return_annotation if return_annotation is inspect._empty: outputs = "No return annotation" else: outputs = ( return_annotation.__name__ if hasattr(return_annotation, '__name__') else str(return_annotation) ) # Use the function's docstring as the description (default if None) description = func.__doc__ or "No description provided." 
# The function name becomes the Tool name name = func.__name__ # Return a new Tool instance return Tool( name=name, description=description, func=func, arguments=arguments, outputs=outputs ) ``` </details> Just to reiterate, with this decorator in place we can implement our tool like this: ```python @tool def calculator(a: int, b: int) -> int: """Multiply two integers.""" return a * b print(calculator.to_string()) ``` And we can use the `Tool`'s `to_string` method to automatically retrieve a text suitable to be used as a tool description for an LLM: ```text Tool Name: calculator, Description: Multiply two integers., Arguments: a: int, b: int, Outputs: int ``` The description is **injected** in the system prompt. Taking the example with which we started this section, here is how it would look like after replacing the `tools_description`: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt_tools.png" alt="System prompt for tools"/> In the [Actions](actions) section, we will learn more about how an Agent can **Call** this tool we just created. ### Model Context Protocol (MCP): a unified tool interface Model Context Protocol (MCP) is an **open protocol** that standardizes how applications **provide tools to LLMs**. MCP provides: - A growing list of pre-built integrations that your LLM can directly plug into - The flexibility to switch between LLM providers and vendors - Best practices for securing your data within your infrastructure This means that **any framework implementing MCP can leverage tools defined within the protocol**, eliminating the need to reimplement the same tool interface for each framework. If you want to dive deeper about MCP, you can check our [free MCP Course](https://huggingface.co/learn/mcp-course/). --- Tools play a crucial role in enhancing the capabilities of AI agents. 
To summarize, we learned: - *What Tools Are*: Functions that give LLMs extra capabilities, such as performing calculations or accessing external data. - *How to Define a Tool*: By providing a clear textual description, inputs, outputs, and a callable function. - *Why Tools Are Essential*: They enable Agents to overcome the limitations of static model training, handle real-time tasks, and perform specialized actions. Now, we can move on to the [Agent Workflow](agent-steps-and-structure) where you’ll see how an Agent observes, thinks, and acts. This **brings together everything we’ve covered so far** and sets the stage for creating your own fully functional AI Agent. But first, it's time for another short quiz!
agents-course/units/en/unit1/tools.mdx/0
{ "file_path": "agents-course/units/en/unit1/tools.mdx", "repo_id": "agents-course", "token_count": 4175 }
2
# Introduction to LlamaIndex

Welcome to this module, where you’ll learn how to build LLM-powered agents using the [LlamaIndex](https://www.llamaindex.ai/) toolkit.

LlamaIndex is **a complete toolkit for creating LLM-powered agents over your data using indexes and workflows**. For this course we'll focus on three main parts that help build agents in LlamaIndex: **Components**, **Agents and Tools** and **Workflows**.

![LlamaIndex](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/thumbnail.png)

Let's look at these key parts of LlamaIndex and how they help with agents:

- **Components**: Are the basic building blocks you use in LlamaIndex. These include things like prompts, models, and databases. Components often help connect LlamaIndex with other tools and libraries.
- **Tools**: Tools are components that provide specific capabilities like searching, calculating, or accessing external services. They are the building blocks that enable agents to perform tasks.
- **Agents**: Agents are autonomous components that can use tools and make decisions. They coordinate tool usage to accomplish complex goals.
- **Workflows**: Are step-by-step processes that chain components and logic together. Workflows or agentic workflows are a way to structure agentic behaviour without the explicit use of agents.


## What Makes LlamaIndex Special?

While LlamaIndex does some things similar to other frameworks like smolagents, it has some key benefits:

- **Clear Workflow System**: Workflows help break down how agents should make decisions step by step using an event-driven and async-first syntax. This helps you clearly compose and organize your logic.
- **Advanced Document Parsing with LlamaParse**: LlamaParse was made specifically for LlamaIndex, so the integration is seamless, although it is a paid feature.
- **Many Ready-to-Use Components**: LlamaIndex has been around for a while, so it works with lots of other frameworks. 
This means it has many tested and reliable components, like LLMs, retrievers, indexes, and more. - **LlamaHub**: is a registry of hundreds of these components, agents, and tools that you can use within LlamaIndex. All of these concepts are required in different scenarios to create useful agents. In the following sections, we will go over each of these concepts in detail. After mastering the concepts, we will use our learnings to **create applied use cases with Alfred the agent**! Getting our hands on LlamaIndex is exciting, right? So, what are we waiting for? Let's get started with **finding and installing the integrations we need using LlamaHub! 🚀**
agents-course/units/en/unit2/llama-index/introduction.mdx/0
{ "file_path": "agents-course/units/en/unit2/llama-index/introduction.mdx", "repo_id": "agents-course", "token_count": 636 }
3
<CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_agents.ipynb"}, ]} askForHelpUrl="http://hf.co/join/discord" /> # Vision Agents with smolagents <Tip warning={true}> The examples in this section require access to a powerful VLM model. We tested them using the GPT-4o API. However, <a href="./why_use_smolagents">Why use smolagents</a> discusses alternative solutions supported by smolagents and Hugging Face. If you'd like to explore other options, be sure to check that section. </Tip> Empowering agents with visual capabilities is crucial for solving tasks that go beyond text processing. Many real-world challenges, such as web browsing or document understanding, require analyzing rich visual content. Fortunately, `smolagents` provides built-in support for vision-language models (VLMs), enabling agents to process and interpret images effectively. In this example, imagine Alfred, the butler at Wayne Manor, is tasked with verifying the identities of the guests attending the party. As you can imagine, Alfred may not be familiar with everyone arriving. To help him, we can use an agent that verifies their identity by searching for visual information about their appearance using a VLM. This will allow Alfred to make informed decisions about who can enter. Let's build this example! ## Providing Images at the Start of the Agent's Execution <Tip> You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab. </Tip> In this approach, images are passed to the agent at the start and stored as `task_images` alongside the task prompt. The agent then processes these images throughout its execution. 
Consider the case where Alfred wants to verify the identities of the superheroes attending the party. He already has a dataset of images from previous parties with the names of the guests. Given a new visitor's image, the agent can compare it with the existing dataset and make a decision about letting them in. In this case, a guest is trying to enter, and Alfred suspects that this visitor might be The Joker impersonating Wonder Woman. Alfred needs to verify their identity to prevent anyone unwanted from entering. Let’s build the example. First, the images are loaded. In this case, we use images from Wikipedia to keep the example minimal, but imagine the possible use-case! ```python from PIL import Image import requests from io import BytesIO image_urls = [ "https://upload.wikimedia.org/wikipedia/commons/e/e8/The_Joker_at_Wax_Museum_Plus.jpg", # Joker image "https://upload.wikimedia.org/wikipedia/en/9/98/Joker_%28DC_Comics_character%29.jpg" # Joker image ] images = [] for url in image_urls: headers = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36" } response = requests.get(url,headers=headers) image = Image.open(BytesIO(response.content)).convert("RGB") images.append(image) ``` Now that we have the images, the agent will tell us whether one guest is actually a superhero (Wonder Woman) or a villain (The Joker). ```python from smolagents import CodeAgent, OpenAIServerModel model = OpenAIServerModel(model_id="gpt-4o") # Instantiate the agent agent = CodeAgent( tools=[], model=model, max_steps=20, verbosity_level=2 ) response = agent.run( """ Describe the costume and makeup that the comic character in these photos is wearing and return the description. Tell me if the guest is The Joker or Wonder Woman. 
""", images=images ) ``` In the case of my run, the output is the following, although it could vary in your case, as we've already discussed: ```python { 'Costume and Makeup - First Image': ( 'Purple coat and a purple silk-like cravat or tie over a mustard-yellow shirt.', 'White face paint with exaggerated features, dark eyebrows, blue eye makeup, red lips forming a wide smile.' ), 'Costume and Makeup - Second Image': ( 'Dark suit with a flower on the lapel, holding a playing card.', 'Pale skin, green hair, very red lips with an exaggerated grin.' ), 'Character Identity': 'This character resembles known depictions of The Joker from comic book media.' } ``` In this case, the output reveals that the person is impersonating someone else, so we can prevent The Joker from entering the party! ## Providing Images with Dynamic Retrieval <Tip> You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_web_browser.py" target="_blank">this Python file</a> </Tip> The previous approach is valuable and has many potential use cases. However, in situations where the guest is not in the database, we need to explore other ways of identifying them. One possible solution is to dynamically retrieve images and information from external sources, such as browsing the web for details. In this approach, images are dynamically added to the agent's memory during execution. As we know, agents in `smolagents` are based on the `MultiStepAgent` class, which is an abstraction of the ReAct framework. This class operates in a structured cycle where various variables and knowledge are logged at different stages: 1. **SystemPromptStep:** Stores the system prompt. 2. **TaskStep:** Logs the user query and any provided input. 3. **ActionStep:** Captures logs from the agent's actions and results. This structured approach allows agents to incorporate visual information dynamically and respond adaptively to evolving tasks. 
Below is the diagram we've already seen, illustrating the dynamic workflow process and how different steps integrate within the agent lifecycle. When browsing, the agent can take screenshots and save them as `observation_images` in the `ActionStep`. ![Dynamic image retrieval](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/smolagents-can-see/diagram_adding_vlms_smolagents.png) Now that we understand the need, let's build our complete example. In this case, Alfred wants full control over the guest verification process, so browsing for details becomes a viable solution. To complete this example, we need a new set of tools for the agent. Additionally, we'll use Selenium and Helium, which are browser automation tools. This will allow us to build an agent that explores the web, searching for details about a potential guest and retrieving verification information. Let's install the tools needed: ```bash pip install "smolagents[all]" helium selenium python-dotenv ``` We'll need a set of agent tools specifically designed for browsing, such as `search_item_ctrl_f`, `go_back`, and `close_popups`. These tools allow the agent to act like a person navigating the web. ```python @tool def search_item_ctrl_f(text: str, nth_result: int = 1) -> str: """ Searches for text on the current page via Ctrl + F and jumps to the nth occurrence. Args: text: The text to search for nth_result: Which occurrence to jump to (default: 1) """ elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]") if nth_result > len(elements): raise Exception(f"Match n°{nth_result} not found (only {len(elements)} matches found)") result = f"Found {len(elements)} matches for '{text}'." 
elem = elements[nth_result - 1] driver.execute_script("arguments[0].scrollIntoView(true);", elem) result += f"Focused on element {nth_result} of {len(elements)}" return result @tool def go_back() -> None: """Goes back to previous page.""" driver.back() @tool def close_popups() -> str: """ Closes any visible modal or pop-up on the page. Use this to dismiss pop-up windows! This does not work on cookie consent banners. """ webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform() ``` We also need functionality for saving screenshots, as this will be an essential part of what our VLM agent uses to complete the task. This functionality captures the screenshot and saves it in `step_log.observations_images = [image.copy()]`, allowing the agent to store and process the images dynamically as it navigates. ```python def save_screenshot(step_log: ActionStep, agent: CodeAgent) -> None: sleep(1.0) # Let JavaScript animations happen before taking the screenshot driver = helium.get_driver() current_step = step_log.step_number if driver is not None: for step_logs in agent.logs: # Remove previous screenshots from logs for lean processing if isinstance(step_log, ActionStep) and step_log.step_number <= current_step - 2: step_logs.observations_images = None png_bytes = driver.get_screenshot_as_png() image = Image.open(BytesIO(png_bytes)) print(f"Captured a browser screenshot: {image.size} pixels") step_log.observations_images = [image.copy()] # Create a copy to ensure it persists, important! # Update observations with current URL url_info = f"Current url: {driver.current_url}" step_log.observations = url_info if step_logs.observations is None else step_log.observations + "\n" + url_info return ``` This function is passed to the agent as `step_callback`, as it's triggered at the end of each step during the agent's execution. This allows the agent to dynamically capture and store screenshots throughout its process. 
Now, we can generate our vision agent for browsing the web, providing it with the tools we created, along with the `DuckDuckGoSearchTool` to explore the web. This tool will help the agent retrieve necessary information for verifying guests' identities based on visual cues. ```python from smolagents import CodeAgent, OpenAIServerModel, DuckDuckGoSearchTool model = OpenAIServerModel(model_id="gpt-4o") agent = CodeAgent( tools=[DuckDuckGoSearchTool(), go_back, close_popups, search_item_ctrl_f], model=model, additional_authorized_imports=["helium"], step_callbacks=[save_screenshot], max_steps=20, verbosity_level=2, ) ``` With that, Alfred is ready to check the guests' identities and make informed decisions about whether to let them into the party: ```python agent.run(""" I am Alfred, the butler of Wayne Manor, responsible for verifying the identity of guests at party. A superhero has arrived at the entrance claiming to be Wonder Woman, but I need to confirm if she is who she says she is. Please search for images of Wonder Woman and generate a detailed visual description based on those images. Additionally, navigate to Wikipedia to gather key details about her appearance. With this information, I can determine whether to grant her access to the event. """ + helium_instructions) ``` You can see that we include `helium_instructions` as part of the task. This special prompt is aimed to control the navigation of the agent, ensuring that it follows the correct steps while browsing the web. 
Let's see how this works in the video below: <iframe width="560" height="315" src="https://www.youtube.com/embed/rObJel7-OLc?si=TnNwQ8rqXqun_pqE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> This is the final output: ```python Final answer: Wonder Woman is typically depicted wearing a red and gold bustier, blue shorts or skirt with white stars, a golden tiara, silver bracelets, and a golden Lasso of Truth. She is Princess Diana of Themyscira, known as Diana Prince in the world of men. ``` With all of that, we've successfully created our identity verifier for the party! Alfred now has the necessary tools to ensure only the right guests make it through the door. Everything is set to have a good time at Wayne Manor! ## Further Reading - [We just gave sight to smolagents](https://huggingface.co/blog/smolagents-can-see) - Blog describing the vision agent functionality. - [Web Browser Automation with Agents 🤖🌐](https://huggingface.co/docs/smolagents/examples/web_browser) - Example for Web browsing using a vision agent. - [Web Browser Vision Agent Example](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py) - Example for Web browsing using a vision agent.
agents-course/units/en/unit2/smolagents/vision_agents.mdx/0
{ "file_path": "agents-course/units/en/unit2/smolagents/vision_agents.mdx", "repo_id": "agents-course", "token_count": 3661 }
4
# Conclusión [[conclusion]]

¡Felicidades por terminar esta primera Unidad Bonus! 🥳

¡Acabas de **dominar la comprensión de las llamadas a funciones y cómo hacer fine-tuning de tu modelo para realizar llamadas a funciones**!

Si tenemos un consejo ahora, es intentar **hacer fine-tuning de diferentes modelos**. La **mejor manera de aprender es intentándolo.**

En la siguiente Unidad, aprenderás a usar **frameworks de última generación como `smolagents`, `LlamaIndex` y `LangGraph`**.

Finalmente, nos encantaría **escuchar lo que piensas del curso y cómo podemos mejorarlo**. Si tienes algún comentario, por favor 👉 [completa este formulario](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog)

### Sigue Aprendiendo, Mantente Genial 🤗
agents-course/units/es/bonus-unit1/conclusion.mdx/0
{ "file_path": "agents-course/units/es/bonus-unit1/conclusion.mdx", "repo_id": "agents-course", "token_count": 325 }
5
# Bienvenido/a al curso de 🤗 Agentes IA [[introduction]]

<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/thumbnail.jpg" alt="AI Agents Course thumbnail" width="100%"/>
<figcaption>The background of the image was generated using <a href="https://scenario.com/">Scenario.com</a>
</figcaption>
</figure>

¡Bienvenido/a al tema más emocionante en la IA en la actualidad: los **Agentes**!

Este curso gratuito te llevará en un viaje, **desde principiante hasta experto**, en comprender, usar y construir agentes IA.

Esta primera unidad te ayudará a comenzar:

- Descubre el **programa del curso**.
- **Elige el camino** que vas a tomar (ya sea el modo auditoría o el proceso de certificación).
- **Obtén más información sobre el proceso de certificación y las fechas de entrega**.
- Conoce a los miembros del equipo detrás del curso.
- Crea tu cuenta de **Hugging Face**.
- **Regístrate en el servidor de Discord**, y conócenos a nosotros y a tus compañeros.

¡Empecemos!

## ¿Qué esperar de este curso? [[expect]]

En este curso, aprenderás:

- 📖 Estudia los agentes de IA con **teoría, diseño y práctica**.
- 🧑‍💻 Aprende a **usar bibliotecas de agentes IA establecidas** como [smolagents](https://huggingface.co/docs/smolagents/en/index), [LangChain](https://www.langchain.com/) y [LlamaIndex](https://www.llamaindex.ai/).
- 💾 **Comparte tus agentes** en el Hugging Face Hub y explora agentes creados por la comunidad.
- 🏆 Participa en **retos** donde **evaluarás a tus agentes contra agentes de otros estudiantes**.
- 🎓 **Obtén un certificado** al completar las asignaciones.

¡Y mucho más!

Al final de este curso entenderás **cómo funcionan los agentes y cómo construir tus propios agentes usando las librerías y herramientas más recientes**.

¡No olvides **<a href="https://bit.ly/hf-learn-agents">registrarte a este curso</a>**!

(Respetamos tu privacidad. 
Recopilamos tu dirección de correo electrónico para poder **enviarte los enlaces cuando se publique cada unidad y darte información sobre los desafíos y actualizaciones**).

## ¿Cómo es el curso? [[course-look-like]]

El curso está compuesto por:

- *Unidades fundamentales*: donde aprenderás los **conceptos de Agentes en teoría**.
- *Prácticas*: donde aprenderás **a usar bibliotecas de Agentes IA establecidas** para entrenar tus agentes en entornos únicos. Estas secciones prácticas serán en **Hugging Face Spaces** con un entorno preconfigurado.
- *Prácticas de casos de uso*: donde aplicarás los conceptos que has aprendido para resolver un problema que elijas.
- *Desafío*: pondrás a tu agente a competir contra otros agentes en un desafío. También habrá una [tabla de clasificación](https://huggingface.co/spaces/huggingface-projects/AI-Agents-Leaderboard) (aún no disponible) para que compares el rendimiento de los agentes.

¡Este **curso es un proyecto vivo, que evoluciona con tus comentarios y contribuciones**! No dudes en [abrir issues y PRs en GitHub](https://github.com/huggingface/agents-course), y participar en discusiones en nuestro servidor de Discord.

Después de haber completado el curso, también puedes enviar tus comentarios [👉 usando este formulario](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog)

## ¿Cuál es el programa? [[syllabus]]

Aquí está el **programa general del curso**. Una lista más detallada de temas se publicará con cada unidad.

| Capítulo | Tema | Descripción |
| :---- | :---- | :---- |
| 0 | Introducción | Te prepara con las herramientas y plataformas que utilizarás. |
| 1 | Fundamentos de Agentes | Explica Herramientas, Pensamientos, Acciones, Observaciones y sus formatos. Explica LLMs, mensajes, tokens especiales y plantillas de chat. Muestra un caso de uso simple usando funciones de Python como herramientas. 
| | 1.5 | Bonus: Fine-tuning de un LLM para llamadas a funciones | Usemos LoRa y hagamos fine-tuning de un modelo para realizar llamadas a funciones dentro de un notebook. | | 2 | Frameworks | Comprende como se implementan los fundamentos en bibliotecas populares: smolagents, LangGraph, LLamaIndex | | 3 | Casos de Uso | Construyamos algunos casos de uso reales (abierto a PRs 🤗 de personas con experiencia creando Agentes) | | 4 | Asignación Final | Construye un agente para un benchmark seleccionado y demuestra tu comprensión de los Agentes en la tabla de clasificación de estudiantes 🚀 | *También estamos planeando lanzar algunas unidades adicionales, ¡mantente al tanto!* ## ¿Cuáles son los requisitos previos? Para poder seguir este curso deberías tener: - Conocimientos básicos de Python - Conocimientos básicos de LLMs (tenemos una sección en la Unidad 1 para recapitular qué son) ## ¿Qué herramientas necesito? [[tools]] Solo necesitas 2 cosas: - *Una computadora* con conexión a internet. - Una *Cuenta de Hugging Face*: para subir y cargar modelos, agentes y crear Spaces. Si aun no tienes una cuenta, puedes crear una **[aquí](https://hf.co/join)** (es gratis). <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/tools.jpg" alt="Herramientas necesarias para el curso" width="100%"/> ## El Proceso de Certificación [[certification-process]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/three-paths.jpg" alt="Dos caminos" width="100%"/> Puedes elegir seguir este curso *en modo auditoría*, o hacer las actividades y *obtener uno de los dos certificados que emitiremos*. Si auditas el curso, puedes participar en todos los desafíos y hacer asignaciones si quieres, y **no necesitas notificarnos**. El proceso de certificación es **completamente gratuito**: - *Para obtener una certificación de fundamentos*: necesitas completar la Unidad 1 del curso. 
Esto está destinado a estudiantes que quieren ponerse al día con las últimas tendencias en Agentes. - *Para obtener un certificado de finalización*: necesitas completar la Unidad 1, una de las asignaciones de casos de uso que propondremos durante el curso, y el desafío final. Hay una fecha límite para el proceso de certificación: todas las asignaciones deben terminarse antes del **1 de mayo de 2025**. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/deadline.jpg" alt="Fecha límite" width="100%"/> ## ¿Cuál es el ritmo recomendado? [[recommended-pace]] Cada capítulo de este curso está diseñado **para completarse en 1 semana, con aproximadamente 3-4 horas de trabajo por semana**. Como hay una fecha límite, te proporcionamos un ritmo recomendado: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/recommended-pace.jpg" alt="Ritmo recomendado" width="100%"/> ## ¿Cómo aprovechar al máximo el curso? [[advice]] Para aprovechar al máximo el curso, tenemos algunos consejos: 1. <a href="https://discord.gg/UrrTSsSyjb">Únete a grupos de estudio en Discord</a>: estudiar en grupos siempre es más fácil. Para hacerlo, necesitas unirte a nuestro servidor de Discord y verificar tu cuenta de Hugging Face. 2. **Haz los cuestionarios y asignaciones**: la mejor manera de aprender es a través de la práctica y la autoevaluación. 3. **Define un horario para mantenerte sincronizado**: puedes usar nuestro horario de ritmo recomendado a continuación o crear el tuyo. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/advice.jpg" alt="Consejos del curso" width="100%"/> ## Quiénes somos [[who-are-we]] Sobre los autores: ### Joffrey Thomas Joffrey es un ingeniero de aprendizaje automático en Hugging Face y ha construido e implementado Agentes IA en producción. Joffrey será tu instructor principal para este curso. 
- [Sigue a Joffrey en Hugging Face](https://huggingface.co/Jofthomas) - [Sigue a Joffrey en X](https://x.com/Jthmas404) - [Sigue a Joffrey en Linkedin](https://www.linkedin.com/in/joffrey-thomas/) ### Ben Burtenshaw Ben es un ingeniero de aprendizaje automático en Hugging Face y ha impartido múltiples cursos en varias plataformas. El objetivo de Ben es hacer que el curso sea accesible para todos. - [Sigue a Ben en Hugging Face](https://huggingface.co/burtenshaw) - [Sigue a Ben en X](https://x.com/ben_burtenshaw) - [Sigue a Ben en Linkedin](https://www.linkedin.com/in/ben-burtenshaw/) ### Thomas Simonini Thomas es un ingeniero de aprendizaje automático en Hugging Face e impartió los exitosos cursos de <a href="https://huggingface.co/learn/deep-rl-course/unit0/introduction">Deep RL</a> y <a href="https://huggingface.co/learn/ml-games-course/en/unit0/introduction">ML para juegos</a>. Thomas es un gran fan de los Agentes y está emocionado de ver lo que la comunidad construirá. - [Sigue a Thomas en Hugging Face](https://huggingface.co/ThomasSimonini) - [Sigue a Thomas en X](https://x.com/ThomasSimonini) - [Sigue a Thomas en Linkedin](https://www.linkedin.com/in/simoninithomas/) ## Agradecimientos Nos gustaría extender nuestro agradecimiento a las siguientes personas por sus invaluables contribuciones a este curso: - **[Pedro Cuenca](https://huggingface.co/pcuenq)** – Por su orientación y experiencia en la revisión de los materiales. - **[Aymeric Roucher](https://huggingface.co/m-ric)** – Por sus increíbles espacios de demostración (decodificación y agente final) así como su ayuda en las partes de smolagents. - **[Joshua Lochner](https://huggingface.co/Xenova)** – Por su increíble espacio de demostración sobre tokenización. - **[Quentin Gallouédec](https://huggingface.co/qgallouedec)** – Por su ayuda en el contenido del curso. - **[David Berenstein](https://huggingface.co/davidberenstein1957)** – Por su ayuda en el contenido del curso y moderación. 
- **[XiaXiao (ShawnSiao)](https://huggingface.co/SSSSSSSiao)** – Traductor al chino para el curso. - **[Jiaming Huang](https://huggingface.co/nordicsushi)** – Traductor al chino para el curso. ## Encontré un error, o quiero mejorar el curso [[contribute]] Las contribuciones son **bienvenidas** 🤗 - Si *encontraste un error 🐛 en un notebook*, por favor <a href="https://github.com/huggingface/agents-course/issues">abre un issue</a> y **describe el problema**. - Si *quieres mejorar el curso*, puedes <a href="https://github.com/huggingface/agents-course/pulls">abrir un Pull Request.</a> - Si *quieres agregar una sección completa o una nueva unidad*, lo mejor es <a href="https://github.com/huggingface/agents-course/issues">abrir un issue</a> y **describir qué contenido quieres agregar antes de comenzar a escribirlo para que podamos guiarte**. ## Todavía tengo preguntas [[questions]] Por favor, haz tu pregunta en nuestro <a href="https://discord.gg/UrrTSsSyjb">servidor de discord #ai-agents-discussions.</a> Ahora que tienes toda la información, ¡vamos a bordo! ⛵ <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Hora de incorporarse" width="100%"/>
agents-course/units/es/unit0/introduction.mdx/0
{ "file_path": "agents-course/units/es/unit0/introduction.mdx", "repo_id": "agents-course", "token_count": 4085 }
6
# ¿Qué es un Agente? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-no-check.jpg" alt="Planificación de la Unidad 1"/> Al final de esta sección, te sentirás cómodo con el concepto de agentes y sus diversas aplicaciones en la IA. Para explicar qué es un Agente, comencemos con una analogía. ## La Imagen General: Alfred El Agente Conoce a Alfred. Alfred es un **Agente**. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/this-is-alfred.jpg" alt="Este es Alfred"/> Imagina que Alfred **recibe una orden**, como: "Alfred, me gustaría un café por favor." <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/coffee-please.jpg" alt="Me gustaría un café"/> Como Alfred **entiende el lenguaje natural**, comprende rápidamente nuestra petición. Antes de cumplir la orden, Alfred se involucra en un proceso de **razonamiento y planificación**, determinando los pasos y herramientas que necesita para: 1. Ir a la cocina 2. Usar la máquina de café 3. Preparar el café 4. Traer el café de vuelta <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/reason-and-plan.jpg" alt="Razonar y planificar"/> Una vez que tiene un plan, **debe actuar**. Para ejecutar su plan, **puede usar herramientas de la lista de herramientas que conoce**. En este caso, para hacer un café, usa una máquina de café. Activa la máquina de café para preparar el café. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/make-coffee.jpg" alt="Hacer café"/> Finalmente, Alfred nos trae el café recién preparado. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/bring-coffee.jpg" alt="Traer café"/> Y esto es lo que es un Agente: un **modelo de IA capaz de razonar, planificar e interactuar con su entorno**. 
Lo llamamos Agente porque tiene _agencia_, es decir, tiene la capacidad de interactuar con el entorno. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/process.jpg" alt="Proceso del Agente"/> ## Vamos a ser más formales Ahora que tienes la imagen general, aquí hay una definición más precisa: > Un Agente es un sistema que aprovecha un modelo de IA para interactuar con su entorno con el fin de lograr un objetivo definido por el usuario. Combina razonamiento, planificación y ejecución de acciones (a menudo a través de herramientas externas) para cumplir tareas. Piensa en el Agente como si tuviera dos partes principales: 1. **El Cerebro (Modelo de IA)** Aquí es donde ocurre todo el pensamiento. El modelo de IA **maneja el razonamiento y la planificación**. Decide **qué Acciones tomar según la situación**. 2. **El Cuerpo (Capacidades y Herramientas)** Esta parte representa **todo lo que el Agente está equipado para hacer**. El **alcance de las acciones posibles** depende de con qué **ha sido equipado** el agente. Por ejemplo, como los humanos carecen de alas, no pueden realizar la "Acción" de "volar", pero pueden ejecutar **Acciones** como "caminar", "correr", "saltar", "agarrar", etc. ## ¿Qué tipo de modelos de IA usamos para los Agentes? El modelo de IA más común en los Agentes es un LLM (Modelo de Lenguaje Grande), que toma **Texto** como entrada y también produce **Texto** como salida. Ejemplos conocidos son **GPT4** de **OpenAI**, **LLama** de **Meta**, **Gemini** de **Google**, etc. Estos modelos han sido entrenados con una gran cantidad de texto y son capaces de generalizar bien. Aprenderemos más sobre los LLMs en la [siguiente sección](what-are-llms.mdx). <Tip> También es posible usar modelos que aceptan otras entradas como modelo central del Agente. Por ejemplo, un Modelo de Lenguaje Visual (VLM), que es como un LLM pero también entiende imágenes como entrada. 
Por ahora nos centraremos en los LLMs y discutiremos otras opciones más adelante. </Tip> ## ¿Cómo actúa una IA sobre su entorno? Los LLMs son modelos asombrosos, pero **solo pueden generar texto**. Sin embargo, si le pides a una aplicación de chat conocida como HuggingChat o ChatGPT que genere una imagen, ¡pueden hacerlo! ¿Cómo es posible? La respuesta es que los desarrolladores de HuggingChat, ChatGPT y aplicaciones similares implementaron funcionalidades adicionales (llamadas **Herramientas**), que el LLM puede usar para crear imágenes. <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/eiffel_brocolis.jpg" alt="Eiffel Brócolis"/> <figcaption>El modelo usó una Herramienta de Generación de Imágenes para generar esta imagen. </figcaption> </figure> Aprenderemos más sobre herramientas en la sección de [Herramientas](tools.mdx). ## ¿Qué tipo de tareas puede hacer un Agente? Un Agente puede realizar cualquier tarea que implementemos a través de **Herramientas** para completar **Acciones**. Por ejemplo, si escribo un Agente para que actúe como mi asistente personal (como Siri) en mi computadora, y le pido que "envíe un correo electrónico a mi Gerente pidiendo retrasar la reunión de hoy", puedo darle un código para enviar correos electrónicos. Esta será una nueva Herramienta que el Agente puede usar cuando necesite enviar un correo electrónico. Podemos escribirlo en Python: ```python def send_message_to(recipient, message): """Útil para enviar un mensaje de correo electrónico a un destinatario""" ... ``` El LLM, como veremos, generará código para ejecutar la herramienta cuando la necesite, y así cumplir con la tarea deseada. ```python send_message_to("Gerente", "¿Podemos posponer la reunión de hoy?") ``` El **diseño de las Herramientas es muy importante y tiene un gran impacto en la calidad de tu Agente**. 
Algunas tareas requerirán Herramientas muy específicas, mientras que otras pueden resolverse con herramientas de propósito general como "búsqueda_web". > Ten en cuenta que **las Acciones no son lo mismo que las Herramientas**. Una Acción, por ejemplo, puede involucrar el uso de múltiples Herramientas para completarse. Permitir que un agente interactúe con su entorno **permite un uso en la vida real para empresas e individuos**. ### Ejemplo 1: Asistentes Virtuales Personales Asistentes virtuales como Siri, Alexa o Google Assistant, funcionan como agentes cuando interactúan en nombre de los usuarios utilizando sus entornos digitales. Toman las consultas de los usuarios, analizan el contexto, recuperan información de bases de datos y proporcionan respuestas o inician acciones (como establecer recordatorios, enviar mensajes o controlar dispositivos inteligentes). ### Ejemplo 2: Chatbots de Servicio al Cliente Muchas empresas implementan chatbots como agentes que interactúan con los clientes en lenguaje natural. Estos agentes pueden responder preguntas, guiar a los usuarios a través de pasos de solución de problemas, abrir incidencias en bases de datos internas o incluso completar transacciones. Sus objetivos predefinidos podrían incluir mejorar la satisfacción del usuario, reducir los tiempos de espera o aumentar las tasas de conversión de ventas. Al interactuar directamente con los clientes, aprender de los diálogos y adaptar sus respuestas con el tiempo, demuestran los principios fundamentales de un agente en acción. ### Ejemplo 3: Personaje No Jugable de IA en un videojuego Los agentes de IA impulsados por LLMs pueden hacer que los Personajes No Jugables (NPCs) sean más dinámicos e impredecibles. En lugar de seguir árboles de comportamiento rígidos, pueden **responder contextualmente, adaptarse a las interacciones del jugador** y generar diálogos más matizados. 
Esta flexibilidad ayuda a crear personajes más realistas y atractivos que evolucionan junto con las acciones del jugador. --- En resumen, un Agente es un sistema que utiliza un Modelo de IA (típicamente un LLM) como su motor de razonamiento central, para: - **Entender el lenguaje natural:** Interpretar y responder a las instrucciones humanas de manera significativa. - **Razonar y planificar:** Analizar información, tomar decisiones y diseñar estrategias para resolver problemas. - **Interactuar con su entorno:** Recopilar información, realizar acciones y observar los resultados de esas acciones. Ahora que tienes una comprensión sólida de lo que son los Agentes, reforcemos tu comprensión con un breve cuestionario sin calificación. Después de eso, profundizaremos en el "cerebro del Agente": los [LLMs](what-are-llms.mdx).
agents-course/units/es/unit1/what-are-agents.mdx/0
{ "file_path": "agents-course/units/es/unit1/what-are-agents.mdx", "repo_id": "agents-course", "token_count": 3132 }
7
ya que **probarse a sí mismo** es la mejor manera de aprender y [evitar la ilusión de competencia](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf). Esto te ayudará a encontrar **dónde debes reforzar tus conocimientos**.
<Question choices={[ { text: "Para manejar grandes cantidades de almacenamiento de datos.", explain: "FunctionTools no son principalmente para almacenamiento de datos.", }, { text: "Para convertir funciones de Python en herramientas que un agente puede usar.", explain: "FunctionTools envuelven funciones de Python para hacerlas accesibles a los agentes.", correct: true }, { text: "Para permitir que los agentes creen definiciones de funciones aleatorias.", explain: "FunctionTools sirven para un propósito específico: hacer que las funciones sean accesibles a los agentes.", }, { text: "Para procesar solo datos de texto.", explain: "FunctionTools pueden trabajar con varios tipos de funciones, no solo procesamiento de texto.", } ]} /> --- ### Q3: ¿Qué son los Toolspecs en LlamaIndex? ¿Cuál es el propósito principal de los Toolspecs? <Question choices={[ { text: "Son componentes redundantes que no añaden funcionalidad.", explain: "Los Toolspecs sirven para un propósito importante en el ecosistema de LlamaIndex.", }, { text: "Son conjuntos de herramientas creadas por la comunidad que amplían las capacidades de los agentes.", explain: "Los Toolspecs permiten a la comunidad compartir y reutilizar herramientas.", correct: true }, { text: "Solo se utilizan para manejo de memoria.", explain: "Los Toolspecs son sobre proporcionar herramientas, no manejo de memoria.", }, { text: "Solo trabajan con procesamiento de texto.", explain: "Los Toolspecs pueden incluir varios tipos de herramientas, no solo procesamiento de texto.", } ]} /> --- ### Q4: ¿Qué se requiere para crear una herramienta? ¿Qué información debe incluirse al crear una herramienta? 
¡Felicitaciones por completar este Quiz! 🥳 Si no entendiste algún elemento, toma el tiempo de releer el capítulo para reforzar tus conocimientos. Si lo pasaste, ¡estás listo para profundizar en el uso de estos componentes!
agents-course/units/es/unit2/llama-index/quiz1.mdx/0
{ "file_path": "agents-course/units/es/unit2/llama-index/quiz1.mdx", "repo_id": "agents-course", "token_count": 1495 }
8
# Finetunons un modèle pour pouvoir faire de l'appel de fonctions Nous sommes maintenant prêts à finetuner notre premier modèle pour de l'appel de fonctions 🔥. ## Comment entraînons-nous un tel modèle ? > Réponse : Nous avons besoin de **données** Un processus d'entraînement peut être divisé en 3 étapes : 1. **Le modèle est pré-entraîné sur une grande quantité de données**. Le résultat de cette étape est un **modèle pré-entraîné**. Par exemple, [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b). C'est un modèle de base et il sait seulement comment **prédire le prochain *token* sans fortes capacités de suivi d'instructions**. 2. Pour être utile dans un contexte de conversation, le modèle doit ensuite être **finetuné** pour suivre des instructions. À cette étape, il peut être entraîné par les créateurs du modèle, la communauté open-source, vous, ou n'importe qui. Par exemple, [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) est un modèle finetuné pour les instructions par l'équipe Google derrière le projet Gemma. 3. Le modèle peut ensuite être **aligné** selon les préférences du créateur. Par exemple, un modèle conversationnel d'un service client ne doit jamais être impoli avec l'utilisateur. Habituellement, un produit complet comme *Gemini* ou *Mistral* **sera passé par les 3 étapes**, alors que les modèles que vous pouvez trouver sur *Hugging Face* ont effectué une ou plusieurs étapes de cet entraînement. Dans ce tutoriel, nous allons construire un modèle d'appel de fonctions basé sur [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it). Nous choisissons le modèle [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) au lieu du modèle de base [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b) parce que le modèle finetuné a été amélioré pour notre cas d'usage. 
Partir du modèle pré-entraîné **nécessiterait plus d'entraînement pour apprendre le suivi d'instructions, le chat ET l'appel de fonctions**. En partant du modèle finetuné pour les instructions, **nous minimisons la quantité d'informations que notre modèle doit apprendre**. ## LoRA (Low-Rank Adaptation of Large Language Models) LoRA est une technique d'entraînement populaire et légère qui **réduit significativement le nombre de paramètres à entraîner**. Elle fonctionne en **insérant un lot d'adaptateurs constitués d'un petit nombre de nouveaux poids, dans le modèle à entraîner**. Cela rend l'entraînement avec LoRA beaucoup plus rapide, économe en mémoire, et produit des poids de modèle plus petits (quelques centaines de MB), qui sont plus faciles à stocker et partager. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/blog_multi-lora-serving_LoRA.gif" alt="LoRA inference" width="50%"/> LoRA fonctionne en ajoutant des paires de matrices de décomposition de rang aux couches d'un *transformer* (typiquement les couches linéaires). Durant l'entraînement, nous gèlons le reste du modèle et ne mettons à jour uniquement les poids de ces adaptateurs ajoutés. Ce faisant, le nombre de **paramètres** que nous devons entraîner diminue considérablement car nous devons seulement mettre à jour les poids des adaptateurs. Durant l'inférence, l'entrée est passée dans les adaptateurs et le modèle de base. Ou bien les poids des adaptateurs peuvent être fusionnés avec le modèle de base, ne résultant en aucune surcharge de latence supplémentaire. LoRA est particulièrement utile pour adapter de **grands** modèles de langage à des tâches ou domaines spécifiques tout en gardant les exigences de ressources gérables. Cela aide à réduire la mémoire **requise** pour entraîner un modèle. Si vous voulez en savoir plus sur comment LoRA fonctionne, vous devriez consulter ce [tutoriel](https://huggingface.co/learn/nlp-course/chapter11/4?fw=pt). 
## Finetuning d'un modèle pour l'appel de fonctions La suite de cette section se passe dans le *notebook* du tutoriel que vous pouvez accéder 👉 [ici](https://huggingface.co/agents-course/notebooks/blob/main/fr/bonus-unit1/bonus-unit1.ipynb). Ensuite, cliquez sur [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/fr/bonus-unit1/bonus-unit1.ipynb) pour pouvoir l'exécuter dans Colab.
agents-course/units/fr/bonus-unit1/fine-tuning.mdx/0
{ "file_path": "agents-course/units/fr/bonus-unit1/fine-tuning.mdx", "repo_id": "agents-course", "token_count": 1652 }
9
# Embarquement : vos premiers pas ⛵ <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Il est temps de démarrer" width="100%"/> Maintenant que vous avez tous les détails, commençons ! Nous allons réaliser quatre choses : 1. **Créer votre compte Hugging Face** si ce n'est pas déjà fait 2. **Vous inscrire à Discord et vous présenter** (ne soyez pas timide 🤗) 3. **Suivre le cours sur les agents** sur le 🤗 Hub 4. **Faire passer le mot** à propos du cours ### Étape 1 : Créer votre compte Hugging Face (Si ce n'est pas déjà fait) créez un compte Hugging Face <a href='https://huggingface.co/join' target='_blank'>ici</a>. ### Étape 2 : Rejoindre notre Discord 👉🏻 Rejoignez notre serveur Discord <a href="https://discord.gg/UrrTSsSyjb" target="_blank">ici.</a> Lorsque vous rejoignez, n'oubliez pas de vous présenter dans `#introduce-yourself`. Nous disposons de plusieurs canaux liés aux agents : - `agents-course-announcements` : pour les **dernières informations portant sur le cours**. - `🎓-agents-course-general` : pour **les discussions générales et les bavardages**. - `agents-course-questions` : pour **poser des questions et aider vos camarades**. - `agents-course-showcase` : pour **présenter vos meilleurs agents**. De plus, vous pouvez consulter : - `smolagents` : pour **les discussions et l'assistance concernant la bibliothèque**. Si c'est votre première utilisation de Discord, nous avons rédigé un guide d'introduction pour vous donner les meilleures pratiques. Consultez [la section suivante](discord101). ### Étape 3 : Suivre l'organisation *Hugging Face Agent Course* sur le 🤗 Hub Restez à jour avec les derniers matériels de cours, mises à jour, et annonces **en suivant l'organisation du cours sur le Hub**. 👉 Rendez-vous <a href="https://huggingface.co/agents-course" target="_blank">ici</a> et cliquez sur **suivre**. 
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/hf_course_follow.gif" alt="Suivre" width="100%"/> ### Étape 4 : Faites passer le mot à propos du cours Aidez-nous à rendre ce cours plus visible ! Il y a deux façons de nous aider : 1. Montrez votre soutien en <a href="https://github.com/huggingface/agents-course" target="_blank">laissant une étoile ⭐ sur le dépôt du cours</a>. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/please_star.gif" alt="Favoriser le dépôt"/> 2. Partagez votre parcours d'apprentissage : faites savoir aux autres **que vous suivez ce cours** ! Nous avons préparé une illustration que vous pouvez utiliser dans vos publications sur les réseaux sociaux. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png" alt="Partagez votre parcours d'apprentissage" width="100%"/> Vous pouvez télécharger l'image en cliquant 👉 [ici](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png?download=true) ### Étape 5 : Exécuter des modèles localement avec Ollama (En cas de limites de crédits) 1. **Installez Ollama** Suivez les instructions officielles <a href="https://ollama.com/download" target="_blank">ici.</a> 2. **Téléchargez un modèle localement** ``` bash ollama pull qwen2:7b # Consultez le site web d'Ollama pour plus de modèles ``` 3. **Démarrez Ollama en arrière-plan (dans un terminal)** ``` bash ollama serve ``` Si vous rencontrez l'erreur "*listen tcp 127.0.0.1:11434: bind: address already in use*", vous pouvez utiliser la commande `sudo lsof -i :11434` pour identifier l'ID du processus (PID) qui utilise actuellement ce port. Si le processus est `ollama`, il est probable que le script d'installation ci-dessus ait démarré le service ollama, vous pouvez donc ignorer cette commande pour démarrer Ollama. 4. 
**Utilisez `LiteLLMModel` au lieu de `InferenceClientModel`** Pour utiliser le module `LiteLLMModel` dans `smolagents`, vous pouvez exécuter la commande `pip` pour installer le module. ``` bash pip install 'smolagents[litellm]' ``` ``` python from smolagents import LiteLLMModel model = LiteLLMModel( model_id="ollama_chat/qwen2:7b", # Ou essayez d'autres modèles supportés par Ollama api_base="http://127.0.0.1:11434", # Serveur local Ollama par défaut num_ctx=8192, ) ``` 5. **Pourquoi cela fonctionne-t-il ?** - Ollama sert des modèles localement en utilisant une API compatible avec OpenAI à `http://localhost:11434`. - `LiteLLMModel` est conçu pour communiquer avec tout modèle qui supporte le format d'API OpenAI chat/completion. - Cela signifie que vous pouvez simplement remplacer `InferenceClientModel` par `LiteLLMModel` sans autres changements de code nécessaires. C'est une solution transparente et prête à l'emploi. Félicitations ! 🎉 **Vous avez terminé le processus d'embarquement** ! Vous êtes maintenant prêt à commencer à en apprendre plus sur les agents IA. Amusez-vous bien ! Continuez à apprendre, restez formidable 🤗
agents-course/units/fr/unit0/onboarding.mdx/0
{ "file_path": "agents-course/units/fr/unit0/onboarding.mdx", "repo_id": "agents-course", "token_count": 1898 }
10
Un *transformer* basé sur un encodeur prend en entrée un texte (ou d'autres données) et produit une représentation dense (aussi appelée *embedding*) de ce texte.
- **Exemple** : BERT de Google - **Cas d'utilisation** : Classification de texte, recherche sémantique, reconnaissance d'entités nommées - **Taille typique** : Des millions de paramètres 2. **Décodeurs** Un *transformer* basé sur un décodeur se concentre **sur la génération de nouveaux *tokens* pour compléter une séquence, un *token* à la fois**. - **Exemple** : Llama de Meta - **Cas d'utilisation** : Génération de texte, chatbots, génération de code - **Taille typique** : Des milliards de paramètres 3. **Seq2Seq (Encodeur–Décodeur)** Un *transformer* séquence-à-séquence _combine_ un encodeur et un décodeur. L'encodeur traite d'abord la séquence d'entrée pour en extraire une représentation contextuelle, puis le décodeur génère une séquence de sortie. - **Exemple** : T5, BART - **Cas d'utilisation** : Traduction, résumé, paraphrase - **Taille typique** : Des millions de paramètres Bien que les modèles de langage de grande taille existent sous différentes formes, les LLM sont typiquement des modèles basés sur le décodeur avec des milliards de paramètres. Voici quelques-uns des LLM les plus connus : | **Modèle** | **Fournisseur** | |-----------------|-------------------------------------| | **Deepseek-R1** | DeepSeek | | **GPT4** | OpenAI | | **Llama** | Meta (Facebook AI Research) | | **SmolLM** | Hugging Face | | **Gemma** | Google | | **Mistral** | Mistral | Le principe fondamental d'un LLM est simple mais très efficace : **son objectif est de prédire le *token* suivant, étant donné une séquence de *tokens* précédents**. Un *token* est l'unité d'information avec laquelle travaille un LLM. Vous pouvez considérer un *token* comme s'il s'agissait d'un mot, mais pour des raisons d'efficacité, les LLM n'utilisent pas des mots entiers. Par exemple, alors que l'anglais compte environ 600 000 mots, un LLM peut avoir un vocabulaire d'environ 32 000 *tokens* (comme c'est le cas avec Llama 2). La tokenisation fonctionne souvent sur des unités sous-mot pouvant être combinées. 
Par exemple, les *tokens* "intéress" et "ant" peuvent se combiner pour former "intéressant", ou "é" peut être ajouté pour former "intéressé". Vous pouvez expérimenter (en anglais) avec différents *tokenizers* avec l'application ci-dessous : <iframe src="https://agents-course-the-tokenizer-playground.static.hf.space" frameborder="0" width="850" height="450" ></iframe> Chaque LLM possède des ***tokens* spéciaux** propres au modèle. Le LLM utilise ces *tokens* pour ouvrir et fermer les composants structurés de sa génération. Par exemple, pour indiquer le début ou la fin d'une séquence, d'un message ou d'une réponse. De plus, les instructions (ou *prompt*) que nous passons au modèle sont également structurées avec des *tokens* spéciaux. Le plus important d'entre eux est le ***token* de fin de séquence** (EOS). Les formes des tokens *spéciaux* varient grandement selon les fournisseurs de modèles. Le tableau ci-dessous illustre cette diversité : <table> <thead> <tr> <th><strong>Modèle</strong></th> <th><strong>Fournisseur</strong></th> <th><strong>Token EOS</strong></th> <th><strong>Fonctionnalité</strong></th> </tr> </thead> <tbody> <tr> <td><strong>GPT4</strong></td> <td>OpenAI</td> <td><code>&lt;|endoftext|&gt;</code></td> <td>Fin du texte du message</td> </tr> <tr> <td><strong>Llama 3</strong></td> <td>Meta (Facebook AI Research)</td> <td><code>&lt;|eot_id|&gt;</code></td> <td>Fin de la séquence</td> </tr> <tr> <td><strong>Deepseek-R1</strong></td> <td>DeepSeek</td> <td><code>&lt;|end_of_sentence|&gt;</code></td> <td>Fin du texte du message</td> </tr> <tr> <td><strong>SmolLM2</strong></td> <td>Hugging Face</td> <td><code>&lt;|im_end|&gt;</code></td> <td>Fin de l'instruction ou du message</td> </tr> <tr> <td><strong>Gemma</strong></td> <td>Google</td> <td><code>&lt;end_of_turn&gt;</code></td> <td>Fin du tour de conversation</td> </tr> </tbody> </table> <Tip> Nous ne vous demandons pas de mémoriser ces <i>tokens</i> spéciaux mais il est important d'apprécier leur 
diversité et le rôle qu'ils jouent dans la génération de texte par les LLM. Si vous souhaitez en savoir plus sur les <i>tokens</i> spéciaux, vous pouvez consulter la configuration du modèle dans son dépôt sur le 🤗 Hub. Par exemple, vous pouvez trouver les <i>tokens</i> spéciaux du modèle SmolLM2 dans le fichier <a href="https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct/blob/main/tokenizer_config.json">tokenizer_config.json</a>. </Tip> ## Comprendre la prédiction du *token* suivant On dit que les LLM sont **autoregressifs**, ce qui signifie que **la sortie d'une passe devient l'entrée de la suivante**. Cette boucle continue jusqu'à ce que le modèle prédise que le *token* suivant est le *token EOS*, moment où le modèle peut s'arrêter. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AutoregressionSchema.gif" alt="Gif visuel de décodage autoregressif" width="60%"> En d'autres termes, un LLM décodera le texte jusqu'à atteindre le *token EOS*. Mais que se passe-t-il lors d'une boucle de décodage unique ? Bien que le processus complet puisse être assez technique dans le cadre de l'apprentissage des agents, voici un aperçu succinct : - Une fois le texte d'entrée **tokenisé**, le modèle calcule une représentation de la séquence qui capture des informations sur la signification et la position de chaque *token*. - Cette représentation est ensuite traitée par le modèle pour produire des scores classant la probabilité que chaque *token* de son vocabulaire soit le suivant dans la séquence. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/DecodingFinal.gif" alt="Gif visuel du décodage" width="60%"> En se basant sur ces scores, plusieurs stratégies existent pour sélectionner les *tokens* afin de compléter la phrase. - La stratégie de décodage la plus simple consiste à toujours choisir le *token* ayant le score maximum. 
Vous pouvez interagir vous-même avec le processus de décodage de SmolLM2 dans ce *Space* (n'oubliez pas, il décode jusqu'à atteindre un token **EOS** qui est **<|im_end|>** pour ce modèle) : <iframe src="https://agents-course-decoding-visualizer.hf.space" frameborder="0" width="850" height="450" ></iframe> - Mais il existe des stratégies de décodage plus avancées. Par exemple, le *beam search* (recherche par faisceaux) explore plusieurs séquences candidates pour trouver celle ayant le score total maximum, même si certains *tokens* individuels présentent des scores plus faibles. <iframe src="https://agents-course-beam-search-visualizer.hf.space" frameborder="0" width="850" height="450" ></iframe> Si vous souhaitez en savoir plus sur le décodage, vous pouvez jeter un œil au [cours de NLP](https://huggingface.co/learn/llm-course/fr/chapter1/1). ## L'attention est tout ce dont vous avez besoin Un aspect clé de l'architecture *transformer* est **l'attention**. Lors de la prédiction du mot suivant, tous les mots d'une phrase ne sont pas également importants ; des mots comme « France » et « capitale » dans la phrase *« La capitale de la France est … »* portent le plus de sens. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AttentionSceneFinal.gif" alt="Gif visuel de l'Attention" width="60%"> Ce processus d'identification des mots les plus pertinents pour prédire le *token* suivant s'est révélé incroyablement efficace. Bien que le principe de base des LLM — prédire le *token* suivant — soit resté constant depuis GPT-2, des avancées significatives ont été réalisées lors de la mise à l'échelle des réseaux de neurones et dans le fonctionnement du mécanisme d'attention pour des séquences toujours plus longues. 
Après ce _pré-entraînement_ initial, les LLM peuvent être spécialisés via un apprentissage supervisé pour réaliser des tâches spécifiques.
Les LLM sont un composant clé des agents, **fournissant la base pour comprendre et générer le langage humain**.

Ils peuvent interpréter les instructions de l'utilisateur, maintenir le contexte dans les conversations, définir un plan et décider quels outils utiliser.

Nous explorerons ces étapes en détail dans cette Unité, mais pour l'instant, ce qu'il faut retenir, c'est que le LLM est **le cerveau de l'agent**.

---

Cela fait beaucoup d'informations ! Nous avons couvert les bases de ce que sont les LLM, comment ils fonctionnent, et leur rôle pour les agents.

Si vous souhaitez plonger encore plus profondément dans le monde fascinant des modèles de langage et du traitement du langage naturel, n'hésitez pas à consulter notre <a href="https://huggingface.co/learn/llm-course/fr/chapter1/1" target="_blank">cours gratuit sur le NLP</a>.

Maintenant que nous comprenons le fonctionnement des LLM, il est temps de voir **comment ils structurent leurs générations dans un contexte conversationnel**.

Pour exécuter le <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit1/dummy_agent_library.ipynb" target="_blank"><i>notebook</i></a>, **vous avez besoin d'un *token* d'authentification Hugging Face** que vous pouvez obtenir sur la page <a href="https://hf.co/settings/tokens" target="_blank">https://hf.co/settings/tokens</a>.

Vous devez également demander l'accès aux <a href="https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct" target="_blank">modèles Llama 3.2 de Meta</a>.
agents-course/units/fr/unit1/what-are-llms.mdx/0
{ "file_path": "agents-course/units/fr/unit1/what-are-llms.mdx", "repo_id": "agents-course", "token_count": 5237 }
11
# Quiz rapide 2 (non noté) [[quiz2]]

Quoi ?! Un autre Quiz ? Nous savons, nous savons, ... 😅 Mais ce court quiz non noté est là pour **vous aider à renforcer les concepts clés que vous venez d'apprendre**.

Ce quiz couvre les *workflows* d'agents et les interactions, composants essentiels pour construire des agents efficaces.

### Q1 : Quel est le but de l'`AgentWorkflow` dans LlamaIndex ?

<Question
choices={[
{
text: "Pour exécuter un ou plusieurs agents avec des outils",
explain: "Oui, l'<code>AgentWorkflow</code> est la façon principale de créer rapidement un système avec un ou plusieurs agents.",
correct: true
},
{
text: "Pour créer un seul agent qui peut interroger vos données sans mémoire",
explain: "Non, l'<code>AgentWorkflow</code> peut faire plus que cela, le <code>QueryEngine</code> est pour des requêtes simples sur vos données.",
},
{
text: "Pour construire automatiquement des outils pour les agents",
explain: "L'<code>AgentWorkflow</code> ne construit pas d'outils, c'est le travail du développeur.",
},
{
text: "Pour gérer la mémoire et l'état des agents",
explain: "Gérer la mémoire et l'état n'est pas l'objectif principal de l'<code>AgentWorkflow</code>.",
}
]}
/>

---

### Q2 : Quel objet est utilisé pour garder une trace de l'état du *workflow* ?

<Question
choices={[
{
text: "<code>State</code>",
explain: "<code>State</code> n'est pas le bon objet pour la gestion d'état du <i>workflow</i>.",
},
{
text: "<code>Context</code>",
explain: "<code>Context</code> est le bon objet utilisé pour garder une trace de l'état du workflow.",
correct: true
},
{
text: "<code>WorkflowState</code>",
explain: "<code>WorkflowState</code> n'est pas le bon objet.",
},
{
text: "<code>Management</code>",
explain: "<code>Management</code> n'est pas un objet valide pour l'état du <i>workflow</i>.",
}
]}
/>

---

### Q3 : Quelle méthode devrait être utilisée si vous voulez qu'un agent se souvienne des interactions précédentes ?
<Question choices={[ { text: "<code>run(query_str)</code>", explain: "<code>.run(query_str)</code> ne maintient pas l'historique de conversation.", }, { text: "<code>chat(query_str, ctx=ctx)</code>", explain: "<code>chat()</code> n'est pas une méthode valide sur les <i>workflows</i>.", }, { text: "<code>interact(query_str)</code>", explain: "<code>interact()</code> n'est pas une méthode valide pour les interactions d'agents.", }, { text: "<code>run(query_str, ctx=ctx)</code>", explain: "En passant et maintenant le contexte, nous pouvons maintenir l'état !", correct: true } ]} /> --- ### Q4 : Quelle est une caractéristique clé du RAG agentique ? <Question choices={[ { text: "Il ne peut utiliser que des outils basés sur des documents, pour répondre aux questions dans un <i>workflow</i> RAG", explain: "Le RAG agentique peut utiliser différents outils, incluant les outils basés sur des documents.", }, { text: "Il répond automatiquement aux questions sans outils, comme un chatbot", explain: "Le RAG agentique utilise bien des outils pour répondre aux questions.", }, { text: "Il peut décider d'utiliser n'importe quel outil pour répondre aux questions, incluant les outils RAG", explain: "Le RAG agentique a la flexibilité d'utiliser différents outils pour répondre aux questions.", correct: true }, { text: "Il ne fonctionne qu'avec les <i>Function Calling Agents</i>", explain: "Le RAG agentique n'est pas limité aux <i>Function Calling Agents</i>.", } ]} /> --- Vous avez compris ? Parfait ! Maintenant faisons **un bref récapitulatif de l'unité !**
agents-course/units/fr/unit2/llama-index/quiz2.mdx/0
{ "file_path": "agents-course/units/fr/unit2/llama-index/quiz2.mdx", "repo_id": "agents-course", "token_count": 1299 }
12
- title: Раздел 0. Добро пожаловать на курс! sections: - local: unit0/introduction title: Добро пожаловать на курс 🤗 - local: unit0/onboarding title: Вводная часть - local: unit0/discord101 title: (Необязательно) Discord 101 - title: Прямой эфир 1. Как работает курс, вопросы и ответы sections: - local: communication/live1 title: Прямой эфир 1. Как работает курс, вопросы и ответы - title: Раздел 1. Введение в Агентов sections: - local: unit1/introduction title: Введение - local: unit1/what-are-agents title: Что такое Агент? - local: unit1/quiz1 title: Быстрый тест 1 - local: unit1/what-are-llms title: Что такое LLM? - local: unit1/messages-and-special-tokens title: Сообщения и специальные токены - local: unit1/tools title: Что такое инструменты? - local: unit1/quiz2 title: Быстрый тест 2 - local: unit1/agent-steps-and-structure title: Понимание ИИ агентов через цикл "Мысль-Действие-Наблюдение" - local: unit1/thoughts title: Мышление, внутренние рассуждения и подход Re-Act - local: unit1/actions title: Действия, позволяющие агенту взаимодействовать с окружающей средой - local: unit1/observations title: Наблюдение, интеграция обратной связи для Осмысления и Адаптации - local: unit1/dummy-agent-library title: Библиотека фиктивного агента - local: unit1/tutorial title: Давайте создадим нашего первого агента используя Smolagents - local: unit1/final-quiz title: Раздел 1 Финальный тест - local: unit1/get-your-certificate title: Получите свой сертификат - local: unit1/conclusion title: Заключение - title: Бонусный раздел 1. Дообучение LLM для вызова функций sections: - local: bonus-unit1/introduction title: Введение - local: bonus-unit1/what-is-function-calling title: Что такое вызов функции? - local: bonus-unit1/fine-tuning title: Давайте дообучим вашу модель для вызова функций - local: bonus-unit1/conclusion title: Заключение - title: Когда будут опубликованы следующие шаги? sections: - local: communication/next-units title: Следующие разделы
agents-course/units/ru-RU/_toctree.yml/0
{ "file_path": "agents-course/units/ru-RU/_toctree.yml", "repo_id": "agents-course", "token_count": 1501 }
13
# Получите свой сертификат <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-unit1sub5DONE.jpg" alt="Раздел 1 планирование"/> Теперь, когда вы успешно прошли этот тест, **вы можете получить свой сертификат 🎓**. Чтобы получить этот сертификат, вам необходимо пройти раздел 1 курса Агенты и **сдать на 80% итоговый тест**. <iframe src="https://agents-course-unit1-certification-app.hf.space" frameborder="0" width="850" height="450" ></iframe> Вы также можете ознакомиться с процессом сертификации 👉 [здесь](https://huggingface.co/spaces/agents-course/unit1-certification-app) Как только вы получите сертификат, вы можете добавить его в свой LinkedIn 🧑‍💼 или поделиться им в X, Bluesky и т. д. **Мы будем очень горды и с удовольствием поздравим вас, если вы добавите тэг @huggingface**! 🤗
agents-course/units/ru-RU/unit1/get-your-certificate.mdx/0
{ "file_path": "agents-course/units/ru-RU/unit1/get-your-certificate.mdx", "repo_id": "agents-course", "token_count": 654 }
14
# Tools là gì? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-2.jpg" alt="Unit 1 planning"/> Một khía cạnh quan trọng của AI agents là khả năng thực hiện **hành động**. Như ta đã thấy, điều này xảy ra thông qua việc sử dụng **Tools** (công cụ). Trong phần này, ta sẽ tìm hiểu Tools là gì, cách thiết kế chúng hiệu quả và cách tích hợp vào Agent thông qua System Message. Bằng cách cung cấp đúng Tools cho Agent - và mô tả rõ ràng cách chúng hoạt động - bạn có thể nâng cao đáng kể khả năng của AI. Cùng tìm hiểu nhé! ## AI Tools là gì? **Tool là một hàm được cung cấp cho LLM**. Hàm này cần đáp ứng **một mục tiêu rõ ràng**. Dưới đây là những Tools phổ biến trong AI agents: | Tool | Mô tả | |----------------|---------------------------------------------------------------| | Web Search | Cho phép agent truy cập thông tin cập nhật từ internet. | | Image Generation | Tạo hình ảnh dựa trên mô tả văn bản. | | Retrieval | Truy xuất thông tin từ nguồn bên ngoài. | | API Interface | Tương tác với API bên ngoài (GitHub, YouTube, Spotify, v.v.). | Đây chỉ là ví dụ - bạn hoàn toàn có thể tạo Tool cho bất kỳ use case nào! Một Tool tốt cần **bổ sung năng lực của LLM**. Ví dụ: nếu cần tính toán số học, việc cung cấp **công cụ máy tính** cho LLM sẽ cho kết quả tốt hơn so với dựa vào khả năng tự nhiên của mô hình. Hơn nữa, **LLM dự đoán phần tiếp theo của prompt dựa trên dữ liệu huấn luyện**, nghĩa là kiến thức của chúng chỉ bao gồm sự kiện trước thời điểm huấn luyện. Do đó, nếu agent cần dữ liệu mới nhất, bạn phải cung cấp thông qua Tools. Ví dụ: nếu hỏi trực tiếp LLM (không dùng công cụ tìm kiếm) về thời tiết hôm nay, LLM có thể "bịa" (hallucinate) ra một thông tin thời tiết ngẫu nhiên. 
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/weather.jpg" alt="Weather"/> - Một Tool cần chứa: - **Mô tả bằng văn bản** về chức năng - *Callable* (thứ để thực thi hành động) - *Arguments* với kiểu dữ liệu - (Tùy chọn) Đầu ra với kiểu dữ liệu ## Tools hoạt động thế nào? Như đã biết, LLM chỉ nhận đầu vào dạng text và tạo đầu ra dạng text. Chúng không thể tự gọi Tools. Khi nói về _cung cấp Tools cho Agent_, nghĩa là ta **dạy** LLM về sự tồn tại của Tools và yêu cầu mô hình tạo text để kích hoạt Tools khi cần. Ví dụ: nếu cung cấp Tool kiểm tra thời tiết từ Internet, khi hỏi LLM về thời tiết Paris, LLM sẽ nhận ra cần dùng Tool "weather". LLM sẽ tạo _text_ dạng code để gọi Tool. **Agent** có nhiệm vụ phân tích đầu ra của LLM, nhận diện lệnh gọi Tool và thực thi thay cho LLM. Đầu ra từ Tool sẽ được gửi lại LLM để tổng hợp trả lời (response) cuối cho người dùng. Đầu ra từ Tool là một loại message khác trong hội thoại. Các bước gọi Tool thường không hiển thị cho người dùng: Agent lấy hội thoại, gọi Tool(s), nhận đầu ra, thêm chúng vào hội thoại và gửi lại LLM. Từ góc độ người dùng, trông như LLM tự dùng Tool nhưng thực chất là code ứng dụng (**Agent**) thực hiện. Chúng ta sẽ nói thêm về quy trình này trong các bài sau. ## Cách cung cấp Tools cho LLM? Câu trả lời đầy đủ có vẻ phức tạp, nhưng về cơ bản ta dùng system prompt để cung cấp mô tả Tools cho mô hình: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt.png" alt="System prompt for tools"/> Để điều này hoạt động, ta cần chính xác về: 1. **Chức năng của Tool** 2. **Đầu vào mà nó mong đợi** Đây là lý do mô tả Tools thường dùng cấu trúc chính xác như ngôn ngữ máy tính hoặc JSON. Không _bắt buộc_ phải làm vậy, bất kỳ định dạng chính xác nào cũng được. Nếu lý thuyết quá trừu tượng, hãy xem qua ví dụ cụ thể. Ta sẽ triển khai **calculator** Tool đơn giản để nhân hai số nguyên. 
Đây là code Python: ```python def calculator(a: int, b: int) -> int: """Nhân hai số nguyên.""" return a * b ``` Tool của ta tên `calculator`, **nhân hai số nguyên** và cần các đầu vào: - **`a`** (*int*): Số nguyên - **`b`** (*int*): Số nguyên Đầu ra của Tool là số nguyên: - (*int*): Tích của `a` và `b` Tất cả chi tiết này đều quan trọng. Hãy tổng hợp chúng thành chuỗi mô tả Tool cho LLM: ```text Tool Name: calculator, Description: Multiply two integers., Arguments: a: int, b: int, Outputs: int ``` > **Nhắc nhở:** Mô tả text này là *thứ ta muốn LLM biết về Tool*. Khi đưa chuỗi trên vào đầu vào của LLM, mô hình sẽ nhận diện nó là Tool và biết cần truyền đầu vào gì, mong đợi đầu ra gì. Nếu muốn cung cấp nhiều Tools, ta cần nhất quán định dạng. Quá trình này có thể mong manh và dễ bỏ sót chi tiết. Có cách nào tốt hơn? ### Tự động định dạng phần Tools Tool của ta được viết bằng Python, và phần triển khai đã cung cấp mọi thứ cần thiết: - Tên mô tả: `calculator` - Mô tả dài trong docstring: `Multiply two integers.` - Đầu vào và kiểu dữ liệu: hàm mong đợi hai `int` - Kiểu đầu ra. Có lý do để mọi người dùng ngôn ngữ lập trình: chúng biểu đạt tốt, ngắn gọn và chính xác. Ta có thể đưa mã nguồn Python làm _đặc tả_ Tool cho LLM, nhưng cách triển khai Tool không quan trọng. Điều quan trọng là tên, chức năng, đầu vào và đầu ra. Ta sẽ tận dụng tính năng introspection của Python để tự động xây dựng mô tả Tool từ mã nguồn. Điều kiện là phần triển khai Tool phải dùng type hints, docstrings và tên hàm hợp lý. Ta sẽ viết code để trích xuất thông tin từ mã nguồn. Sau đó, ta chỉ cần dùng Python decorator để đánh dấu hàm `calculator` là Tool: ```python @tool def calculator(a: int, b: int) -> int: """Multiply two integers.""" return a * b print(calculator.to_string()) ``` Chú ý decorator `@tool` trước định nghĩa hàm. 
Với phần triển khai sẽ học tiếp theo, ta có thể tự động lấy text mô tả Tool thông qua hàm `to_string()` từ decorator: ```text Tool Name: calculator, Description: Multiply two integers., Arguments: a: int, b: int, Outputs: int ``` Như bạn thấy, nó giống hệt phần ta viết tay trước đó! ### Triển khai Tool tổng quát Ta tạo lớp `Tool` tổng quát để tái sử dụng khi cần dùng Tool. > **Lưu ý:** Ví dụ này là giả định nhưng gần với phần triển khai thực tế trong các thư viện. ```python class Tool: """ Lớp đại diện cho Tool có thể tái sử dụng. Thuộc tính: name (str): Tên Tool description (str): Mô tả chức năng func (callable): Hàm được wrap arguments (list): Danh sách tham số outputs (str hoặc list): Kiểu dữ liệu trả về """ def __init__(self, name: str, description: str, func: callable, arguments: list, outputs: str): self.name = name self.description = description self.func = func self.arguments = arguments self.outputs = outputs def to_string(self) -> str: """ Trả về chuỗi biểu diễn Tool, bao gồm tên, mô tả, arguments và outputs. """ args_str = ", ".join([ f"{arg_name}: {arg_type}" for arg_name, arg_type in self.arguments ]) return ( f"Tool Name: {self.name}," f" Description: {self.description}," f" Arguments: {args_str}," f" Outputs: {self.outputs}" ) def __call__(self, *args, **kwargs): """ Gọi hàm cơ sở với arguments được cung cấp. """ return self.func(*args, **kwargs) ``` Trông có vẻ phức tạp, nhưng nếu xem kỹ ta sẽ hiểu cách hoạt động. 
Lớp **`Tool`** bao gồm: - **`name`** (*str*): Tên Tool - **`description`** (*str*): Mô tả chức năng - **`function`** (*callable*): Hàm thực thi - **`arguments`** (*list*): Tham số đầu vào - **`outputs`** (*str* hoặc *list*): Đầu ra mong đợi - **`__call__()`**: Gọi hàm khi Tool được invoke - **`to_string()`**: Chuyển thuộc tính Tool thành chuỗi mô tả Ta có thể tạo Tool bằng code như sau: ```python calculator_tool = Tool( "calculator", # tên "Multiply two integers.", # mô tả calculator, # hàm gọi [("a", "int"), ("b", "int")], # đầu vào (tên và kiểu) "int", # đầu ra ) ``` Nhưng ta cũng có thể dùng module `inspect` của Python để tự động lấy thông tin! Đây chính là cách decorator `@tool` hoạt động. > Nếu quan tâm, bạn có thể xem phần code decorator bên dưới. <details> <summary> decorator code</summary> ```python def tool(func): """ Decorator tạo instance Tool từ hàm được cung cấp. """ # Lấy signature của hàm signature = inspect.signature(func) # Trích xuất cặp (tên tham số, kiểu dữ liệu) cho đầu vào arguments = [] for param in signature.parameters.values(): annotation_name = ( param.annotation.__name__ if hasattr(param.annotation, '__name__') else str(param.annotation) ) arguments.append((param.name, annotation_name)) # Xác định kiểu trả về return_annotation = signature.return_annotation if return_annotation is inspect._empty: outputs = "Không có chú thích kiểu trả về" else: outputs = ( return_annotation.__name__ if hasattr(return_annotation, '__name__') else str(return_annotation) ) # Dùng docstring của hàm làm mô tả (mặc định nếu không có) description = func.__doc__ or "Không có mô tả." 
# Tên hàm trở thành tên Tool name = func.__name__ # Trả về instance Tool mới return Tool( name=name, description=description, func=func, arguments=arguments, outputs=outputs ) ``` </details> Tóm lại, với decorator này ta có thể triển khai Tool như sau: ```python @tool def calculator(a: int, b: int) -> int: """Multiply two integers.""" return a * b print(calculator.to_string()) ``` Và dùng method `to_string` của `Tool` để tự động lấy text mô tả phù hợp cho LLM: ```text Tool Name: calculator, Description: Multiply two integers., Arguments: a: int, b: int, Outputs: int ``` Mô tả này được **đưa vào** system prompt. Xem ví dụ ban đầu sau khi thay thế `tools_description`: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt_tools.png" alt="System prompt for tools"/> Trong phần [Actions](actions), ta sẽ học cách Agent **Gọi** Tool vừa tạo. --- Tools đóng vai trò quan trọng trong việc nâng cao năng lực của AI agents. Tóm lại, ta đã học: - *Tools là gì*: Các hàm mở rộng khả năng của LLM như tính toán hay truy cập dữ liệu ngoài - *Cách định nghĩa Tool*: Bằng cách cung cấp mô tả rõ ràng, đầu vào, đầu ra và hàm thực thi - *Tại sao Tools quan trọng*: Chúng giúp Agent vượt giới hạn của mô hình tĩnh, xử lý tác vụ thời gian thực và thực hiện hành động chuyên biệt Giờ ta có thể chuyển sang [Agent Workflow](agent-steps-and-structure) để xem cách Agent quan sát, tư duy và hành động. Đây là **tổng hợp mọi thứ đã học** và đặt nền móng để bạn tạo AI agent chức năng hoàn chỉnh. Nhưng trước hết, hãy cùng làm Kiểm tra nhanh!
agents-course/units/vi/unit1/tools.mdx/0
{ "file_path": "agents-course/units/vi/unit1/tools.mdx", "repo_id": "agents-course", "token_count": 7532 }
15
# 直播第一课:课程体系解读与首次答疑会 在本期智能体课程的首场直播中,我们详细解析了课程运行机制(涵盖课程范围、单元结构、实践挑战等核心要素),并针对学员疑问进行现场解答。 <iframe width="560" height="315" src="https://www.youtube.com/embed/iLVyYDbdSmM?si=TCX5Ai3uZuKLXq45" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> 要获取后续直播排期,请关注我们的 **Discord 动态**. 系统也将同步发送邮件提醒。若无法实时参与,学员无需担心,我们**对所有直播课程都会进行全程录制存档**。
agents-course/units/zh-CN/communication/live1.mdx/0
{ "file_path": "agents-course/units/zh-CN/communication/live1.mdx", "repo_id": "agents-course", "token_count": 467 }
16
# 思维机制:内部推理与 ReAct 方法 <Tip> 本节将深入探讨 AI 智能体的内部运作机制——其推理与规划能力。我们将解析智能体如何通过内部对话分析信息,将复杂问题分解为可管理的步骤,并决策下一步行动。同时介绍 ReAct 方法,是一种鼓励模型在行动前"逐步思考"的提示技术。 </Tip> 思维(Thought)代表着智能体**解决任务的内部推理与规划过程**。 这利用了智能体的大型语言模型 (LLM) 能力**来分析其 prompt 中的信息**。 可将其视为智能体的内部对话,在此过程中它会考量当前任务并制定应对策略。 智能体的思维负责获取当前观察结果,并决定下一步应采取的行动。 通过这一过程,智能体能够**将复杂问题分解为更小、更易管理的步骤**,反思过往经验,并根据新信息持续调整计划。 以下是常见思维模式的示例: | 思维类型 | 示例 | |------------------|---------------------------------------------------------------------| | Planning(规划) | "I need to break this task into three steps: 1) gather data, 2) analyze trends, 3) generate report"("我需要将任务分解为三步:1)收集数据 2)分析趋势 3)生成报告") | | Analysis(分析) | "Based on the error message, the issue appears to be with the database connection parameters"("根据错误信息,问题似乎出在数据库连接参数") | | Decision Making(决策) | "Given the user's budget constraints, I should recommend the mid-tier option"("考虑到用户的预算限制,应推荐中端选项") | | Problem Solving(问题解决) | "To optimize this code, I should first profile it to identify bottlenecks"("优化此代码需先进行性能分析定位瓶颈") | | Memory Integration(记忆整合) | "The user mentioned their preference for Python earlier, so I'll provide examples in Python"("用户先前提到偏好 Python,因此我将提供 Python 示例") | | Self-Reflection(自我反思) | "My last approach didn't work well, I should try a different strategy"("上次方法效果不佳,应尝试不同策略") | | Goal Setting(目标设定) | "To complete this task, I need to first establish the acceptance criteria"("完成此任务需先确定验收标准") | | Prioritization(优先级排序) | "The security vulnerability should be addressed before adding new features"("在添加新功能前应先修复安全漏洞") | > **注意:** 对于专为 function-calling 微调的 LLMs,思维过程是可选的。 > *若您不熟悉 function-calling 概念,后续"行动"章节将提供详细说明。* ## ReAct 方法 核心方法是 **ReAct 方法**,即"推理"(Reasoning/Think)与"行动"(Acting/Act)的结合。 ReAct 是一种简单的提示技术,在让 LLM 解码后续 token 前添加"Let's think step by step"(让我们逐步思考)的提示。 通过提示模型"逐步思考",可以引导解码过程生成**计划**而非直接输出最终解决方案,因为模型被鼓励将问题**分解**为*子任务*。 这种方法使模型能够更详细地考虑各个子步骤,通常比直接生成最终方案产生更少错误。 <figure> <img 
src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/ReAct.png" alt="ReAct"/> <figcaption>图 (d) 展示了 ReAct 方法示例,我们通过"Let's think step by step"提示模型 </figcaption> </figure> <Tip> 近期推理策略受到广泛关注,这体现在 Deepseek R1 或 OpenAI 的 o1 等模型的开发中。这些模型经过微调,被训练为"先思考再回答"。 它们通过特殊标记(`<thought>` 和 `</thought>`)来界定 _思考_ 部分。这不仅是类似 ReAct 的提示技巧,更是通过分析数千个示范案例,让模型学习生成这些思考段的训练方法。 </Tip> --- 现在我们已经深入理解了思维过程,接下来将更深入探讨流程的第二部分:行动。
agents-course/units/zh-CN/unit1/thoughts.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit1/thoughts.mdx", "repo_id": "agents-course", "token_count": 2482 }
17
# 结语 恭喜完成第二单元 `llama-index` 模块的学习 🥳 您已掌握 `llama-index` 的核心基础,并学会了如何构建自主式工作流! 现在凭借掌握的 `llama-index` 技能,您可以开始创建解决实际任务的搜索引擎。 在本单元的下个模块中,您将学习**如何用 LangGraph 构建智能体**。 最后,我们诚挚希望**听取您对课程的评价和改进建议**。 如有任何反馈,请👉[填写此表单](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### 持续学习,保持热情 🤗
agents-course/units/zh-CN/unit2/llama-index/conclusion.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/llama-index/conclusion.mdx", "repo_id": "agents-course", "token_count": 436 }
18
<CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/tools.ipynb"}, ]} /> # 工具 正如我们在[第一单元](https://huggingface.co/learn/agents-course/unit1/tools)所探讨的,智能体通过工具执行各类操作。在`smolagents`框架中,工具被视为 **LLM 可以在智能体系统中调用的函数**。 要使LLM能够调用工具,需要为其提供包含以下关键要素的**接口描述**: - **名称**:工具的标识名称 - **工具描述**:工具的功能说明 - **输入类型及描述**:工具接受的参数说明 - **输出类型**:工具的返回结果类型 以韦恩庄园筹备派对为例,Alfred 需要多种工具来收集信息——从搜索餐饮服务到寻找派对主题创意。以下是一个简单搜索工具的接口示例: - **名称:** `web_search` - **工具描述:** 根据特定查询进行网络搜索 - **输入:** `query` (字符串) - 需要查找的搜索关键词 - **输出:** 包含搜索结果的字符串 通过使用这些工具,Alfred 能够做出明智决策并收集派对筹备所需的所有信息。 下方动画展示了工具调用的管理流程: ![来自 https://huggingface.co/docs/smolagents/conceptual_guides/react 的智能体流程](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif) ## 工具创建方法 在`smolagents`中,可以通过两种方式定义工具: 1. **使用`@tool`装饰器**创建基于函数的简单工具 2. **创建`Tool`的子类**实现复杂功能 ### `@tool`装饰器 `@tool`装饰器是**定义简单工具的推荐方式**。在底层,smolagents 会从 Python 函数解析基本信息。因此,清晰的函数命名和规范的文档字符串(docstring)能让 LLM 更易理解工具用途。 使用此方法时,我们需要定义包含以下要素的函数: - **明确描述性的函数名称**:帮助LLM理解其用途 - **输入输出的类型提示**:确保正确使用 - **详细描述**:包含明确描述各参数的`Args:`部分,这些描述为 LLM 提供关键上下文信息 #### 创建餐饮评分查询工具 <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-catering.jpg" alt="Alfred Catering"/> <Tip> 您可以通过 <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/tools.ipynb" target="_blank">此 Notebook</a> 跟随代码实践,该文件支持在 Google Colab 中运行。 </Tip> 假设 Alfred 已确定派对菜单,但需要为大量宾客准备食物。为此,他希望雇佣餐饮服务并需要找到当地评分最高的选择。 以下是通过`@tool`装饰器实现该功能的示例: ```python from smolagents import CodeAgent, InferenceClientModel, tool # 假设我们有一个获取最高评分餐饮服务的函数 @tool def catering_service_tool(query: str) -> str: """ This tool returns the highest-rated catering service in Gotham City. Args: query: A search term for finding catering services. 
""" # 示例餐饮服务及评分列表 services = { "Gotham Catering Co.": 4.9, "Wayne Manor Catering": 4.8, "Gotham City Events": 4.7, } # 查找评分最高的餐饮服务(模拟搜索查询过滤) best_service = max(services, key=services.get) return best_service agent = CodeAgent(tools=[catering_service_tool], model=InferenceClientModel()) # 运行智能体寻找最佳餐饮服务 result = agent.run( "Can you give me the name of the highest-rated catering service in Gotham City?" ) print(result) # Output: Gotham Catering Co. ``` ### 通过Python类定义工具 此方法需要创建[`Tool`](https://huggingface.co/docs/smolagents/v1.8.1/en/reference/tools#smolagents.Tool)的子类。对于复杂工具,我们可以通过类封装函数及其元数据来帮助 LLM 理解使用方式。在类中需要定义: - `name`: 工具名称 - `description`: 用于构建智能体系统提示的描述 - `inputs`: 包含`type`和`description`的字典,帮助Python解释器处理输入 - `output_type`: 指定期望的输出类型 - `forward`: 包含执行逻辑的方法 以下是通过`Tool`类构建工具并与`CodeAgent`集成的示例: #### 创建超级英雄主题派对创意生成工具 Alfred 计划在庄园举办**超级英雄主题派对**,但需要独特创意让活动与众不同。作为完美管家,他希望用新颖主题给宾客带来惊喜。 为此,我们可以创建根据类别生成派对创意的工具,帮助 Alfred 找到最惊艳的主题方案: ```python from smolagents import Tool, CodeAgent, InferenceClientModel class SuperheroPartyThemeTool(Tool): name = "superhero_party_theme_generator" description = """ This tool suggests creative superhero-themed party ideas based on a category. It returns a unique party theme idea.""" inputs = { "category": { "type": "string", "description": "The type of superhero party (e.g., 'classic heroes', 'villain masquerade', 'futuristic Gotham').", } } output_type = "string" def forward(self, category: str): themes = { "classic heroes": "Justice League Gala: Guests come dressed as their favorite DC heroes with themed cocktails like 'The Kryptonite Punch'.", "villain masquerade": "Gotham Rogues' Ball: A mysterious masquerade where guests dress as classic Batman villains.", "futuristic Gotham": "Neo-Gotham Night: A cyberpunk-style party inspired by Batman Beyond, with neon decorations and futuristic gadgets." } return themes.get(category.lower(), "Themed party idea not found. 
Try 'classic heroes', 'villain masquerade', or 'futuristic Gotham'.") # 实例化工具 party_theme_tool = SuperheroPartyThemeTool() agent = CodeAgent(tools=[party_theme_tool], model=InferenceClientModel()) # 运行智能体生成派对主题 result = agent.run( "What would be a good superhero party idea for a 'villain masquerade' theme?" ) print(result) # Output: "Gotham Rogues' Ball: A mysterious masquerade where guests dress as classic Batman villains." ``` 借助此工具,Alfred 将成为终极超级管家,为宾客呈现难忘的超级英雄主题派对!🦸♂️🦸♀️ ## 默认工具箱 `smolagents` 自带一组预构建工具,可直接注入到您的智能体中。[默认工具箱](https://huggingface.co/docs/smolagents/guided_tour?build-a-tool=Decorate+a+function+with+%40tool#default-toolbox) 包含: - **PythonInterpreterTool** - **FinalAnswerTool** - **UserInputTool** - **DuckDuckGoSearchTool** - **GoogleSearchTool** - **VisitWebpageTool** Alfred 可以使用多种工具来确保韦恩庄园的完美派对: - 首先,他可以使用 `DuckDuckGoSearchTool` 搜索创意超级英雄主题派对灵感 - 对于餐饮,他依赖 `GoogleSearchTool` 查找哥谭市评分最高的服务 - 要管理座位安排,Alfred 可以通过 `PythonInterpreterTool` 运行计算 - 收集完所有信息后,他使用 `FinalAnswerTool` 整合计划 通过这些工具,Alfred 确保派对既出众又顺利。🦇💡 ## 共享与导入工具 **smolagents** 最强大的功能之一是能够将自定义工具共享到 Hub 并无缝集成社区创建的工具。这包括与 **HF Spaces** 和 **LangChain 工具**的连接,显著增强了 Alfred 策划难忘韦恩庄园派对的能力。🎭 通过这些集成,Alfred 可以利用高级活动策划工具——无论是调整灯光营造完美氛围、为派对策划理想歌单,还是与哥谭市最优秀的餐饮服务商协调。 以下是展示这些功能如何提升派对体验的示例: ### 向 Hub 共享工具 与社区分享自定义工具非常简单!只需使用 `push_to_hub()` 方法将其上传到您的 Hugging Face 账户。 例如,Alfred 可以分享他的 `party_theme_tool` 以帮助其他人找到哥谭市最好的餐饮服务。具体操作如下: ```python party_theme_tool.push_to_hub("{your_username}/party_theme_tool", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>") ``` ### 从 Hub 导入工具 您可以使用 `load_tool()` 函数轻松导入其他用户创建的工具。例如,Alfred 可能希望使用 AI 生成派对的宣传图片。无需从头构建工具,他可以直接使用社区预定义的方案: ```python from smolagents import load_tool, CodeAgent, InferenceClientModel image_generation_tool = load_tool( "m-ric/text-to-image", trust_remote_code=True ) agent = CodeAgent( tools=[image_generation_tool], model=InferenceClientModel() ) agent.run("Generate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.") ``` ### 将 Hugging Face 
Space 导入为工具 您可以使用 `Tool.from_space()` 将 HF Space 作为工具导入。这开启了与社区数千个 Space 集成的可能性,从图像生成到数据分析均可实现。 工具将通过 `gradio_client` 连接 Space 的后端,请确保已通过 `pip` 安装该依赖(如果尚未安装)。 对于本次派对,Alfred 可以使用现有的 HF Space 生成公告所需的 AI 图像(替代之前提到的预建工具)。让我们开始构建: ```python from smolagents import CodeAgent, InferenceClientModel, Tool image_generation_tool = Tool.from_space( "black-forest-labs/FLUX.1-schnell", name="image_generator", description="Generate an image from a prompt" ) model = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A grand superhero-themed party at Wayne Manor, with Alfred overseeing a luxurious gala'} ) ``` ### 导入 LangChain 工具 我们将在后续章节讨论 `LangChain` 框架。目前需要注意的是,您可以在 smolagents 工作流中复用 LangChain 工具! 您可以使用 `Tool.from_langchain()` 方法轻松加载 LangChain 工具。 追求完美的 Alfred 正在筹备一场盛大的超级英雄之夜活动(趁韦恩一家外出时),为确保每个细节都超出预期,他借助 LangChain 工具来寻找顶级的娱乐创意。 具体实现如下: ```python from langchain.agents import load_tools from smolagents import CodeAgent, InferenceClientModel, Tool search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) agent = CodeAgent(tools=[search_tool], model=model) agent.run("Search for luxury entertainment ideas for a superhero-themed event, such as live performances and interactive experiences.") ``` 通过此设置,Alfred 能快速发现高端娱乐选项,确保哥谭的精英宾客获得难忘体验。该工具帮助他策划韦恩庄园的完美超级英雄主题活动!🎉 ## 资源 - [工具教程](https://huggingface.co/docs/smolagents/tutorials/tools) - 通过本教程学习如何高效使用工具 - [工具文档](https://huggingface.co/docs/smolagents/v1.8.1/en/reference/tools) - 全面的工具参考文档 - [工具使用导览](https://huggingface.co/docs/smolagents/v1.8.1/en/guided_tour#tools) - 逐步指导如何构建和使用工具 - [构建高效智能体](https://huggingface.co/docs/smolagents/tutorials/building_good_agents) - 关于开发可靠高性能自定义函数智能体的最佳实践指南
agents-course/units/zh-CN/unit2/smolagents/tools.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/smolagents/tools.mdx", "repo_id": "agents-course", "token_count": 6379 }
19
[build] rustflags = ["-C", "target-cpu=native"] [target.wasm32-unknown-unknown] rustflags = ["-C", "target-feature=+simd128", "--cfg", 'getrandom_backend="wasm_js"'] [target.x86_64-apple-darwin] rustflags = ["-C", "target-feature=-avx,-avx2"]
candle/.cargo/config.toml/0
{ "file_path": "candle/.cargo/config.toml", "repo_id": "candle", "token_count": 101 }
20
[book] authors = ["Nicolas Patry"] language = "en" multilingual = false src = "src" title = "Candle Documentation"
candle/candle-book/book.toml/0
{ "file_path": "candle/candle-book/book.toml", "repo_id": "candle", "token_count": 38 }
21
# Candle MNIST Tutorial ## Introduction This tutorial provides an introduction to Candle by implementing and training a neural network for MNIST digit classification from scratch. Throughout this tutorial, you will learn the basics of: - Tensor operations and model construction - Creating and implementing neural network layers - Parameter initialization - Training loop implementation - Saving and loading trained models ## Getting Started Before proceeding, please ensure that you have properly installed Candle by following the instructions in the [Installation](../installation.md) guide.
candle/candle-book/src/guide/mnist/intro.md/0
{ "file_path": "candle/candle-book/src/guide/mnist/intro.md", "repo_id": "candle", "token_count": 116 }
22
# Training Training starts with data. We're going to use the huggingface hub and start with the Hello world dataset of machine learning, MNIST. Let's start with downloading `MNIST` from [huggingface](https://huggingface.co/datasets/mnist). This requires [`hf-hub`](https://github.com/huggingface/hf-hub). ```bash cargo add hf-hub ``` This is going to be very hands-on for now. ```rust,ignore {{#include ../../../candle-examples/src/lib.rs:book_training_1}} ``` This uses the standardized `parquet` files from the `refs/convert/parquet` branch on every dataset. Our handles are now [`parquet::file::serialized_reader::SerializedFileReader`]. We can inspect the content of the files with: ```rust,ignore {{#include ../../../candle-examples/src/lib.rs:book_training_2}} ``` You should see something like: ```bash Column id 1, name label, value 6 Column id 0, name image, value {bytes: [137, ....] Column id 1, name label, value 8 Column id 0, name image, value {bytes: [137, ....] ``` So each row contains 2 columns (image, label) with image being saved as bytes. Let's put them into a useful struct.
candle/candle-book/src/training/training.md/0
{ "file_path": "candle/candle-book/src/training/training.md", "repo_id": "candle", "token_count": 361 }
23
#[cfg(feature = "accelerate")]
extern crate accelerate_src;

#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

use anyhow::Result;
use candle_core::{Device, Tensor};

// Micro-benchmark for conv1d on CUDA.
// xs: [1024, 64, 1924], c: [128, 64, 8] (f32, cuda:0),
// Conv1dConfig { padding: 0, stride: 4, dilation: 1, groups: 1 }
fn main() -> Result<()> {
    let device = Device::new_cuda(0)?;
    let x = Tensor::randn(0f32, 1.0, (1024, 64, 1924), &device)?;
    let c = Tensor::randn(0f32, 1.0, (128, 64, 8), &device)?;
    // Warm-up launch so one-time setup (kernel/algorithm selection) is not
    // included in the measurements below.
    let _x1 = x.conv1d(&c, 0, 4, 1, 1)?;
    drop(_x1);
    // Wait for the warm-up kernel to complete; CUDA launches are
    // asynchronous, so without this the warm-up work would bleed into the
    // first timed iteration.
    device.synchronize()?;
    for _ in 0..20 {
        let start_time = std::time::Instant::now();
        let _x1 = x.conv1d(&c, 0, 4, 1, 1)?;
        // Synchronize before reading the clock so the elapsed time covers the
        // actual GPU work, not just the (async) launch.
        device.synchronize()?;
        println!("conv1d: {:?}", start_time.elapsed());
    }
    Ok(())
}
candle/candle-core/examples/cuda_basics.rs/0
{ "file_path": "candle/candle-core/examples/cuda_basics.rs", "repo_id": "candle", "token_count": 363 }
24
//! cuDNN-backed forward convolution launches for the CUDA backend.

use crate::WithDType;
use cudarc;
use cudarc::cudnn::safe::{ConvForward, Cudnn};
use cudarc::driver::{CudaSlice, CudaView, DeviceRepr, ValidAsZeroBits};
use std::cell::RefCell;
use std::collections::HashMap;
use std::sync::Arc;

// The cudnn handles are stored per thread here rather than on the CudaDevice as they are neither
// send nor sync.
thread_local! {
    static CUDNN: RefCell<HashMap<crate::cuda_backend::DeviceId, Arc<Cudnn>>> = HashMap::new().into();
}

impl From<cudarc::cudnn::CudnnError> for crate::Error {
    fn from(err: cudarc::cudnn::CudnnError) -> Self {
        crate::Error::wrap(err)
    }
}

impl From<cudarc::driver::DriverError> for crate::Error {
    fn from(err: cudarc::driver::DriverError) -> Self {
        crate::Error::wrap(err)
    }
}

/// Runs a 2D cross-correlation of `src` (NCHW, shape taken from `params`)
/// with `filter`, writing the result into `dst` via cuDNN.
///
/// `T` is the element type of the buffers; `Y` is the cuDNN compute type used
/// for the convolution descriptor. `src_l` supplies the strides when the
/// input is not contiguous. The forward algorithm comes from
/// `params.cudnn_fwd_algo`, falling back to cuDNN's own choice when `None`.
pub(crate) fn launch_conv2d<
    T: DeviceRepr + WithDType + ValidAsZeroBits + cudarc::cudnn::CudnnDataType,
    Y: cudarc::cudnn::CudnnDataType,
>(
    src: &CudaView<T>,
    src_l: &crate::Layout,
    filter: &CudaView<T>,
    dst: &mut CudaSlice<T>,
    params: &crate::conv::ParamsConv2D,
    dev: &crate::cuda_backend::CudaDevice,
) -> crate::Result<()> {
    use crate::conv::CudnnFwdAlgo as CandleAlgo;
    use cudarc::cudnn::sys::cudnnConvolutionFwdAlgo_t as A;
    let device_id = dev.id();
    // Fetch (or lazily create) the per-thread cuDNN handle for this device.
    let cudnn = CUDNN.with(|cudnn| {
        if let Some(cudnn) = cudnn.borrow().get(&device_id) {
            return Ok(cudnn.clone());
        }
        let c = Cudnn::new(dev.cuda_stream());
        if let Ok(c) = &c {
            cudnn.borrow_mut().insert(device_id, c.clone());
        }
        c
    })?;
    let conv = cudnn.create_conv2d::<Y>(
        /* pad */ [params.padding as i32, params.padding as i32],
        /* stride */ [params.stride as i32, params.stride as i32],
        /* dilation */ [params.dilation as i32, params.dilation as i32],
        cudarc::cudnn::sys::cudnnConvolutionMode_t::CUDNN_CROSS_CORRELATION,
    )?;
    let x_shape = [
        params.b_size as i32,
        params.c_in as i32,
        params.i_h as i32,
        params.i_w as i32,
    ];
    // Note that `src` already starts at the proper offset.
    let x = if src_l.is_contiguous() {
        cudnn.create_4d_tensor::<T>(
            cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
            x_shape,
        )?
    } else {
        // Non-contiguous input: describe it to cuDNN with explicit strides.
        let s = src_l.stride();
        cudnn.create_4d_tensor_ex::<T>(
            x_shape,
            [s[0] as i32, s[1] as i32, s[2] as i32, s[3] as i32],
        )?
    };
    let w = cudnn.create_4d_filter::<T>(
        cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
        [
            params.c_out as i32,
            params.c_in as i32,
            params.k_h as i32,
            params.k_w as i32,
        ],
    )?;
    let (w_out, h_out) = (params.out_w() as i32, params.out_h() as i32);
    let y = cudnn.create_4d_tensor::<T>(
        cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
        [params.b_size as i32, params.c_out as i32, h_out, w_out],
    )?;
    let conv2d = ConvForward {
        conv: &conv,
        x: &x,
        w: &w,
        y: &y,
    };
    // Map Candle's algorithm selection onto the raw cuDNN enum; `None` lets
    // cuDNN pick.
    let alg = match params.cudnn_fwd_algo {
        None => conv2d.pick_algorithm()?,
        Some(CandleAlgo::ImplicitGemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
        Some(CandleAlgo::ImplicitPrecompGemm) => {
            A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
        }
        Some(CandleAlgo::Gemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
        Some(CandleAlgo::Direct) => A::CUDNN_CONVOLUTION_FWD_ALGO_DIRECT,
        Some(CandleAlgo::Fft) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT,
        Some(CandleAlgo::FftTiling) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING,
        Some(CandleAlgo::Winograd) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD,
        Some(CandleAlgo::WinogradNonFused) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
        Some(CandleAlgo::Count) => A::CUDNN_CONVOLUTION_FWD_ALGO_COUNT,
    };
    let workspace_size = conv2d.get_workspace_size(alg)?;
    let mut workspace = dev.cuda_stream().alloc_zeros::<u8>(workspace_size)?;
    unsafe {
        // (alpha, beta) = (1, 0): overwrite `dst` with the convolution result.
        conv2d.launch::<CudaSlice<u8>, _, _, _>(
            alg,
            Some(&mut workspace),
            (T::one(), T::zero()),
            src,
            filter,
            dst,
        )?;
    }
    Ok(())
}

/// Runs a 1D cross-correlation of `src` with `filter` into `dst` via cuDNN.
///
/// cuDNN has no native 1D descriptors, so the 1D problem is expressed as a
/// 2D convolution with a trailing width dimension of 1 (see the note below).
/// Type parameters and algorithm selection work as in [`launch_conv2d`].
pub(crate) fn launch_conv1d<
    T: DeviceRepr + WithDType + ValidAsZeroBits + cudarc::cudnn::CudnnDataType,
    Y: cudarc::cudnn::CudnnDataType,
>(
    src: &CudaView<T>,
    src_l: &crate::Layout,
    filter: &CudaView<T>,
    dst: &mut CudaSlice<T>,
    params: &crate::conv::ParamsConv1D,
    dev: &crate::cuda_backend::CudaDevice,
) -> crate::Result<()> {
    use crate::conv::CudnnFwdAlgo as CandleAlgo;
    use cudarc::cudnn::sys::cudnnConvolutionFwdAlgo_t as A;
    let device_id = dev.id();
    // Fetch (or lazily create) the per-thread cuDNN handle for this device.
    let cudnn = CUDNN.with(|cudnn| {
        if let Some(cudnn) = cudnn.borrow().get(&device_id) {
            return Ok(cudnn.clone());
        }
        let c = Cudnn::new(dev.cuda_stream());
        if let Ok(c) = &c {
            cudnn.borrow_mut().insert(device_id, c.clone());
        }
        c
    })?;
    // Pad/stride/dilation only apply along the length dimension; the dummy
    // width dimension uses the identity values (0 pad, stride 1, dilation 1).
    let conv = cudnn.create_conv2d::<Y>(
        /* pad */ [params.padding as i32, 0],
        /* stride */ [params.stride as i32, 1],
        /* dilation */ [params.dilation as i32, 1],
        cudarc::cudnn::sys::cudnnConvolutionMode_t::CUDNN_CROSS_CORRELATION,
    )?;
    // https://docs.nvidia.com/deeplearning/cudnn/backend/latest/api/cudnn-ops-library.html#cudnnsettensornddescriptor
    // > Tensors are restricted to having at least 4 dimensions, and at most CUDNN_DIM_MAX
    // > dimensions (defined in cudnn.h). When working with lower dimensional data, it is
    // > recommended that the user create a 4D tensor, and set the size along unused dimensions
    // > to 1.
    let x_shape = [
        params.b_size as i32,
        params.c_in as i32,
        params.l_in as i32,
        1,
    ];
    // Note that `src` already starts at the proper offset.
    let x = if src_l.is_contiguous() {
        cudnn.create_4d_tensor::<T>(
            cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
            x_shape,
        )?
    } else {
        // Non-contiguous input: describe it with explicit strides; the dummy
        // width dimension gets stride 1.
        let s = src_l.stride();
        cudnn.create_4d_tensor_ex::<T>(x_shape, [s[0] as i32, s[1] as i32, s[2] as i32, 1i32])?
    };
    let w = cudnn.create_4d_filter::<T>(
        cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
        [
            params.c_out as i32,
            params.c_in as i32,
            params.k_size as i32,
            1,
        ],
    )?;
    let l_out = params.l_out() as i32;
    let y = cudnn.create_4d_tensor::<T>(
        cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
        [params.b_size as i32, params.c_out as i32, l_out, 1],
    )?;
    let conv1d = ConvForward {
        conv: &conv,
        x: &x,
        w: &w,
        y: &y,
    };
    // Map Candle's algorithm selection onto the raw cuDNN enum; `None` lets
    // cuDNN pick.
    let alg = match params.cudnn_fwd_algo {
        None => conv1d.pick_algorithm()?,
        Some(CandleAlgo::ImplicitGemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
        Some(CandleAlgo::ImplicitPrecompGemm) => {
            A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
        }
        Some(CandleAlgo::Gemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
        Some(CandleAlgo::Direct) => A::CUDNN_CONVOLUTION_FWD_ALGO_DIRECT,
        Some(CandleAlgo::Fft) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT,
        Some(CandleAlgo::FftTiling) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING,
        Some(CandleAlgo::Winograd) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD,
        Some(CandleAlgo::WinogradNonFused) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
        Some(CandleAlgo::Count) => A::CUDNN_CONVOLUTION_FWD_ALGO_COUNT,
    };
    let workspace_size = conv1d.get_workspace_size(alg)?;
    let mut workspace = dev.cuda_stream().alloc_zeros::<u8>(workspace_size)?;
    unsafe {
        // (alpha, beta) = (1, 0): overwrite `dst` with the convolution result.
        conv1d.launch::<CudaSlice<u8>, _, _, _>(
            alg,
            Some(&mut workspace),
            (T::one(), T::zero()),
            src,
            filter,
            dst,
        )?;
    }
    Ok(())
}
candle/candle-core/src/cuda_backend/cudnn.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/cudnn.rs", "repo_id": "candle", "token_count": 4227 }
25
//! Implementation of Backend traits for Metal //! use crate::backend::{BackendDevice, BackendStorage}; use crate::conv::{ParamsConv1D, ParamsConv2D, ParamsConvTranspose1D, ParamsConvTranspose2D}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, CpuStorageRef, DType, Layout, Result, Shape}; use candle_metal_kernels::{BufferOffset, CallConvTranspose2dCfg, Kernels}; use metal::{Buffer, MTLResourceOptions, NSUInteger}; use std::collections::HashMap; use std::ffi::c_void; use std::sync::{Arc, Mutex, PoisonError, RwLock, TryLockError}; mod device; pub use device::{DeviceId, MetalDevice}; pub fn buffer_o<'a>(buffer: &'a Buffer, l: &Layout, dtype: DType) -> BufferOffset<'a> { BufferOffset { buffer, offset_in_bytes: l.start_offset() * dtype.size_in_bytes(), } } /// Simple way to catch lock error without /// depending on T #[derive(thiserror::Error, Debug)] pub enum LockError { #[error("{0}")] Poisoned(String), #[error("Would block")] WouldBlock, } impl<T> From<TryLockError<T>> for MetalError { fn from(value: TryLockError<T>) -> Self { match value { TryLockError::Poisoned(p) => MetalError::LockError(LockError::Poisoned(p.to_string())), TryLockError::WouldBlock => MetalError::LockError(LockError::WouldBlock), } } } impl<T> From<PoisonError<T>> for MetalError { fn from(p: PoisonError<T>) -> Self { MetalError::LockError(LockError::Poisoned(p.to_string())) } } /// Metal related errors #[derive(thiserror::Error, Debug)] pub enum MetalError { #[error("{0}")] Message(String), #[error(transparent)] KernelError(#[from] candle_metal_kernels::MetalKernelError), #[error("{0:?}")] LockError(LockError), #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, } impl From<String> for MetalError { fn from(e: String) -> Self { MetalError::Message(e) } } #[derive(Debug, Clone)] pub struct MetalStorage { /// The actual buffer containing the data. 
buffer: Arc<metal::Buffer>, /// a reference to the device owning this buffer device: MetalDevice, /// The count of allocated elements in the buffer count: usize, /// The dtype is kept since buffers are untyped. dtype: DType, } impl BackendStorage for MetalStorage { type Device = MetalDevice; fn try_clone(&self, _: &Layout) -> Result<Self> { Ok(self.clone()) } fn dtype(&self) -> DType { self.dtype } fn device(&self) -> &Self::Device { &self.device } fn to_cpu_storage(&self) -> Result<CpuStorage> { match self.dtype { DType::U8 => Ok(CpuStorage::U8(self.to_cpu()?)), DType::U32 => Ok(CpuStorage::U32(self.to_cpu()?)), DType::I64 => Ok(CpuStorage::I64(self.to_cpu()?)), DType::F16 => Ok(CpuStorage::F16(self.to_cpu()?)), DType::BF16 => Ok(CpuStorage::BF16(self.to_cpu()?)), DType::F32 => Ok(CpuStorage::F32(self.to_cpu()?)), DType::F64 => Ok(CpuStorage::F64(self.to_cpu()?)), DType::F8E4M3 => Ok(CpuStorage::F64(self.to_cpu()?)), } } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let el = shape.elem_count(); let dtype = self.dtype; let buffer = device.new_buffer(el, self.dtype, "affine")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, dtype); if layout.is_contiguous() { let name = match self.dtype { DType::F32 => "affine_f32", DType::F16 => "affine_f16", DType::BF16 => "affine_bf16", DType::U8 => "affine_u8", DType::U32 => "affine_u32", dtype => crate::bail!("Metal contiguous affine {dtype:?} not implemented"), }; candle_metal_kernels::call_affine( &device.device, &command_buffer, &device.kernels, name, el, src, &buffer, mul as f32, add as f32, ) .map_err(MetalError::from)?; } else { let name = match self.dtype { DType::F32 => "affine_f32_strided", DType::F16 => "affine_f16_strided", DType::BF16 => "affine_bf16_strided", dtype => crate::bail!("Metal strided affine {dtype:?} not implemented"), }; candle_metal_kernels::call_affine_strided( 
&device.device, &command_buffer, &device.kernels, name, layout.dims(), src, layout.stride(), &buffer, mul as f32, add as f32, ) .map_err(MetalError::from)?; } Ok(Self::new(buffer, device.clone(), el, dtype)) } fn powf(&self, layout: &Layout, pow: f64) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let el = shape.elem_count(); let dtype = self.dtype; let buffer = device.new_buffer(el, self.dtype, "powf")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, dtype); if layout.is_contiguous() { let name = match self.dtype { DType::F32 => "powf_f32", DType::F16 => "powf_f16", DType::BF16 => "powf_bf16", dtype => crate::bail!("Metal contiguous powf {dtype:?} not implemented"), }; candle_metal_kernels::call_powf( &device.device, &command_buffer, &device.kernels, name, el, src, &buffer, pow as f32, ) .map_err(MetalError::from)?; } else { let name = match self.dtype { DType::F32 => "powf_f32_strided", DType::F16 => "powf_f16_strided", DType::BF16 => "powf_bf16_strided", dtype => crate::bail!("Metal strided powf {dtype:?} not implemented"), }; candle_metal_kernels::call_powf_strided( &device.device, &command_buffer, &device.kernels, name, layout.dims(), src, layout.stride(), &buffer, pow as f32, ) .map_err(MetalError::from)?; } Ok(Self::new(buffer, device.clone(), el, dtype)) } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let el = shape.elem_count(); let dtype = self.dtype; let buffer = device.new_buffer(el, self.dtype, "elu")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); if layout.is_contiguous() { let name = match self.dtype { DType::F32 => "elu_f32", DType::F16 => "elu_f16", DType::BF16 => "elu_bf16", dtype => crate::bail!("Metal contiguous elu {dtype:?} not implemented"), }; candle_metal_kernels::call_elu( &device.device, &command_buffer, &device.kernels, 
name, el, src, &buffer, alpha as f32, ) .map_err(MetalError::from)?; } else { let name = match self.dtype { DType::F32 => "elu_f32_strided", DType::F16 => "elu_f16_strided", DType::BF16 => "elu_bf16_strided", dtype => crate::bail!("Metal strided elu {dtype:?} not implemented"), }; candle_metal_kernels::call_elu_strided( &device.device, &command_buffer, &device.kernels, name, layout.dims(), src, layout.stride(), &buffer, alpha as f32, ) .map_err(MetalError::from)?; } Ok(Self::new(buffer, device.clone(), el, dtype)) } fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> { let device = self.device.clone(); let src_stride = layout.stride(); let src_dims = layout.shape().dims(); // Source dims and strides with the sum dims at the end. let mut dims = vec![]; let mut stride = vec![]; let mut dst_el: usize = 1; for (dim_idx, &d) in src_dims.iter().enumerate() { if !sum_dims.contains(&dim_idx) { dst_el *= d; dims.push(d); stride.push(src_stride[dim_idx]); } } for &dim_idx in sum_dims.iter() { dims.push(src_dims[dim_idx]); stride.push(src_stride[dim_idx]); } let reduction_shape = Shape::from(dims.clone()); if layout.is_contiguous() && reduction_shape.is_contiguous(&stride) { let (name, check_empty, return_index) = match (op, self.dtype) { (ReduceOp::Sum, DType::F32) => ("fast_sum_f32", false, false), (ReduceOp::Min, DType::F32) => ("fast_min_f32", true, false), (ReduceOp::Max, DType::F32) => ("fast_max_f32", true, false), (ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32", true, true), (ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32", true, true), (ReduceOp::Sum, DType::U32) => ("fast_sum_u32", false, false), (ReduceOp::Min, DType::U32) => ("fast_min_u32", true, false), (ReduceOp::Max, DType::U32) => ("fast_max_u32", true, false), (ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32", true, true), (ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32", true, true), (ReduceOp::Sum, DType::F16) => ("fast_sum_f16", false, false), 
(ReduceOp::Min, DType::F16) => ("fast_min_f16", true, false), (ReduceOp::Max, DType::F16) => ("fast_max_f16", true, false), (ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16", true, true), (ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16", true, true), (ReduceOp::Sum, DType::BF16) => ("fast_sum_bf16", false, false), (ReduceOp::Min, DType::BF16) => ("fast_min_bf16", true, false), (ReduceOp::Max, DType::BF16) => ("fast_max_bf16", true, false), (ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16", true, true), (ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16", true, true), (ReduceOp::Sum, DType::I64) => ("fast_sum_i64", false, false), (ReduceOp::Min, DType::I64) => ("fast_min_i64", true, false), (ReduceOp::Max, DType::I64) => ("fast_max_i64", true, false), (ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64", true, true), (ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64", true, true), (ReduceOp::Sum, DType::U8) => ("fast_sum_u8", false, false), (ReduceOp::Min, DType::U8) => ("fast_min_u8", true, false), (ReduceOp::Max, DType::U8) => ("fast_max_u8", true, false), (ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8", true, true), (ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8", true, true), (k, dtype) => { crate::bail!("Metal contiguous reduce op {k:?} {dtype:?} not implemented") } }; if check_empty && layout.shape().elem_count() == 0 { Err(crate::Error::EmptyTensor { op: "reduce" }.bt())? 
} let dtype = if return_index { DType::U32 } else { self.dtype }; let buffer = device.new_buffer(dst_el, dtype, "reduce")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); candle_metal_kernels::call_reduce_contiguous( &device.device, &command_buffer, &device.kernels, name, src_dims, dst_el, src, &buffer, ) .map_err(MetalError::from)?; return Ok(Self::new(buffer, device, dst_el, dtype)); } let (name, check_empty, return_index) = match (op, self.dtype) { (ReduceOp::Sum, DType::F32) => ("fast_sum_f32_strided", false, false), (ReduceOp::Min, DType::F32) => ("fast_min_f32_strided", true, false), (ReduceOp::Max, DType::F32) => ("fast_max_f32_strided", true, false), (ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32_strided", true, true), (ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32_strided", true, true), (ReduceOp::Sum, DType::U32) => ("fast_sum_u32_strided", false, false), (ReduceOp::Min, DType::U32) => ("fast_min_u32_strided", true, false), (ReduceOp::Max, DType::U32) => ("fast_max_u32_strided", true, false), (ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32_strided", true, true), (ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32_strided", true, true), (ReduceOp::Sum, DType::F16) => ("fast_sum_f16_strided", false, false), (ReduceOp::Min, DType::F16) => ("fast_min_f16_strided", true, false), (ReduceOp::Max, DType::F16) => ("fast_max_f16_strided", true, false), (ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16_strided", true, true), (ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16_strided", true, true), (ReduceOp::Sum, DType::BF16) => ("fast_sum_bf16_strided", false, false), (ReduceOp::Min, DType::BF16) => ("fast_min_bf16_strided", true, false), (ReduceOp::Max, DType::BF16) => ("fast_max_bf16_strided", true, false), (ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16_strided", true, true), (ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16_strided", true, true), (ReduceOp::Sum, DType::I64) => 
("fast_sum_i64_strided", false, false), (ReduceOp::Min, DType::I64) => ("fast_min_i64_strided", true, false), (ReduceOp::Max, DType::I64) => ("fast_max_i64_strided", true, false), (ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64_strided", true, true), (ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64_strided", true, true), (ReduceOp::Sum, DType::U8) => ("fast_sum_u8_strided", false, false), (ReduceOp::Min, DType::U8) => ("fast_min_u8_strided", true, false), (ReduceOp::Max, DType::U8) => ("fast_max_u8_strided", true, false), (ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8_strided", true, true), (ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8_strided", true, true), (k, dtype) => crate::bail!("Metal strided reduce op {k:?} {dtype:?} not implemented"), }; if check_empty && layout.shape().elem_count() == 0 { Err(crate::Error::EmptyTensor { op: "reduce" }.bt())? } let dtype = if return_index { DType::U32 } else { self.dtype }; let buffer = device.new_buffer(dst_el, dtype, "reduce")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); candle_metal_kernels::call_reduce_strided( &device.device, &command_buffer, &device.kernels, name, &dims, &stride, dst_el, src, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, device, dst_el, dtype)) } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { let name = match op { CmpOp::Eq => "eq", CmpOp::Ne => "ne", CmpOp::Le => "le", CmpOp::Ge => "ge", CmpOp::Lt => "lt", CmpOp::Gt => "gt", }; self.binary(name, rhs, lhs_l, rhs_l) } fn const_set(&mut self, s: crate::scalar::Scalar, l: &Layout) -> Result<()> { use crate::scalar::Scalar; fn set<S: crate::WithDType + candle_metal_kernels::utils::EncoderParam>( self_: &mut MetalStorage, s: S, l: &Layout, ) -> Result<()> { let device = self_.device(); let dtype = self_.dtype; let shape = l.shape(); let el_count = shape.elem_count(); let command_buffer = device.command_buffer()?; 
command_buffer.set_label("const-set"); let dst = buffer_o(&self_.buffer, l, self_.dtype); match (el_count % 2, dtype, l.is_contiguous()) { (0, DType::BF16 | DType::F16, true) => { use candle_metal_kernels::unary::contiguous_tiled; let kernel_name = match dtype { DType::F16 => contiguous_tiled::const_set::HALF, DType::BF16 => contiguous_tiled::const_set::BFLOAT, _ => crate::bail!("internal bug in const_set"), }; candle_metal_kernels::call_const_set_contiguous_tiled( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, s, dst, ) .map_err(MetalError::from)?; } (_, _, true) => { use candle_metal_kernels::unary::contiguous; let kernel_name = match dtype { DType::F16 => contiguous::const_set::HALF, DType::BF16 => contiguous::const_set::BFLOAT, DType::F32 => contiguous::const_set::FLOAT, DType::I64 => contiguous::const_set::I64, DType::U32 => contiguous::const_set::U32, DType::U8 => contiguous::const_set::U8, DType::F8E4M3 => crate::bail!("unsupported const-set f8e4m3"), DType::F64 => crate::bail!("unsupported const-set f64"), }; candle_metal_kernels::call_const_set_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, s, dst, ) .map_err(MetalError::from)?; } (_, _, false) => { use candle_metal_kernels::unary::strided; let kernel_name = match dtype { DType::F16 => strided::const_set::HALF, DType::BF16 => strided::const_set::BFLOAT, DType::F32 => strided::const_set::FLOAT, DType::I64 => strided::const_set::I64, DType::U32 => strided::const_set::U32, DType::U8 => strided::const_set::U8, DType::F8E4M3 => crate::bail!("unsupported const-set f8e4m3"), DType::F64 => crate::bail!("unsupported const-set f64"), }; candle_metal_kernels::call_const_set_strided( &device.device, &command_buffer, &device.kernels, kernel_name, l.dims(), s, l.stride(), dst, ) .map_err(MetalError::from)?; } } Ok(()) } match (self.dtype, s) { (DType::U8, Scalar::U8(s)) => set(self, s, l), (DType::U32, Scalar::U32(s)) => set(self, s, l), (DType::I64, 
Scalar::I64(s)) => set(self, s, l), (DType::F16, Scalar::F16(s)) => set(self, s, l), (DType::BF16, Scalar::BF16(s)) => set(self, s, l), (DType::F32, Scalar::F32(s)) => set(self, s, l), (DType::F64, Scalar::F64(s)) => set(self, s, l), _ => crate::bail!("dtype mismatch, expected {:?}, got {:?}", self.dtype, s), } } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { let device = self.device(); let shape = layout.shape(); let el_count = shape.elem_count(); let buffer = device.new_buffer(el_count, dtype, "todtype")?; let command_buffer = device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); if layout.is_contiguous() { let kernel_name = match (self.dtype, dtype) { (DType::U32, DType::BF16) => "cast_u32_bf16", (DType::U32, DType::F16) => "cast_u32_f16", (DType::U32, DType::F32) => "cast_u32_f32", (DType::U32, DType::I64) => "cast_u32_i64", (DType::U32, DType::U8) => "cast_u32_u8", (DType::U8, DType::BF16) => "cast_u8_bf16", (DType::U8, DType::F16) => "cast_u8_f16", (DType::U8, DType::F32) => "cast_u8_f32", (DType::U8, DType::I64) => "cast_u8_i64", (DType::U8, DType::U32) => "cast_u8_u32", (DType::F32, DType::BF16) => "cast_f32_bf16", (DType::F32, DType::F16) => "cast_f32_f16", (DType::F32, DType::I64) => "cast_f32_i64", (DType::F32, DType::U32) => "cast_f32_u32", (DType::F32, DType::U8) => "cast_f32_u8", (DType::I64, DType::BF16) => "cast_i64_bf16", (DType::I64, DType::F16) => "cast_i64_f16", (DType::I64, DType::F32) => "cast_i64_f32", (DType::I64, DType::U32) => "cast_i64_u32", (DType::I64, DType::U8) => "cast_i64_u8", (DType::F16, DType::BF16) => "cast_f16_bf16", (DType::F16, DType::F32) => "cast_f16_f32", (DType::F16, DType::I64) => "cast_f16_i64", (DType::F16, DType::U32) => "cast_f16_u32", (DType::F16, DType::U8) => "cast_f16_u8", (DType::BF16, DType::F16) => "cast_bf16_f16", (DType::BF16, DType::F32) => "cast_bf16_f32", (DType::BF16, DType::I64) => "cast_bf16_i64", (DType::BF16, DType::U32) => "cast_bf16_u32", (DType::BF16, 
DType::U8) => "cast_bf16_u8", (left, right) => { crate::bail!("Metal contiguous to_dtype {left:?} {right:?} not implemented") } }; candle_metal_kernels::call_cast_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } else { let kernel_name = match (self.dtype, dtype) { (DType::BF16, DType::F16) => "cast_bf16_f16_strided", (DType::BF16, DType::F32) => "cast_bf16_f32_strided", (DType::BF16, DType::I64) => "cast_bf16_i64_strided", (DType::BF16, DType::U32) => "cast_bf16_u32_strided", (DType::BF16, DType::U8) => "cast_bf16_u8_strided", (DType::F16, DType::BF16) => "cast_f16_bf16_strided", (DType::F16, DType::F32) => "cast_f16_f32_strided", (DType::F16, DType::I64) => "cast_f16_i64_strided", (DType::F16, DType::U32) => "cast_f16_u32_strided", (DType::F16, DType::U8) => "cast_f16_u8_strided", (DType::F32, DType::BF16) => "cast_f32_bf16_strided", (DType::F32, DType::F16) => "cast_f32_f16_strided", (DType::F32, DType::I64) => "cast_f32_i64_strided", (DType::F32, DType::U32) => "cast_f32_u32_strided", (DType::F32, DType::U8) => "cast_f32_u8_strided", (DType::I64, DType::F32) => "cast_i64_f32_strided", (DType::I64, DType::BF16) => "cast_i64_bf16_strided", (DType::I64, DType::F16) => "cast_i64_f16_strided", (DType::I64, DType::U32) => "cast_i64_u32_strided", (DType::I64, DType::U8) => "cast_i64_u8_strided", (DType::U32, DType::BF16) => "cast_u32_bf16_strided", (DType::U32, DType::F16) => "cast_u32_f16_strided", (DType::U32, DType::F32) => "cast_u32_f32_strided", (DType::U32, DType::I64) => "cast_u32_i64_strided", (DType::U32, DType::U8) => "cast_u32_u8_strided", (DType::U8, DType::BF16) => "cast_u8_bf16_strided", (DType::U8, DType::F16) => "cast_u8_f16_strided", (DType::U8, DType::F32) => "cast_u8_f32_strided", (DType::U8, DType::I64) => "cast_u8_i64_strided", (DType::U8, DType::U32) => "cast_u8_u32_strided", (left, right) => { crate::bail!("Metal strided to_dtype {left:?} {right:?} not 
implemented") } }; candle_metal_kernels::call_cast_strided( &device.device, &command_buffer, &device.kernels, kernel_name, layout.dims(), src, layout.stride(), &buffer, ) .map_err(MetalError::from)?; } command_buffer.set_label("to_dtype"); Ok(Self::new(buffer, device.clone(), el_count, dtype)) } fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { let device = self.device(); let dtype = self.dtype; let shape = layout.shape(); let el_count = shape.elem_count(); let buffer = device.new_buffer(el_count, dtype, B::KERNEL)?; let command_buffer = device.command_buffer()?; command_buffer.set_label(B::KERNEL); let src = buffer_o(&self.buffer, layout, self.dtype); match (el_count % 2, dtype, layout.is_contiguous()) { (0, DType::BF16 | DType::F16, true) => { use candle_metal_kernels::unary::contiguous_tiled; let kernel_name = match (B::KERNEL, dtype) { ("uabs", DType::F16) => contiguous_tiled::abs::HALF, ("uabs", DType::F32) => contiguous_tiled::abs::FLOAT, ("uabs", DType::BF16) => contiguous_tiled::abs::BFLOAT, ("uceil", DType::F16) => contiguous_tiled::ceil::HALF, ("uceil", DType::F32) => contiguous_tiled::ceil::FLOAT, ("uceil", DType::BF16) => contiguous_tiled::ceil::BFLOAT, ("ucos", DType::F16) => contiguous_tiled::cos::HALF, ("ucos", DType::F32) => contiguous_tiled::cos::FLOAT, ("ucos", DType::BF16) => contiguous_tiled::cos::BFLOAT, ("uerf", DType::F16) => contiguous_tiled::erf::HALF, ("uerf", DType::F32) => contiguous_tiled::erf::FLOAT, ("uerf", DType::BF16) => contiguous_tiled::erf::BFLOAT, ("uexp", DType::F16) => contiguous_tiled::exp::HALF, ("uexp", DType::F32) => contiguous_tiled::exp::FLOAT, ("uexp", DType::BF16) => contiguous_tiled::exp::BFLOAT, ("ufloor", DType::F16) => contiguous_tiled::floor::HALF, ("ufloor", DType::F32) => contiguous_tiled::floor::FLOAT, ("ufloor", DType::BF16) => contiguous_tiled::floor::BFLOAT, ("ugelu_erf", DType::F16) => contiguous_tiled::gelu_erf::HALF, ("ugelu_erf", DType::F32) => contiguous_tiled::gelu_erf::FLOAT, 
("ugelu_erf", DType::BF16) => contiguous_tiled::gelu_erf::BFLOAT, ("ugelu", DType::F16) => contiguous_tiled::gelu::HALF, ("ugelu", DType::F32) => contiguous_tiled::gelu::FLOAT, ("ugelu", DType::BF16) => contiguous_tiled::gelu::BFLOAT, ("ulog", DType::F16) => contiguous_tiled::log::HALF, ("ulog", DType::F32) => contiguous_tiled::log::FLOAT, ("ulog", DType::BF16) => contiguous_tiled::log::BFLOAT, ("uneg", DType::F16) => contiguous_tiled::neg::HALF, ("uneg", DType::F32) => contiguous_tiled::neg::FLOAT, ("uneg", DType::BF16) => contiguous_tiled::neg::BFLOAT, ("urecip", DType::F16) => contiguous_tiled::recip::HALF, ("urecip", DType::F32) => contiguous_tiled::recip::FLOAT, ("urecip", DType::BF16) => contiguous_tiled::recip::BFLOAT, ("urelu", DType::F16) => contiguous_tiled::relu::HALF, ("urelu", DType::F32) => contiguous_tiled::relu::FLOAT, ("urelu", DType::BF16) => contiguous_tiled::relu::BFLOAT, ("uround", DType::F16) => contiguous_tiled::round::HALF, ("uround", DType::F32) => contiguous_tiled::round::FLOAT, ("uround", DType::BF16) => contiguous_tiled::round::BFLOAT, ("usilu", DType::F16) => contiguous_tiled::silu::HALF, ("usilu", DType::F32) => contiguous_tiled::silu::FLOAT, ("usilu", DType::BF16) => contiguous_tiled::silu::BFLOAT, ("usin", DType::F16) => contiguous_tiled::sin::HALF, ("usin", DType::F32) => contiguous_tiled::sin::FLOAT, ("usin", DType::BF16) => contiguous_tiled::sin::BFLOAT, ("usqr", DType::F16) => contiguous_tiled::sqr::HALF, ("usqr", DType::F32) => contiguous_tiled::sqr::FLOAT, ("usqr", DType::BF16) => contiguous_tiled::sqr::BFLOAT, ("usqrt", DType::F16) => contiguous_tiled::sqrt::HALF, ("usqrt", DType::F32) => contiguous_tiled::sqrt::FLOAT, ("usqrt", DType::BF16) => contiguous_tiled::sqrt::BFLOAT, ("utanh", DType::F16) => contiguous_tiled::tanh::HALF, ("utanh", DType::F32) => contiguous_tiled::tanh::FLOAT, ("utanh", DType::BF16) => contiguous_tiled::tanh::BFLOAT, ("usign", DType::F16) => contiguous_tiled::sign::HALF, ("usign", DType::F32) => 
contiguous_tiled::sign::FLOAT, ("usign", DType::BF16) => contiguous_tiled::sign::BFLOAT, ("usign", DType::I64) => contiguous_tiled::sign::I64, (name, dtype) => { crate::bail!( "Metal contiguous_tiled unary {name} {dtype:?} not implemented" ) } }; candle_metal_kernels::call_unary_contiguous_tiled( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, true) => { use candle_metal_kernels::unary::contiguous; let kernel_name = match (B::KERNEL, dtype) { ("uabs", DType::F16) => contiguous::abs::HALF, ("uabs", DType::F32) => contiguous::abs::FLOAT, ("uabs", DType::BF16) => contiguous::abs::BFLOAT, ("uceil", DType::F16) => contiguous::ceil::HALF, ("uceil", DType::F32) => contiguous::ceil::FLOAT, ("uceil", DType::BF16) => contiguous::ceil::BFLOAT, ("ucos", DType::F16) => contiguous::cos::HALF, ("ucos", DType::F32) => contiguous::cos::FLOAT, ("ucos", DType::BF16) => contiguous::cos::BFLOAT, ("uerf", DType::F16) => contiguous::erf::HALF, ("uerf", DType::F32) => contiguous::erf::FLOAT, ("uerf", DType::BF16) => contiguous::erf::BFLOAT, ("uexp", DType::F16) => contiguous::exp::HALF, ("uexp", DType::F32) => contiguous::exp::FLOAT, ("uexp", DType::BF16) => contiguous::exp::BFLOAT, ("ufloor", DType::F16) => contiguous::floor::HALF, ("ufloor", DType::F32) => contiguous::floor::FLOAT, ("ufloor", DType::BF16) => contiguous::floor::BFLOAT, ("ugelu_erf", DType::F16) => contiguous::gelu_erf::HALF, ("ugelu_erf", DType::F32) => contiguous::gelu_erf::FLOAT, ("ugelu_erf", DType::BF16) => contiguous::gelu_erf::BFLOAT, ("ugelu", DType::F16) => contiguous::gelu::HALF, ("ugelu", DType::F32) => contiguous::gelu::FLOAT, ("ugelu", DType::BF16) => contiguous::gelu::BFLOAT, ("ulog", DType::F16) => contiguous::log::HALF, ("ulog", DType::F32) => contiguous::log::FLOAT, ("ulog", DType::BF16) => contiguous::log::BFLOAT, ("uneg", DType::F16) => contiguous::neg::HALF, ("uneg", DType::F32) => contiguous::neg::FLOAT, ("uneg", 
DType::BF16) => contiguous::neg::BFLOAT, ("urecip", DType::F16) => contiguous::recip::HALF, ("urecip", DType::F32) => contiguous::recip::FLOAT, ("urecip", DType::BF16) => contiguous::recip::BFLOAT, ("urelu", DType::F16) => contiguous::relu::HALF, ("urelu", DType::F32) => contiguous::relu::FLOAT, ("urelu", DType::BF16) => contiguous::relu::BFLOAT, ("uround", DType::F16) => contiguous::round::HALF, ("uround", DType::F32) => contiguous::round::FLOAT, ("uround", DType::BF16) => contiguous::round::BFLOAT, ("usilu", DType::F16) => contiguous::silu::HALF, ("usilu", DType::F32) => contiguous::silu::FLOAT, ("usilu", DType::BF16) => contiguous::silu::BFLOAT, ("usin", DType::F16) => contiguous::sin::HALF, ("usin", DType::F32) => contiguous::sin::FLOAT, ("usin", DType::BF16) => contiguous::sin::BFLOAT, ("usqr", DType::F16) => contiguous::sqr::HALF, ("usqr", DType::F32) => contiguous::sqr::FLOAT, ("usqr", DType::BF16) => contiguous::sqr::BFLOAT, ("usqrt", DType::F16) => contiguous::sqrt::HALF, ("usqrt", DType::F32) => contiguous::sqrt::FLOAT, ("usqrt", DType::BF16) => contiguous::sqrt::BFLOAT, ("utanh", DType::F16) => contiguous::tanh::HALF, ("utanh", DType::F32) => contiguous::tanh::FLOAT, ("utanh", DType::BF16) => contiguous::tanh::BFLOAT, ("usign", DType::F16) => contiguous::sign::HALF, ("usign", DType::F32) => contiguous::sign::FLOAT, ("usign", DType::BF16) => contiguous::sign::BFLOAT, ("usign", DType::I64) => contiguous::sign::I64, (name, dtype) => { crate::bail!("Metal contiguous unary {name} {dtype:?} not implemented") } }; candle_metal_kernels::call_unary_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, false) => { use candle_metal_kernels::unary::strided; let kernel_name = match (B::KERNEL, dtype) { ("ucos", DType::F32) => strided::cos::FLOAT, ("usin", DType::F32) => strided::sin::FLOAT, ("usqr", DType::F32) => strided::sqr::FLOAT, ("usqrt", DType::F32) => strided::sqrt::FLOAT, 
("uneg", DType::F32) => strided::neg::FLOAT, ("uexp", DType::F32) => strided::exp::FLOAT, ("ulog", DType::F32) => strided::log::FLOAT, ("ugelu", DType::F32) => strided::gelu::FLOAT, ("ugelu_erf", DType::F32) => strided::gelu_erf::FLOAT, ("uerf", DType::F32) => strided::erf::FLOAT, ("usilu", DType::F32) => strided::silu::FLOAT, ("uabs", DType::F32) => strided::abs::FLOAT, ("uceil", DType::F32) => strided::ceil::FLOAT, ("ufloor", DType::F32) => strided::floor::FLOAT, ("urelu", DType::F32) => strided::relu::FLOAT, ("uround", DType::F32) => strided::round::FLOAT, ("utanh", DType::F32) => strided::tanh::FLOAT, ("ucos", DType::F16) => strided::cos::HALF, ("usin", DType::F16) => strided::sin::HALF, ("usqr", DType::F16) => strided::sqr::HALF, ("usqrt", DType::F16) => strided::sqrt::HALF, ("uneg", DType::F16) => strided::neg::HALF, ("uexp", DType::F16) => strided::exp::HALF, ("ulog", DType::F16) => strided::log::HALF, ("ugelu", DType::F16) => strided::gelu::HALF, ("ugelu_erf", DType::F16) => strided::gelu_erf::HALF, ("uerf", DType::F16) => strided::erf::HALF, ("usilu", DType::F16) => strided::silu::HALF, ("uabs", DType::F16) => strided::abs::HALF, ("uceil", DType::F16) => strided::ceil::HALF, ("ufloor", DType::F16) => strided::floor::HALF, ("urelu", DType::F16) => strided::relu::HALF, ("uround", DType::F16) => strided::round::HALF, ("utanh", DType::F16) => strided::tanh::HALF, ("ucos", DType::BF16) => strided::cos::BFLOAT, ("usin", DType::BF16) => strided::sin::BFLOAT, ("usqr", DType::BF16) => strided::sqr::BFLOAT, ("usqrt", DType::BF16) => strided::sqrt::BFLOAT, ("uneg", DType::BF16) => strided::neg::BFLOAT, ("uexp", DType::BF16) => strided::exp::BFLOAT, ("ulog", DType::BF16) => strided::log::BFLOAT, ("ugelu", DType::BF16) => strided::gelu::BFLOAT, ("ugelu_erf", DType::BF16) => strided::gelu_erf::BFLOAT, ("uerf", DType::BF16) => strided::erf::BFLOAT, ("usilu", DType::BF16) => strided::silu::BFLOAT, ("uabs", DType::BF16) => strided::abs::BFLOAT, ("uceil", DType::BF16) => 
// Remaining BF16 arms of the strided unary kernel dispatch table, plus the
// catch-all for unsupported (op, dtype) pairs.
strided::ceil::BFLOAT,
("ufloor", DType::BF16) => strided::floor::BFLOAT,
("urelu", DType::BF16) => strided::relu::BFLOAT,
("uround", DType::BF16) => strided::round::BFLOAT,
("utanh", DType::BF16) => strided::tanh::BFLOAT,
(name, dtype) => {
    crate::bail!("Metal strided unary {name} {dtype:?} not implemented")
}
};
// The output buffer is freshly allocated, so the strided kernel writes from
// offset zero regardless of the input layout's offset.
let dst = BufferOffset::zero_offset(&buffer);
candle_metal_kernels::call_unary_strided(
    &device.device,
    &command_buffer,
    &device.kernels,
    kernel_name,
    layout.dims(),
    src,
    layout.stride(),
    dst,
)
.map_err(MetalError::from)?;
}
}
Ok(Self::new(buffer, device.clone(), el_count, dtype))
}

/// Dispatches a binary op through the string-keyed `binary` helper using the
/// op's compile-time kernel name (`B::KERNEL`).
fn binary_impl<B: BinaryOpT>(
    &self,
    rhs: &Self,
    lhs_l: &Layout,
    rhs_l: &Layout,
) -> Result<Self> {
    self.binary(B::KERNEL, rhs, lhs_l, rhs_l)
}

/// Element-wise select: `self` is the condition mask; the output takes values
/// from `t` where the mask is set and from `f` elsewhere.
///
/// The output dtype follows the value operands (`t`/`f`), which must agree.
fn where_cond(
    &self,
    layout: &Layout,
    t: &Self,
    t_l: &Layout,
    f: &Self,
    f_l: &Layout,
) -> Result<Self> {
    let device = self.device.clone();
    let shape = t_l.shape();
    let dims = shape.dims();
    let el = shape.elem_count();
    let dtype = t.dtype;
    let buffer = self.device.new_buffer(el, dtype, "where")?;
    let command_buffer = self.device.command_buffer()?;
    if t.dtype() != f.dtype() {
        crate::bail!(
            "Invalid where: different dtypes for values {:?} != {:?}",
            t.dtype(),
            f.dtype()
        );
    }
    // Kernel name is keyed on (mask dtype, value dtype); only the pairs
    // listed below have compiled Metal kernels.
    let name = match (self.dtype, t.dtype()) {
        (DType::U8, DType::F32) => "where_u8_f32",
        (DType::U32, DType::F32) => "where_u32_f32",
        (DType::U8, DType::BF16) => "where_u8_bf16",
        (DType::U8, DType::F16) => "where_u8_f16",
        (DType::U8, DType::I64) => "where_u8_i64",
        (DType::U8, DType::U32) => "where_u8_u32",
        (DType::U8, DType::U8) => "where_u8_u8",
        (left, right) => crate::bail!("Metal where_cond {left:?} {right:?} not implemented"),
    };
    let src = buffer_o(&self.buffer, layout, self.dtype);
    let t = buffer_o(&t.buffer, t_l, t.dtype);
    let f = buffer_o(&f.buffer, f_l, f.dtype);
    candle_metal_kernels::call_where_cond_strided(
        &device.device,
        &command_buffer,
        &device.kernels,
        name,
        dims,
        src,
        layout.stride(),
        t,
        t_l.stride(),
        f,
        f_l.stride(),
        &buffer,
    )
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device, el, dtype))
}

/// 1D convolution implemented as im2col followed by a batched matmul.
///
/// Only F32 inputs have an im2col1d kernel; other dtypes bail.
fn conv1d(
    &self,
    layout: &Layout,
    kernel: &Self,
    kernel_l: &Layout,
    params: &ParamsConv1D,
) -> Result<Self> {
    let device = self.device().clone();
    let shape = layout.shape();
    let dims = shape.dims();
    let strides = layout.stride();
    let stride = params.stride;
    let dilation = params.dilation;
    let padding = params.padding;
    let k_size = params.k_size;
    // Standard convolution output-length formula.
    let l_out = (dims[2] + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1;
    let dst_el = dims[0] * l_out * dims[1] * k_size;
    let dst = self
        .device
        .new_buffer(dst_el, self.dtype, "conv1d_im2col")?;
    let command_buffer = self.device.command_buffer()?;
    let name = match self.dtype {
        DType::F32 => "im2col1d_f32",
        dtype => crate::bail!("Metal conv1d {dtype:?} not implemented"),
    };
    let src = buffer_o(&self.buffer, layout, self.dtype);
    candle_metal_kernels::call_im2col1d_strided(
        &self.device.device,
        &command_buffer,
        &self.device.kernels,
        name,
        layout.shape().dims(),
        strides,
        (k_size, stride, padding, dilation),
        src,
        &dst,
    )
    .map_err(MetalError::from)?;
    let col = Self {
        buffer: dst,
        device,
        count: dst_el,
        dtype: self.dtype,
    };
    let l_out = params.l_out();
    let b = params.b_size;
    let n = params.c_out;
    let k = params.k_size * params.c_in;
    let m = l_out;
    let col_l = Layout::contiguous((b, m, k));
    let res = if kernel_l.is_contiguous() {
        let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset())
            .transpose(1, 2)?
            .broadcast_as((b, k, n))?;
        col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)?
    } else {
        // Make the kernel contiguous if not already the case.
        let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?;
        kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?;
        // Bugfix: the matmul must read the freshly-made contiguous copy
        // (which starts at offset 0), not the original strided `kernel`
        // buffer — previously `kernel_c` was filled and then never used.
        let kernel_l = Layout::contiguous((1, n, k))
            .transpose(1, 2)?
            .broadcast_as((b, k, n))?;
        col.matmul(&kernel_c, (b, m, n, k), &col_l, &kernel_l)?
    };
    // Transpose the matmul result (b, l_out, c_out) back to the conv layout
    // (b, c_out, l_out) by copying through a strided view.
    let res_l = Layout::contiguous((b, l_out, n)).transpose(1, 2)?;
    let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?;
    res.copy_strided_src(&mut res_t, 0, &res_l)?;
    Ok(res_t)
}

/// Transposed 1D convolution.
///
/// Uses a fast matmul + col2im path when the kernel is contiguous and there
/// is no dilation/padding/output-padding; otherwise falls back to a direct
/// conv_transpose1d kernel.
fn conv_transpose1d(
    &self,
    layout: &Layout,
    k: &Self,
    k_layout: &Layout,
    params: &ParamsConvTranspose1D,
) -> Result<Self> {
    const USE_COL2IM_CONV1D_TR: bool = true;
    let can_use_col2im = k_layout.is_contiguous()
        && params.dilation == 1
        && params.padding == 0
        && params.output_padding == 0;
    let l_out = params.l_out();
    let dst_el = params.c_out * l_out * params.b_size;
    let buffer = if USE_COL2IM_CONV1D_TR && can_use_col2im {
        let (b_size, c_in, l_in) = layout.shape().dims3()?;
        let (c_in2, c_out, k_size) = k_layout.shape().dims3()?;
        if c_in != c_in2 {
            crate::bail!(
                "convtr1d: shape mismatch on c_in {:?} {:?}",
                layout.shape(),
                k_layout.shape()
            )
        }
        let buffer = self
            .device
            .new_buffer(dst_el, self.dtype, "conv_transpose1d")?;
        let name = match self.dtype {
            DType::F32 => "col2im1d_f32",
            DType::U32 => "col2im1d_u32",
            DType::U8 => "col2im1d_u8",
            dtype => crate::bail!("metal col2im1d {dtype:?} not implemented"),
        };
        let col = {
            // This merges the last two dimensions of the kernel together.
            let kernel_l_mm = Layout::new(
                (b_size, c_in, k_size * c_out).into(),
                vec![0, k_size * c_out, 1],
                k_layout.start_offset(),
            );
            self.matmul(
                k,
                (b_size, l_in, c_out * k_size, c_in),
                &layout.transpose(1, 2)?,
                &kernel_l_mm,
            )?
        };
        // It is important for the command buffer to be obtained *after* the matmul
        // kernel has run, otherwise we might use a command-buffer that has been committed
        // already resulting in the following error.
// _status < MTLCommandBufferStatusCommitted > // -[IOGPUMetalCommandBuffer setCurrentCommandEncoder:] let command_buffer = self.device.command_buffer()?; candle_metal_kernels::call_col2im1d( &self.device.device, &command_buffer, &self.device.kernels, name, &[b_size, l_in, c_out, k_size], params.k_size, params.stride, BufferOffset::zero_offset(&col.buffer), &buffer, ) .map_err(MetalError::from)?; buffer } else { let buffer = self .device .new_buffer(dst_el, self.dtype, "conv_transpose1d")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "conv_transpose1d_f32", DType::F16 => "conv_transpose1d_f16", DType::BF16 => "conv_transpose1d_bf16", DType::U32 => "conv_transpose1d_u32", DType::U8 => "conv_transpose1d_u8", dtype => crate::bail!("Metal conv_transpose1d {dtype:?} not implemented"), }; candle_metal_kernels::call_conv_transpose1d( &self.device.device, &command_buffer, &self.device.kernels, name, params.dilation, params.stride, params.padding, params.output_padding, params.c_out, l_out, params.b_size, layout.dims(), layout.stride(), k_layout.dims(), k_layout.stride(), &self.buffer, layout.start_offset() * self.dtype.size_in_bytes(), &k.buffer, k_layout.start_offset() * k.dtype.size_in_bytes(), &buffer, ) .map_err(MetalError::from)?; buffer }; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn conv2d( &self, layout: &Layout, kernel: &Self, kernel_l: &Layout, params: &ParamsConv2D, ) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let dims = shape.dims(); let stride = params.stride; let dilation = params.dilation; let padding = params.padding; let h_k = params.k_h; let w_k = params.k_w; let h = dims[2]; let w = dims[3]; let h_out = (h + 2 * padding - dilation * (h_k - 1) - 1) / stride + 1; let w_out = (w + 2 * padding - dilation * (w_k - 1) - 1) / stride + 1; let dst_el = dims[0] * h_out * w_out * dims[1] * h_k * w_k; let dst = self .device .new_buffer(dst_el, 
self.dtype, "conv2d_im2col")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "im2col_f32", DType::F16 => "im2col_f16", DType::BF16 => "im2col_bf16", DType::U8 => "im2col_u8", DType::U32 => "im2col_u32", dtype => crate::bail!("Metal conv2d {dtype:?} not implemented"), }; let src = buffer_o(&self.buffer, layout, self.dtype); candle_metal_kernels::call_im2col_strided( &self.device.device, &command_buffer, &self.device.kernels, name, layout.shape().dims(), layout.stride(), (h_k, w_k, stride, padding, dilation), src, &dst, ) .map_err(MetalError::from)?; let col = Self { buffer: dst, device, count: dst_el, dtype: self.dtype, }; let h_out = params.out_h(); let w_out = params.out_w(); let b = params.b_size; let n = params.c_out; let k = params.k_h * params.k_w * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, n)) .transpose(1, 2)? 
.transpose(1, 3)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &ParamsConvTranspose2D, ) -> Result<Self> { // Kernel shape: (c_in_k, c_out, h_k, w_k) // Input shape: (b_size, c_in, h_in, w_in) let (out_w, out_h) = (params.out_w(), params.out_h()); let dst_el = params.c_out * out_w * out_h * params.b_size; let dims = l.dims(); if dims.len() != 4 { crate::bail!("unexpected input shape for conv_transpose2d {dims:?}, expected 4") } let k_dims = kernel_l.dims(); if k_dims.len() != 4 { crate::bail!("unexpected kernel shape for conv_transpose2d {k_dims:?}, expected 4") } let buffer = self .device .new_buffer(dst_el, self.dtype, "conv_transpose2d")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "conv_transpose2d_f32", DType::F16 => "conv_transpose2d_f16", DType::BF16 => "conv_transpose2d_bf16", dtype => crate::bail!("Metal conv_transpose2d {dtype:?} not implemented"), }; candle_metal_kernels::call_conv_transpose2d( &self.device.device, &command_buffer, &self.device.kernels, name, CallConvTranspose2dCfg { dilation: params.dilation, stride: params.stride, padding: params.padding, output_padding: params.output_padding, c_out: params.c_out, out_h, out_w, b_size: params.b_size, input_dims: l.dims(), input_stride: l.stride(), kernel_dims: kernel_l.dims(), kernel_stride: kernel_l.stride(), input_offset: l.start_offset() * self.dtype.size_in_bytes(), kernel_offset: kernel_l.start_offset() * kernel.dtype.size_in_bytes(), }, &self.buffer, &kernel.buffer, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn avg_pool2d( &self, inp_l: &Layout, (w_k, h_k): (usize, usize), (w_stride, h_stride): (usize, usize), ) -> Result<Self> { let shape = inp_l.shape(); let (b_size, channels, width, height) = shape.dims4()?; let 
strides = inp_l.stride();
let name = match self.dtype {
    DType::F32 => "avg_pool2d_f32",
    DType::F16 => "avg_pool2d_f16",
    DType::BF16 => "avg_pool2d_bf16",
    DType::U8 => "avg_pool2d_u8",
    DType::U32 => "avg_pool2d_u32",
    dtype => crate::bail!("Metal avg_pool2d {dtype:?} not implemented"),
};
// Valid (no padding) pooling output size.
let out_w = (width - w_k) / w_stride + 1;
let out_h = (height - h_k) / h_stride + 1;
let dst_el = out_w * out_h * b_size * channels;
let buffer = self.device.new_buffer(dst_el, self.dtype, "avg_pool2d")?;
let command_buffers = self.device.command_buffer()?;
candle_metal_kernels::call_pool2d(
    &self.device.device,
    &command_buffers,
    &self.device.kernels,
    name,
    inp_l.dims(),
    strides,
    out_w,
    out_h,
    w_k,
    h_k,
    w_stride,
    h_stride,
    &self.buffer,
    &buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}

/// 2D max pooling over an NCHW input (no padding); mirrors `avg_pool2d`.
fn max_pool2d(
    &self,
    inp_l: &Layout,
    (w_k, h_k): (usize, usize),
    (w_stride, h_stride): (usize, usize),
) -> Result<Self> {
    let shape = inp_l.shape();
    let (b_size, channels, width, height) = shape.dims4()?;
    let strides = inp_l.stride();
    let name = match self.dtype {
        DType::F32 => "max_pool2d_f32",
        DType::F16 => "max_pool2d_f16",
        DType::BF16 => "max_pool2d_bf16",
        DType::U8 => "max_pool2d_u8",
        DType::U32 => "max_pool2d_u32",
        dtype => crate::bail!("Metal max_pool2d {dtype:?} not implemented"),
    };
    // Valid (no padding) pooling output size.
    let out_w = (width - w_k) / w_stride + 1;
    let out_h = (height - h_k) / h_stride + 1;
    let dst_el = out_w * out_h * b_size * channels;
    let buffer = self.device.new_buffer(dst_el, self.dtype, "max_pool2d")?;
    let command_buffers = self.device.command_buffer()?;
    candle_metal_kernels::call_pool2d(
        &self.device.device,
        &command_buffers,
        &self.device.kernels,
        name,
        inp_l.dims(),
        strides,
        out_w,
        out_h,
        w_k,
        h_k,
        w_stride,
        h_stride,
        &self.buffer,
        &buffer,
    )
    .map_err(MetalError::from)?;
    Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}

/// Not implemented for the Metal backend; always bails.
fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> {
    crate::bail!("Metal upsample_nearest1d not implemented")
}

/// Nearest-neighbor 2D upsampling of an NCHW input to (out_w, out_h).
fn upsample_nearest2d(&self, inp_l: &Layout, out_w: usize, out_h: usize) -> Result<Self> {
    // let inp = &inp.slice(inp_l.start_offset()..);
    let shape = inp_l.shape();
    let dims = shape.dims();
    let strides = inp_l.stride();
    if dims.len() != 4 {
        crate::bail!("unexpected input shape for upsample {dims:?}")
    }
    let name = match self.dtype {
        DType::F32 => "upsample_nearest2d_f32",
        DType::F16 => "upsample_nearest2d_f16",
        DType::BF16 => "upsample_nearest2d_bf16",
        DType::U8 => "upsample_nearest2d_u8",
        DType::U32 => "upsample_nearest2d_u32",
        dtype => crate::bail!("Metal upsample_nearest2d {dtype:?} not implemented"),
    };
    let dst_el = out_w * out_h * dims[0] * dims[1];
    let buffer = self
        .device
        .new_buffer(dst_el, self.dtype, "upsample_nearest2d")?;
    let command_buffer = self.device.command_buffer()?;
    let src = buffer_o(&self.buffer, inp_l, self.dtype);
    candle_metal_kernels::call_upsample_nearest_2d(
        &self.device.device,
        &command_buffer,
        &self.device.kernels,
        name,
        dims,
        strides,
        out_w,
        out_h,
        src,
        &buffer,
    )
    .map_err(MetalError::from)?;
    Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}

/// Gathers values from `self` along `dim` using the `ids` index tensor.
/// Requires contiguous ids; kernel keyed on (ids dtype, value dtype).
fn gather(&self, src_l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> {
    if !ids_l.is_contiguous() {
        return Err(crate::Error::RequiresContiguous { op: "gather" }.bt());
    };
    let ids_el = ids_l.dims()[dim];
    let dst_el = ids_l.shape().elem_count();
    let dtype = self.dtype;
    let device = self.device();
    let buffer = device.new_buffer(dst_el, dtype, "gather")?;
    let name = match (ids.dtype, self.dtype) {
        (DType::U32, DType::F32) => "gather_u32_f32",
        (DType::U32, DType::F16) => "gather_u32_f16",
        (DType::U32, DType::BF16) => "gather_u32_bf16",
        (DType::U32, DType::U32) => "gather_u32_u32",
        (DType::U32, DType::I64) => "gather_u32_i64",
        (DType::I64, DType::F32) => "gather_i64_f32",
        (DType::I64, DType::F16) => "gather_i64_f16",
        (DType::I64, DType::BF16) => "gather_i64_bf16",
        (DType::I64, DType::U32) => "gather_i64_u32",
        (DType::I64, DType::I64) => "gather_i64_i64",
(left, right) => crate::bail!("Metal gather {left:?} {right:?} not implemented"),
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, src_l, dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_gather(
    &device.device,
    &command_buffer,
    &self.device.kernels,
    name,
    src_l.dims(),
    ids_el,
    dim,
    src,
    ids,
    &buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device.clone(), dst_el, dtype))
}

/// In-place scatter: writes `src` values into `self` along `dim` at the
/// positions given by `ids`. All layouts must be contiguous.
fn scatter_set(
    &mut self,
    l: &Layout,
    ids: &Self,
    ids_l: &Layout,
    src: &Self,
    src_l: &Layout,
    dim: usize,
) -> Result<()> {
    if !l.is_contiguous() || !ids_l.is_contiguous() || !src_l.is_contiguous() {
        return Err(crate::Error::RequiresContiguous { op: "scatter" }.bt());
    };
    // Kernel keyed on (ids dtype, value dtype).
    let name = match (ids.dtype, self.dtype) {
        (DType::U8, DType::F32) => "s_u8_f32",
        (DType::U8, DType::F16) => "s_u8_f16",
        (DType::U8, DType::BF16) => "s_u8_bf16",
        (DType::U32, DType::U32) => "s_u32_u32",
        (DType::U32, DType::F32) => "s_u32_f32",
        (DType::U32, DType::F16) => "s_u32_f16",
        (DType::U32, DType::BF16) => "s_u32_bf16",
        (DType::I64, DType::F32) => "s_i64_f32",
        (DType::I64, DType::F16) => "s_i64_f16",
        (DType::I64, DType::BF16) => "s_i64_bf16",
        _ => Err(MetalError::UnexpectedDType {
            msg: "scatter ids should be u8/u32/i64",
            expected: DType::U32,
            got: ids.dtype(),
        })?,
    };
    let command_buffer = self.device.command_buffer()?;
    let dst = buffer_o(&self.buffer, l, self.dtype);
    let src = buffer_o(&src.buffer, src_l, src.dtype);
    let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
    candle_metal_kernels::call_scatter(
        &self.device.device,
        &command_buffer,
        &self.device.kernels,
        name,
        src_l.dims(),
        l.dims(),
        dim,
        src,
        ids,
        dst,
    )
    .map_err(MetalError::from)?;
    Ok(())
}

/// In-place scatter-add: accumulates `src` values into `self` along `dim`
/// at the positions given by `ids`. All layouts must be contiguous.
fn scatter_add_set(
    &mut self,
    l: &Layout,
    ids: &Self,
    ids_l: &Layout,
    src: &Self,
    src_l: &Layout,
    dim: usize,
) -> Result<()> {
    if !l.is_contiguous() || !ids_l.is_contiguous() || !src_l.is_contiguous() {
        return Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt());
    };
    let name =
// Accumulating ("sa_") variants of the scatter kernels, keyed on
// (ids dtype, value dtype).
match (ids.dtype, self.dtype) {
    (DType::U8, DType::F32) => "sa_u8_f32",
    (DType::U8, DType::F16) => "sa_u8_f16",
    (DType::U8, DType::BF16) => "sa_u8_bf16",
    (DType::U32, DType::U32) => "sa_u32_u32",
    (DType::U32, DType::F32) => "sa_u32_f32",
    (DType::U32, DType::F16) => "sa_u32_f16",
    (DType::U32, DType::BF16) => "sa_u32_bf16",
    (DType::I64, DType::F32) => "sa_i64_f32",
    (DType::I64, DType::F16) => "sa_i64_f16",
    (DType::I64, DType::BF16) => "sa_i64_bf16",
    _ => Err(MetalError::UnexpectedDType {
        msg: "scatter-add ids should be u8/u32/i64",
        expected: DType::U32,
        got: ids.dtype(),
    })?,
};
let command_buffer = self.device.command_buffer()?;
let dst = buffer_o(&self.buffer, l, self.dtype);
let src = buffer_o(&src.buffer, src_l, src.dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_scatter(
    &self.device.device,
    &command_buffer,
    &self.device.kernels,
    name,
    src_l.dims(),
    l.dims(),
    dim,
    src,
    ids,
    dst,
)
.map_err(MetalError::from)?;
Ok(())
}

/// Selects slices of `self` along `dim` using the `ids` index tensor.
/// Output size is |ids| * left_size * right_size where left/right are the
/// products of the dims before/after `dim`.
fn index_select(&self, ids: &Self, src_l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> {
    if !ids_l.is_contiguous() {
        crate::bail!("Metal index_select requires contiguous ids")
    }
    let left_size: usize = src_l.dims()[..dim].iter().product();
    let right_size: usize = src_l.dims()[dim + 1..].iter().product();
    let ids_el = ids_l.shape().elem_count();
    let dst_el = ids_el * left_size * right_size;
    let dtype = self.dtype;
    let device = self.device();
    let buffer = device.new_buffer(dst_el, dtype, "index_select")?;
    // Kernel keyed on (ids dtype, value dtype).
    let name = match (ids.dtype, self.dtype) {
        (DType::U8, DType::U8) => "is_u8_u8",
        (DType::U8, DType::U32) => "is_u8_u32",
        (DType::U8, DType::I64) => "is_u8_i64",
        (DType::U8, DType::BF16) => "is_u8_bf16",
        (DType::U8, DType::F32) => "is_u8_f32",
        (DType::U8, DType::F16) => "is_u8_f16",
        (DType::U32, DType::U8) => "is_u32_u8",
        (DType::U32, DType::U32) => "is_u32_u32",
        (DType::U32, DType::I64) => "is_u32_i64",
        (DType::U32, DType::F32) => "is_u32_f32",
        (DType::U32, DType::F16) => "is_u32_f16",
        (DType::U32,
DType::BF16) => "is_u32_bf16",
(DType::I64, DType::U8) => "is_i64_u8",
(DType::I64, DType::U32) => "is_i64_u32",
(DType::I64, DType::I64) => "is_i64_i64",
(DType::I64, DType::F32) => "is_i64_f32",
(DType::I64, DType::F16) => "is_i64_f16",
(DType::I64, DType::BF16) => "is_i64_bf16",
(left, right) => {
    crate::bail!("Metal contiguous index_select {left:?} {right:?} not implemented")
}
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, src_l, dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_index_select(
    &device.device,
    &command_buffer,
    &self.device.kernels,
    name,
    src_l.dims(),
    ids_el,
    dim,
    src_l.is_contiguous(),
    src_l.dims(),
    src_l.stride(),
    src,
    ids,
    &buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device.clone(), dst_el, dtype))
}

/// Out-of-place index-add: copies `self` into a fresh accumulator, then adds
/// `src` values at the `ids` positions along `dim` and returns the result.
/// Requires contiguous ids and src layouts.
fn index_add(
    &self,
    l: &Layout,
    ids: &Self,
    ids_l: &Layout,
    src: &Self,
    src_l: &Layout,
    dim: usize,
) -> Result<Self> {
    // Start from a copy of self so the operation is non-destructive.
    let mut acc = self.device.zeros_impl(l.shape(), self.dtype())?;
    self.copy_strided_src(&mut acc, 0, l)?;
    if !ids_l.is_contiguous() || !src_l.is_contiguous() {
        return Err(crate::Error::RequiresContiguous { op: "index-add" }.bt());
    };
    // Kernel keyed on (ids dtype, value dtype).
    let name = match (ids.dtype, self.dtype) {
        (DType::I64, DType::BF16) => "ia_i64_bf16",
        (DType::I64, DType::F16) => "ia_i64_f16",
        (DType::I64, DType::F32) => "ia_i64_f32",
        (DType::I64, DType::I64) => "ia_i64_i64",
        (DType::I64, DType::U32) => "ia_i64_u32",
        (DType::I64, DType::U8) => "ia_i64_u8",
        (DType::U32, DType::BF16) => "ia_u32_bf16",
        (DType::U32, DType::F16) => "ia_u32_f16",
        (DType::U32, DType::F32) => "ia_u32_f32",
        (DType::U32, DType::I64) => "ia_u32_i64",
        (DType::U32, DType::U32) => "ia_u32_u32",
        (DType::U32, DType::U8) => "ia_u32_u8",
        (DType::U8, DType::BF16) => "ia_u8_bf16",
        (DType::U8, DType::F16) => "ia_u8_f16",
        (DType::U8, DType::F32) => "ia_u8_f32",
        (DType::U8, DType::I64) => "ia_u8_i64",
        (DType::U8, DType::U32) => "ia_u8_u32",
        (DType::U8, DType::U8) => "ia_u8_u8",
        _ =>
Err(MetalError::UnexpectedDType {
    msg: "index-add ids should be u8/u32/i64",
    expected: DType::U32,
    got: ids.dtype(),
})?,
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&src.buffer, src_l, src.dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_index_add(
    &self.device.device,
    &command_buffer,
    &self.device.kernels,
    name,
    src_l.dims(),
    l.dims(),
    ids_l.dims(),
    dim,
    src,
    ids,
    &acc.buffer,
)
.map_err(MetalError::from)?;
Ok(acc)
}

/// Batched matrix multiplication via the MLX gemm kernels.
/// `(b, m, n, k)`: batch, lhs rows, rhs cols, shared dim.
/// Only float dtypes (F32/F16/BF16) are supported.
fn matmul(
    &self,
    rhs: &Self,
    (b, m, n, k): (usize, usize, usize, usize),
    lhs_l: &Layout,
    rhs_l: &Layout,
) -> Result<Self> {
    let buffer = self.device.new_buffer(b * m * n, self.dtype, "matmul")?;
    let command_buffer = self.device.command_buffer()?;
    command_buffer.set_label("matmul");
    let dtype = match self.dtype {
        DType::F32 => candle_metal_kernels::GemmDType::F32,
        DType::F16 => candle_metal_kernels::GemmDType::F16,
        DType::BF16 => candle_metal_kernels::GemmDType::BF16,
        dtype => {
            return Err(
                MetalError::Message(format!("mlx matmul doesn't support {dtype:?}")).into(),
            )
        }
    };
    candle_metal_kernels::call_mlx_gemm(
        &self.device.device,
        &command_buffer,
        &self.device.kernels,
        dtype,
        (b, m, n, k),
        lhs_l.stride(),
        // Offsets are converted from element counts to bytes for the kernel.
        lhs_l.start_offset() * self.dtype.size_in_bytes(),
        &self.buffer,
        rhs_l.stride(),
        rhs_l.start_offset() * rhs.dtype.size_in_bytes(),
        &rhs.buffer,
        &buffer,
    )
    .map_err(MetalError::from)?;
    Ok(Self::new(
        buffer,
        self.device.clone(),
        b * m * n,
        self.dtype(),
    ))
}

/// Copies a d1 x d2 2D region from `self` into `dst`, with per-row strides
/// `src_s`/`dst_s` and element offsets `src_o`/`dst_o`.
/// Fast path: when both strides equal the row width the region is one dense
/// range, so a blit (GPU memcpy) is used instead of a compute kernel.
fn copy2d(
    &self,
    dst: &mut Self,
    d1: usize,
    d2: usize,
    src_s: usize,
    dst_s: usize,
    src_o: usize,
    dst_o: usize,
) -> Result<()> {
    if self.dtype() != dst.dtype() {
        crate::bail!(
            "copy2d with inconsistent dtypes {:?} {:?}",
            self.dtype(),
            dst.dtype()
        )
    }
    let command_buffer = self.device.command_buffer()?;
    if src_s == d2 && dst_s == d2 {
        command_buffer.set_label("copy2d_contiguous");
        let blit = command_buffer.new_blit_command_encoder();
        blit.set_label("copy2d_contiguous");
        let src_offset = (src_o * self.dtype.size_in_bytes()) as NSUInteger;
        let length = (d1 * d2 * self.dtype.size_in_bytes()) as NSUInteger;
        let dst_offset = (dst_o * dst.dtype().size_in_bytes()) as NSUInteger;
        blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
        blit.end_encoding();
    } else {
        let el_count = d1 * d2;
        if el_count == 0 {
            return Ok(());
        }
        let kernel_name = match self.dtype {
            DType::F32 => candle_metal_kernels::copy2d::FLOAT,
            DType::F16 => candle_metal_kernels::copy2d::HALF,
            DType::BF16 => candle_metal_kernels::copy2d::BFLOAT,
            DType::I64 => candle_metal_kernels::copy2d::I64,
            DType::U32 => candle_metal_kernels::copy2d::U32,
            DType::U8 => candle_metal_kernels::copy2d::U8,
            dtype => crate::bail!("Metal copy2d {dtype:?} not implemented"),
        };
        candle_metal_kernels::call_copy2d(
            &self.device.device,
            &command_buffer,
            &self.device.kernels,
            kernel_name,
            &self.buffer,
            &dst.buffer,
            d1,
            d2,
            src_s,
            dst_s,
            src_o * self.dtype.size_in_bytes(),
            dst_o * self.dtype.size_in_bytes(),
        )
        .map_err(MetalError::from)?;
        command_buffer.set_label("copy2d");
    }
    Ok(())
}

/// Copies `self` (viewed through `src_l`) into `dst` starting at element
/// `dst_offset`. Contiguous same-dtype sources use a blit; otherwise a
/// strided copy kernel materializes the layout.
fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> {
    let command_buffer = self.device.command_buffer()?;
    if src_l.is_contiguous() && self.dtype == dst.dtype() {
        command_buffer.set_label("copy_contiguous");
        let blit = command_buffer.new_blit_command_encoder();
        blit.set_label("copy_contiguous");
        let src_offset = (src_l.start_offset() * self.dtype.size_in_bytes()) as NSUInteger;
        let length = (src_l.shape().elem_count() * self.dtype.size_in_bytes()) as NSUInteger;
        let dst_offset = (dst_offset * dst.dtype().size_in_bytes()) as NSUInteger;
        blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
        blit.end_encoding();
    } else {
        let src_shape = src_l.shape();
        let el_count = src_shape.elem_count();
        if el_count == 0 {
            return Ok(());
        }
        let kernel_name = match self.dtype {
            DType::F32 => candle_metal_kernels::unary::strided::copy::FLOAT,
            DType::F16 => candle_metal_kernels::unary::strided::copy::HALF,
DType::BF16 => candle_metal_kernels::unary::strided::copy::BFLOAT,
DType::I64 => candle_metal_kernels::unary::strided::copy::I64,
DType::U32 => candle_metal_kernels::unary::strided::copy::U32,
DType::U8 => candle_metal_kernels::unary::strided::copy::U8,
dtype => crate::bail!("Metal copy_strided {dtype:?} not implemented"),
};
let src = buffer_o(&self.buffer, src_l, self.dtype);
let dst = BufferOffset {
    buffer: &dst.buffer,
    offset_in_bytes: dst_offset * dst.dtype.size_in_bytes(),
};
candle_metal_kernels::call_unary_strided(
    &self.device.device,
    &command_buffer,
    &self.device.kernels,
    kernel_name,
    src_l.dims(),
    src,
    src_l.stride(),
    dst,
)
.map_err(MetalError::from)?;
command_buffer.set_label("copy_strided");
}
Ok(())
}
}

impl MetalStorage {
    /// Wraps a Metal buffer into a storage handle.
    /// `count` is the number of elements of `dtype` held by `buffer`.
    pub fn new(buffer: Arc<Buffer>, device: MetalDevice, count: usize, dtype: DType) -> Self {
        Self {
            buffer,
            device,
            count,
            dtype,
        }
    }

    /// Borrows the underlying Metal buffer.
    pub fn buffer(&self) -> &Buffer {
        &self.buffer
    }

    /// String-keyed binary op dispatch. Ops prefixed with "b" (e.g. "badd")
    /// only have strided kernels; comparison ops produce U8 outputs.
    pub fn binary(
        &self,
        op: &'static str,
        rhs: &Self,
        lhs_l: &Layout,
        rhs_l: &Layout,
    ) -> Result<Self> {
        let device = self.device();
        let shape = lhs_l.shape();
        let el_count = shape.elem_count();
        let command_buffer = device.command_buffer()?;
        let lhs = buffer_o(&self.buffer, lhs_l, self.dtype);
        let rhs = buffer_o(&rhs.buffer, rhs_l, rhs.dtype);
        // Contiguous fast path; "b"-prefixed op names are strided-only.
        let (buffer, dtype) = if lhs_l.is_contiguous() && rhs_l.is_contiguous() && &op[..1] != "b"
        {
            use candle_metal_kernels::binary::contiguous;
            let (kernel_name, dtype) = match (op, self.dtype) {
                ("add", DType::F32) => (contiguous::add::FLOAT, self.dtype),
                ("sub", DType::F32) => (contiguous::sub::FLOAT, self.dtype),
                ("mul", DType::F32) => (contiguous::mul::FLOAT, self.dtype),
                ("div", DType::F32) => (contiguous::div::FLOAT, self.dtype),
                ("eq", DType::F32) => (contiguous::eq::FLOAT, DType::U8),
                ("ne", DType::F32) => (contiguous::ne::FLOAT, DType::U8),
                ("le", DType::F32) => (contiguous::le::FLOAT, DType::U8),
                ("lt", DType::F32) => (contiguous::lt::FLOAT, DType::U8),
                ("ge", DType::F32) => (contiguous::ge::FLOAT, DType::U8),
("gt", DType::F32) => (contiguous::gt::FLOAT, DType::U8), ("add", DType::F16) => (contiguous::add::HALF, self.dtype), ("sub", DType::F16) => (contiguous::sub::HALF, self.dtype), ("mul", DType::F16) => (contiguous::mul::HALF, self.dtype), ("div", DType::F16) => (contiguous::div::HALF, self.dtype), ("eq", DType::F16) => (contiguous::eq::HALF, DType::U8), ("ne", DType::F16) => (contiguous::ne::HALF, DType::U8), ("le", DType::F16) => (contiguous::le::HALF, DType::U8), ("lt", DType::F16) => (contiguous::lt::HALF, DType::U8), ("ge", DType::F16) => (contiguous::ge::HALF, DType::U8), ("gt", DType::F16) => (contiguous::gt::HALF, DType::U8), ("add", DType::BF16) => (contiguous::add::BFLOAT, self.dtype), ("sub", DType::BF16) => (contiguous::sub::BFLOAT, self.dtype), ("mul", DType::BF16) => (contiguous::mul::BFLOAT, self.dtype), ("div", DType::BF16) => (contiguous::div::BFLOAT, self.dtype), ("eq", DType::BF16) => (contiguous::eq::BFLOAT, DType::U8), ("ne", DType::BF16) => (contiguous::ne::BFLOAT, DType::U8), ("le", DType::BF16) => (contiguous::le::BFLOAT, DType::U8), ("lt", DType::BF16) => (contiguous::lt::BFLOAT, DType::U8), ("ge", DType::BF16) => (contiguous::ge::BFLOAT, DType::U8), ("gt", DType::BF16) => (contiguous::gt::BFLOAT, DType::U8), ("add", DType::I64) => (contiguous::add::I64, self.dtype), ("sub", DType::I64) => (contiguous::sub::I64, self.dtype), ("mul", DType::I64) => (contiguous::mul::I64, self.dtype), ("div", DType::I64) => (contiguous::div::I64, self.dtype), ("eq", DType::I64) => (contiguous::eq::I64, DType::U8), ("ne", DType::I64) => (contiguous::ne::I64, DType::U8), ("le", DType::I64) => (contiguous::le::I64, DType::U8), ("lt", DType::I64) => (contiguous::lt::I64, DType::U8), ("ge", DType::I64) => (contiguous::ge::I64, DType::U8), ("gt", DType::I64) => (contiguous::gt::I64, DType::U8), ("add", DType::U32) => (contiguous::add::U32, self.dtype), ("sub", DType::U32) => (contiguous::sub::U32, self.dtype), ("mul", DType::U32) => (contiguous::mul::U32, 
self.dtype), ("div", DType::U32) => (contiguous::div::U32, self.dtype), ("eq", DType::U32) => (contiguous::eq::U32, DType::U8), ("ne", DType::U32) => (contiguous::ne::U32, DType::U8), ("le", DType::U32) => (contiguous::le::U32, DType::U8), ("lt", DType::U32) => (contiguous::lt::U32, DType::U8), ("ge", DType::U32) => (contiguous::ge::U32, DType::U8), ("gt", DType::U32) => (contiguous::gt::U32, DType::U8), ("add", DType::U8) => (contiguous::add::U8, self.dtype), ("sub", DType::U8) => (contiguous::sub::U8, self.dtype), ("mul", DType::U8) => (contiguous::mul::U8, self.dtype), ("div", DType::U8) => (contiguous::div::U8, self.dtype), ("eq", DType::U8) => (contiguous::eq::U8, DType::U8), ("ne", DType::U8) => (contiguous::ne::U8, DType::U8), ("le", DType::U8) => (contiguous::le::U8, DType::U8), ("lt", DType::U8) => (contiguous::lt::U8, DType::U8), ("ge", DType::U8) => (contiguous::ge::U8, DType::U8), ("gt", DType::U8) => (contiguous::gt::U8, DType::U8), (name, dtype) => { crate::bail!("Metal contiguous binary {name} {dtype:?} not implemented") } }; let buffer = device.new_buffer(el_count, dtype, op)?; candle_metal_kernels::call_binary_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, lhs, rhs, &buffer, ) .map_err(MetalError::from)?; (buffer, dtype) } else { use candle_metal_kernels::binary::strided; let (kernel_name, dtype) = match (op, self.dtype) { ("badd", DType::F32) => (strided::add::FLOAT, self.dtype), ("bsub", DType::F32) => (strided::sub::FLOAT, self.dtype), ("bmul", DType::F32) => (strided::mul::FLOAT, self.dtype), ("bdiv", DType::F32) => (strided::div::FLOAT, self.dtype), ("bminimum", DType::F32) => (strided::min::FLOAT, self.dtype), ("bmaximum", DType::F32) => (strided::max::FLOAT, self.dtype), ("eq", DType::F32) => (strided::eq::FLOAT, DType::U8), ("ne", DType::F32) => (strided::ne::FLOAT, DType::U8), ("le", DType::F32) => (strided::le::FLOAT, DType::U8), ("lt", DType::F32) => (strided::lt::FLOAT, DType::U8), ("ge", 
DType::F32) => (strided::ge::FLOAT, DType::U8), ("gt", DType::F32) => (strided::gt::FLOAT, DType::U8), ("badd", DType::F16) => (strided::add::HALF, self.dtype), ("bsub", DType::F16) => (strided::sub::HALF, self.dtype), ("bmul", DType::F16) => (strided::mul::HALF, self.dtype), ("bdiv", DType::F16) => (strided::div::HALF, self.dtype), ("bminimum", DType::F16) => (strided::min::HALF, self.dtype), ("bmaximum", DType::F16) => (strided::max::HALF, self.dtype), ("eq", DType::F16) => (strided::eq::HALF, DType::U8), ("ne", DType::F16) => (strided::ne::HALF, DType::U8), ("le", DType::F16) => (strided::le::HALF, DType::U8), ("lt", DType::F16) => (strided::lt::HALF, DType::U8), ("ge", DType::F16) => (strided::ge::HALF, DType::U8), ("gt", DType::F16) => (strided::gt::HALF, DType::U8), ("badd", DType::BF16) => (strided::add::BFLOAT, self.dtype), ("bsub", DType::BF16) => (strided::sub::BFLOAT, self.dtype), ("bmul", DType::BF16) => (strided::mul::BFLOAT, self.dtype), ("bdiv", DType::BF16) => (strided::div::BFLOAT, self.dtype), ("bminimum", DType::BF16) => (strided::min::BFLOAT, self.dtype), ("bmaximum", DType::BF16) => (strided::max::BFLOAT, self.dtype), ("eq", DType::BF16) => (strided::eq::BFLOAT, DType::U8), ("ne", DType::BF16) => (strided::ne::BFLOAT, DType::U8), ("le", DType::BF16) => (strided::le::BFLOAT, DType::U8), ("lt", DType::BF16) => (strided::lt::BFLOAT, DType::U8), ("ge", DType::BF16) => (strided::ge::BFLOAT, DType::U8), ("gt", DType::BF16) => (strided::gt::BFLOAT, DType::U8), ("badd", DType::I64) => (strided::add::I64, self.dtype), ("bsub", DType::I64) => (strided::sub::I64, self.dtype), ("bmul", DType::I64) => (strided::mul::I64, self.dtype), ("bdiv", DType::I64) => (strided::div::I64, self.dtype), ("bminimum", DType::I64) => (strided::min::I64, self.dtype), ("bmaximum", DType::I64) => (strided::max::I64, self.dtype), ("eq", DType::I64) => (strided::eq::I64, DType::U8), ("ne", DType::I64) => (strided::ne::I64, DType::U8), ("le", DType::I64) => (strided::le::I64, 
DType::U8), ("lt", DType::I64) => (strided::lt::I64, DType::U8), ("ge", DType::I64) => (strided::ge::I64, DType::U8), ("gt", DType::I64) => (strided::gt::I64, DType::U8), ("badd", DType::U32) => (strided::add::U32, self.dtype), ("bsub", DType::U32) => (strided::sub::U32, self.dtype), ("bmul", DType::U32) => (strided::mul::U32, self.dtype), ("bdiv", DType::U32) => (strided::div::U32, self.dtype), ("bminimum", DType::U32) => (strided::min::U32, self.dtype), ("bmaximum", DType::U32) => (strided::max::U32, self.dtype), ("eq", DType::U32) => (strided::eq::U32, DType::U8), ("ne", DType::U32) => (strided::ne::U32, DType::U8), ("le", DType::U32) => (strided::le::U32, DType::U8), ("lt", DType::U32) => (strided::lt::U32, DType::U8), ("ge", DType::U32) => (strided::ge::U32, DType::U8), ("gt", DType::U32) => (strided::gt::U32, DType::U8), ("badd", DType::U8) => (strided::add::U8, self.dtype), ("bsub", DType::U8) => (strided::sub::U8, self.dtype), ("bmul", DType::U8) => (strided::mul::U8, self.dtype), ("bdiv", DType::U8) => (strided::div::U8, self.dtype), ("bminimum", DType::U8) => (strided::min::U8, self.dtype), ("bmaximum", DType::U8) => (strided::max::U8, self.dtype), ("eq", DType::U8) => (strided::eq::U8, DType::U8), ("ne", DType::U8) => (strided::ne::U8, DType::U8), ("le", DType::U8) => (strided::le::U8, DType::U8), ("lt", DType::U8) => (strided::lt::U8, DType::U8), ("ge", DType::U8) => (strided::ge::U8, DType::U8), ("gt", DType::U8) => (strided::gt::U8, DType::U8), (name, dtype) => { crate::bail!("Metal strided binary {name} {dtype:?} not implemented") } }; let buffer = device.new_buffer(el_count, dtype, op)?; candle_metal_kernels::call_binary_strided( &device.device, &command_buffer, &device.kernels, kernel_name, lhs_l.dims(), lhs, lhs_l.stride(), rhs, rhs_l.stride(), &buffer, ) .map_err(MetalError::from)?; (buffer, dtype) }; command_buffer.set_label("binary"); Ok(Self::new(buffer, device.clone(), el_count, dtype)) } pub(crate) fn to_cpu<T: Clone>(&self) -> 
Result<Vec<T>> { let size = (self.count * self.dtype.size_in_bytes()) as NSUInteger; let buffer = self.device.new_buffer_managed(size)?; { let command_buffer = self.device.command_buffer()?; command_buffer.set_label("to_cpu"); let blit = command_buffer.new_blit_command_encoder(); blit.set_label("blit_to_cpu"); blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, size); blit.end_encoding(); } self.device.wait_until_completed()?; Ok(read_to_vec(&buffer, self.count)) } } impl BackendDevice for MetalDevice { type Storage = MetalStorage; fn new(ordinal: usize) -> Result<Self> { let device = metal::Device::all().swap_remove(ordinal); let command_queue = device.new_command_queue(); let kernels = Arc::new(Kernels::new()); let seed = Arc::new(Mutex::new(device.new_buffer_with_data( [299792458].as_ptr() as *const c_void, 4, MTLResourceOptions::StorageModeManaged, ))); let commands = device::Commands::new(command_queue)?; Ok(Self { id: DeviceId::new(), device, commands: Arc::new(RwLock::new(commands)), buffers: Arc::new(RwLock::new(HashMap::new())), kernels, seed, }) } fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Metal { gpu_id: self.registry_id() as usize, } } fn same_device(&self, rhs: &Self) -> bool { self.id == rhs.id } unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> { let buffer = self.new_buffer(shape.elem_count(), dtype, "alloc-uninit")?; Ok(MetalStorage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> { let size = shape.elem_count() * dtype.size_in_bytes(); let buffer = self.allocate_zeros(size)?; Ok(MetalStorage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> { let (count, buffer) = match T::cpu_storage_ref(s) { CpuStorageRef::U8(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::U32(storage) => 
(storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::I64(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::BF16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F64(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F8E4M3(_) => crate::bail!("Metal device does not yet support F8E4M3."), }; Ok(Self::Storage::new(buffer?, self.clone(), count, T::DTYPE)) } fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<Self::Storage> { let (count, buffer) = match storage { CpuStorage::U8(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::U32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::I64(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::BF16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F64(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F8E4M3(_) => crate::bail!("Metal device does not yet support F8E4M3."), }; Ok(Self::Storage::new( buffer?, self.clone(), count, storage.dtype(), )) } fn storage_from_cpu_storage_owned(&self, storage: CpuStorage) -> Result<Self::Storage> { self.storage_from_cpu_storage(&storage) } fn rand_uniform( &self, shape: &Shape, dtype: DType, min: f64, max: f64, ) -> Result<Self::Storage> { let name = match dtype { DType::F32 => "rand_uniform_f32", DType::F16 => "rand_uniform_f16", DType::BF16 => "rand_uniform_bf16", dtype => crate::bail!("rand_uniform not implemented for {dtype:?}"), }; let buffer = self.new_buffer(shape.elem_count(), dtype, "rand_uniform")?; let 
command_buffer = self.command_buffer()?; candle_metal_kernels::call_random_uniform( &self.device, &command_buffer, &self.kernels, name, min as f32, max as f32, shape.elem_count(), &self.seed.lock().unwrap(), &buffer, ) .map_err(MetalError::from)?; Ok(Self::Storage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn rand_normal( &self, shape: &Shape, dtype: DType, mean: f64, stddev: f64, ) -> Result<Self::Storage> { let name = match dtype { DType::F32 => "rand_normal_f32", DType::F16 => "rand_normal_f16", DType::BF16 => "rand_normal_bf16", dtype => crate::bail!("rand_uniform not implemented for {dtype:?}"), }; let buffer = self.new_buffer(shape.elem_count(), dtype, "rand_normal")?; let command_buffer = self.command_buffer()?; candle_metal_kernels::call_random_normal( &self.device, &command_buffer, &self.kernels, name, mean as f32, stddev as f32, shape.elem_count(), &self.seed.lock().unwrap(), &buffer, ) .map_err(MetalError::from)?; Ok(Self::Storage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn set_seed(&self, seed: u64) -> Result<()> { let seed: u32 = seed.try_into().map_err(|_| { MetalError::Message("Metal seed must be less than or equal to u32::MAX".to_string()) })?; let seed_buffer = self.seed.try_lock().map_err(MetalError::from)?; let contents = seed_buffer.contents(); unsafe { std::ptr::copy([seed].as_ptr(), contents as *mut u32, 1); } seed_buffer.did_modify_range(metal::NSRange::new(0, 4)); Ok(()) } fn synchronize(&self) -> Result<()> { self.wait_until_completed() } } fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> { let ptr = buffer.contents() as *const T; assert!(!ptr.is_null()); let slice = unsafe { std::slice::from_raw_parts(ptr, n) }; slice.to_vec() }
candle/candle-core/src/metal_backend/mod.rs/0
{ "file_path": "candle/candle-core/src/metal_backend/mod.rs", "repo_id": "candle", "token_count": 54045 }
26