Dataset schema: text (string, 1 to 1.02k chars) · class_index (int64, 0 to 10.8k) · source (string, 85 to 188 chars)
@torch.no_grad()
def forward(self, position_ids: torch.Tensor = None, past_key_values_length: int = 0):
    bsz, seq_len = position_ids.size()
    position_ids += self.offset

    # Expand embeddings if needed. `position_ids.max()` is NOT used to keep torch.fx compatibility.
    max_pos = 2 + seq_len + past_key_values_length
    if max_pos > self.weights.size(0):
        self.make_weights(max_pos, self.embedding_dim, self.padding_idx)

    return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
10,550
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
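To make the offset arithmetic in the forward above concrete, here is a hedged, minimal sketch; the offset of 2, the past length, and the sequence length are hypothetical example values:

import torch

# Decoding 3 new tokens after 2 cached ones, with the fairseq-style offset of 2.
offset, past_len, seq_len = 2, 2, 3
position_ids = torch.arange(past_len, past_len + seq_len).unsqueeze(0) + offset

# The highest row used is 2 + seq_len + past_len - 1, so a table with
# `max_pos` rows suffices -- the bound computed above without `position_ids.max()`.
max_pos = 2 + seq_len + past_len
assert int(position_ids.max()) == max_pos - 1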
class XGLMAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = torch.max(
                attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
        if attn_weights.dtype == torch.float16:
            attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16)
        else:
            attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output`
        # can be partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
10,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
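As a quick shape check of the head split/merge logic in the attention chunk above, a minimal self-contained sketch; all dimensions are hypothetical (in the real module the scaling is applied to the query projection):

import torch

bsz, tgt_len, embed_dim, num_heads = 2, 5, 16, 4
head_dim = embed_dim // num_heads
hidden = torch.randn(bsz, tgt_len, embed_dim)

# _shape: (bsz, seq, embed) -> (bsz, heads, seq, head_dim), then fold heads into batch
states = hidden.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2).contiguous()
states = states.view(bsz * num_heads, tgt_len, head_dim)

# batched attention scores: one (tgt_len x tgt_len) matrix per (batch, head) pair
scores = torch.bmm(states, states.transpose(1, 2)) * head_dim**-0.5
assert scores.shape == (bsz * num_heads, tgt_len, tgt_len)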
class XGLMDecoderLayer(nn.Module):
    def __init__(self, config: XGLMConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = XGLMAttention(
            embed_dim=self.embed_dim,
            num_heads=config.attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        if config.add_cross_attention:
            self.encoder_attn = XGLMAttention(
                embed_dim=self.embed_dim,
                num_heads=config.attention_heads,
                dropout=config.attention_dropout,
                is_decoder=True,
            )
            self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim)
        self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
    # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
hidden_states = self.self_attn_layer_norm(hidden_states)
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
10,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
class XGLMPreTrainedModel(PreTrainedModel):
    config_class = XGLMConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["XGLMDecoderLayer"]

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
10,553
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
class XGLMModel(XGLMPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_layers* layers. Each layer is an [`XGLMDecoderLayer`].

    Args:
        config: XGLMConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: XGLMConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = XGLMScaledWordEmbedding(
                config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
            )
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        self.embed_positions = XGLMSinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            config.pad_token_id,
        )
        self.layers = nn.ModuleList([XGLMDecoderLayer(config) for _ in range(config.num_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
    @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        if position_ids is None:
            position_ids = torch.arange(
                past_key_values_length,
                input_shape[-1] + past_key_values_length,
                dtype=torch.long,
                device=input_ids.device if input_ids is not None else inputs_embeds.device,
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(
                encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        hidden_states = inputs_embeds + self.embed_positions(position_ids, past_key_values_length)
        hidden_states = nn.functional.dropout(hidden_states, p=float(self.dropout), training=self.training)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
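The LayerDrop test above is just a per-layer Bernoulli draw at training time; a hedged sketch with hypothetical values:

import torch

layerdrop = 0.1  # hypothetical config.layerdrop
num_layers = 24
kept = [i for i in range(num_layers) if not (torch.rand([]) < layerdrop)]
# During training roughly `layerdrop * num_layers` layers are skipped per
# forward pass; at inference (`self.training` is False) every layer runs.
print(len(kept))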
past_key_value = past_key_values[idx] if past_key_values is not None else None
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
10,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
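A hedged end-to-end sketch of calling this decoder stack; the checkpoint name is one published XGLM checkpoint, but any would do, and the exact cache container may vary by transformers version:

import torch
from transformers import AutoTokenizer, XGLMModel

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMModel.from_pretrained("facebook/xglm-564M")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, use_cache=True)
print(out.last_hidden_state.shape)  # (1, seq_len, d_model)
print(len(out.past_key_values))     # one cached key/value entry per decoder layer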
class XGLMForCausalLM(XGLMPreTrainedModel, GenerationMixin):
    base_model_prefix = "model"
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = XGLMModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
    @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.lm_head(outputs[0])
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
        loss = None
        if labels is not None:
            # shift labels and add a pad token to the end
            shift_labels = labels.new_zeros(labels.shape)
            shift_labels[:, :-1] = labels[:, 1:].clone()
            shift_labels[:, -1] = self.config.pad_token_id

            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
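A small sketch of the label shift performed above, with hypothetical token ids: position i is trained to predict token i+1, and the final position gets the pad id (which the -100/pad convention would normally exclude from the loss):

import torch

pad_token_id = 1  # hypothetical
labels = torch.tensor([[5, 7, 9, 11]])

shift_labels = labels.new_zeros(labels.shape)
shift_labels[:, :-1] = labels[:, 1:].clone()  # targets are the next tokens
shift_labels[:, -1] = pad_token_id            # no real target for the last position

print(shift_labels)  # tensor([[ 7,  9, 11,  1]])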
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
10,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
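To see what `_reorder_cache` does during beam search, a hedged toy example: the cache's batch dimension is permuted to follow the surviving beams (sizes are hypothetical):

import torch

# One layer's cached keys: (batch_size * num_beams, heads, seq, head_dim); toy sizes.
past_key = torch.arange(4).float().view(4, 1, 1, 1)
beam_idx = torch.tensor([2, 2, 0, 1])  # beams selected at this decoding step

reordered = past_key.index_select(0, beam_idx)
print(reordered.flatten().tolist())  # [2.0, 2.0, 0.0, 1.0]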
class BitLinear(nn.Module):
    def __init__(self, in_features: int, out_features: int, bias: bool, device=None, dtype=None):
        super().__init__()
        self.dtype = dtype
        self.in_features = in_features
        self.out_features = out_features
        self.register_buffer(
            "weight",
            torch.zeros(
                (out_features // VALUES_PER_ITEM, in_features),
                dtype=torch.uint8,
                device=device,
            ),
        )
        self.register_buffer(
            "weight_scale",
            torch.ones(
                (1),
                dtype=dtype,
                device=device,
            ),
        )
        if bias:
            self.register_buffer("bias", torch.zeros((out_features), dtype=dtype, device=device))
        else:
            self.bias = None
10,556
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/bitnet.py
    @torch.compile
    def activation_quant(self, input, num_bits=8):
        """
        Performs symmetric, per-token quantization on the input activations.

        Parameters:
        -----------
        input : torch.Tensor
            Input activations to be quantized.
        num_bits : int, optional (default=8)
            Number of bits to use for quantization, determining the quantization range.

        Returns:
        --------
        result : torch.Tensor
            Quantized activation tensor, with values mapped to an `int8` range.
        scale : torch.Tensor
            The per-token scaling factors used to quantize the tensor.
        """
        Qn = -(2 ** (num_bits - 1))
        Qp = 2 ** (num_bits - 1) - 1
        scale = Qp / input.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
        result = (input * scale).round().clamp(Qn, Qp)
        return result.to(torch.int8), scale
10,556
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/bitnet.py
    @torch.compile
    def post_quant_process(self, input, input_scale, weight_scale):
        out = input / (input_scale * weight_scale)
        return out

    def forward(self, input):
        w = self.weight
        w_quant = unpack_weights(w, dtype=self.dtype)
        input_quant, input_scale = self.activation_quant(input)
        y = F.linear(input_quant.to(self.dtype), w_quant)
        # Pass the scales in the order the signature declares; the product is
        # commutative, so this matches the original behavior.
        y = self.post_quant_process(y, input_scale, self.weight_scale)
        if self.bias is not None:
            y += self.bias.view(1, -1).expand_as(y)
        return y
10,556
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/bitnet.py
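The per-token activation quantization above can be exercised on its own; a minimal sketch without `@torch.compile`, with hypothetical shapes and values:

import torch

def activation_quant(x, num_bits=8):
    Qn, Qp = -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1
    scale = Qp / x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
    return (x * scale).round().clamp(Qn, Qp).to(torch.int8), scale

x = torch.randn(2, 8)
q, scale = activation_quant(x)
dequant = q.float() / scale  # approximate reconstruction
print((x - dequant).abs().max())  # small quantization error, roughly bounded by 0.5 / scale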
class HfDeepSpeedConfig(DeepSpeedConfig):
    """
    This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.

    A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
    things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
    it's important that this object remains alive while the program is still running.

    [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the
    configuration with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this
    special logic the DeepSpeed configuration is not modified in any way.

    Args:
        config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
    """
10,557
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
    def __init__(self, config_file_or_dict):
        # set global weakref object
        set_hf_deepspeed_config(self)
        dep_version_check("accelerate")
        dep_version_check("deepspeed")
        super().__init__(config_file_or_dict)
10,557
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
    """
    The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has
    the same lifespan as the latter.
    """

    def __init__(self, config_file_or_dict):
        super().__init__(config_file_or_dict)
        self._dtype = None
        self.mismatches = []

    def dtype(self):
        if self._dtype is None:
            raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
        return self._dtype

    def is_auto(self, ds_key_long):
        val = self.get_value(ds_key_long)
        if val is None:
            return False
        else:
            return val == "auto"

    def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
        """
        A utility method that massages the config file and can optionally verify that the values match.

        1. Replace "auto" values with `TrainingArguments` value.
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
        2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer config values and
        if mismatched add the entry to `self.mismatches` - will assert during `trainer_config_finalize` for one or
        more mismatches.
        """
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return

        if config.get(ds_key) == "auto":
            config[ds_key] = hf_val
            return

        if not must_match:
            return

        ds_val = config.get(ds_key)
        if ds_val is not None and ds_val != hf_val:
            self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")

    fill_only = partialmethod(fill_match, must_match=False)
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
    def trainer_config_process(self, args, auto_find_batch_size=False):
        """
        Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
        creation.
        """
        # DeepSpeed does:
        # train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps
        train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps
        self.fill_match(
            "train_micro_batch_size_per_gpu",
            args.per_device_train_batch_size,
            "per_device_train_batch_size",
            not auto_find_batch_size,
        )
        self.fill_match(
            "gradient_accumulation_steps",
            args.gradient_accumulation_steps,
            "gradient_accumulation_steps",
        )
        self.fill_match(
            "train_batch_size",
            train_batch_size,
            "train_batch_size (calculated)",
            not auto_find_batch_size,
        )
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
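A worked instance of the batch-size identity above, with hypothetical TrainingArguments values:

# Hypothetical values: 8 processes, 4 samples per device, 2 accumulation steps.
world_size, per_device_train_batch_size, gradient_accumulation_steps = 8, 4, 2
train_batch_size = world_size * per_device_train_batch_size * gradient_accumulation_steps
assert train_batch_size == 64  # the value DeepSpeed expects `train_batch_size` to hold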
self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm")
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate") self.fill_match( "optimizer.params.betas", [args.adam_beta1, args.adam_beta2], "adam_beta1+adam_beta2", ) self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon") self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay") self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate") # total_num_steps - will get set in trainer_config_finalize # fp16 if args.fp16 or args.fp16_full_eval: fp16_backend = "apex" if args.fp16_backend == "apex" else "amp" else: fp16_backend = None
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
        if args.save_on_each_node:
            # deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True
            self.config["checkpoint"] = self.config.get("checkpoint", {})
            self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node

        # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set
        # any here unless the user did the work
        self.fill_match(
            "fp16.enabled",
            ((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"),
            "fp16|fp16_full_eval+fp16_backend(amp)",
        )

        # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any
        # ZeRO features
        self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)")
        self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level")
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval") # deepspeed's default mode is fp16 unless there is a config that says differently if self.is_true("bf16.enabled"): self._dtype = torch.bfloat16 elif self.is_false("fp16.enabled"): self._dtype = torch.float32 else: self._dtype = torch.float16 def trainer_config_finalize(self, args, model, num_training_steps): """ This stage is run after we have the model and know num_training_steps. Now we can complete the configuration process. """ # zero
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
        # deal with config keys that use `auto` value and rely on model's hidden_size
        hidden_size_based_keys = [
            "zero_optimization.reduce_bucket_size",
            "zero_optimization.stage3_prefetch_bucket_size",
            "zero_optimization.stage3_param_persistence_threshold",
        ]
        hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
        if len(hidden_size_auto_keys) > 0:
            if hasattr(model.config, "hidden_size"):
                hidden_size = model.config.hidden_size
            elif hasattr(model.config, "hidden_sizes"):
                # if there are many hidden sizes pick the largest one
                hidden_size = max(model.config.hidden_sizes)
            elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_size"):
                hidden_size = model.config.text_config.hidden_size
            elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_sizes"):
                # if there are many hidden sizes pick the largest one
                hidden_size = max(model.config.text_config.hidden_sizes)
            else:
                raise ValueError(
                    "The model's config file has neither `hidden_size` nor `hidden_sizes` entry, "
                    "therefore it's not possible to automatically fill out the following `auto` entries "
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " "`auto` values for these keys with an integer value of your choice." )
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size) if self.is_zero3(): # automatically assign the optimal config values based on model config self.fill_only( "zero_optimization.stage3_prefetch_bucket_size", int(0.9 * hidden_size * hidden_size), ) self.fill_only( "zero_optimization.stage3_param_persistence_threshold", 10 * hidden_size, ) # scheduler self.fill_match( "scheduler.params.total_num_steps", num_training_steps, "num_training_steps (calculated)", ) self.fill_match( "scheduler.params.warmup_num_steps", args.get_warmup_steps(num_training_steps), "warmup_steps", )
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
        if len(self.mismatches) > 0:
            mismatches = "\n".join(self.mismatches)
            raise ValueError(
                "Please correct the following DeepSpeed config values that mismatch TrainingArguments"
                f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
            )
10,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/deepspeed.py
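For illustration, a hedged minimal `auto`-style config fragment that the `fill_*` calls above would resolve; the keys come from the code above, and the comments show which Trainer-derived value each one receives:

ds_config = {
    "train_micro_batch_size_per_gpu": "auto",  # <- per_device_train_batch_size
    "gradient_accumulation_steps": "auto",     # <- gradient_accumulation_steps
    "zero_optimization": {
        "stage": 3,
        "reduce_bucket_size": "auto",                  # <- hidden_size * hidden_size
        "stage3_prefetch_bucket_size": "auto",         # <- 0.9 * hidden_size * hidden_size
        "stage3_param_persistence_threshold": "auto",  # <- 10 * hidden_size
    },
    "scheduler": {
        "params": {"total_num_steps": "auto", "warmup_num_steps": "auto"},
    },
}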
class HiggsLinear(torch.nn.Module):
    def __init__(
        self,
        in_features: int,
        out_features: int,
        num_bits: int,
        bias=True,
        dtype: torch.dtype = None,
        device: torch.device = None,
        group_size: int = 256,
        hadamard_size: int = 1024,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.num_bits = num_bits
        self.group_size = group_size
        self.hadamard_size = hadamard_size
        self.num_sms_packed = nn.Parameter(torch.tensor(-1, dtype=torch.int32, device=device), requires_grad=False)

        assert in_features % group_size == 0
        assert num_bits in [2, 3, 4]
10,559
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/higgs.py
        self.weight = nn.Parameter(
            torch.empty((out_features * num_bits // 16, in_features), dtype=torch.int16, device=device),
            requires_grad=False,
        )
        self.scales = nn.Parameter(
            torch.empty((out_features, in_features // group_size), dtype=dtype, device=device), requires_grad=False
        )
        self.tables = nn.Parameter(torch.empty((2**num_bits,), dtype=dtype, device=device), requires_grad=False)
        self.tables2 = nn.Parameter(
            torch.empty((2**num_bits, 2**num_bits, 2), dtype=dtype, device=device), requires_grad=False
        )

        if bias:
            self.bias = nn.Parameter(torch.empty(out_features, device=device, dtype=dtype), requires_grad=False)
        else:
            self.register_parameter("bias", None)

        self.workspace = None  # must be set externally to be reused among layers

    def forward(self, x):
        x = pad_to_block(x, [-1], self.hadamard_size)
10,559
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/higgs.py
        if self.workspace is None:
            raise Exception("Workspace must be set before calling forward")

        return flute.qgemm_hadamard(
            x,
            self.weight,
            self.scales,
            self.tables,
            self.tables2.view(dtype=torch.float32),
            self.workspace,
            self.num_bits,
            self.group_size,
            self.hadamard_size,
        )
10,559
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/higgs.py
class PeftAdapterMixin:
    """
    A class containing all functions for loading and using adapter weights that are supported in the PEFT library.
    For more details about adapters and injecting them into a transformer-based model, check out the PEFT library
    documentation: https://huggingface.co/docs/peft/index

    Currently supported PEFT methods are all non-prefix-tuning methods. Below is the list of supported PEFT methods
    that anyone can load, train and run with this mixin class:
    - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora
    - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3
    - AdaLora: https://arxiv.org/abs/2303.10512

    Other PEFT models such as prompt tuning, prompt learning are out of scope as these adapters are not "injectable"
    into a torch module. For using these methods, please refer to the usage guide of the PEFT library.

    With this mixin, if the correct PEFT version is installed, it is possible to:
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model
    - Attach new adapters in the model and train them with Trainer or on your own.
    - Attach multiple adapters and iteratively activate / deactivate them
    - Activate / deactivate all adapters from the model.
    - Get the `state_dict` of the active adapter.
    """

    _hf_peft_config_loaded = False
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    def load_adapter(
        self,
        peft_model_id: Optional[str] = None,
        adapter_name: Optional[str] = None,
        revision: Optional[str] = None,
        token: Optional[str] = None,
        device_map: Optional[str] = "auto",
        max_memory: Optional[str] = None,
        offload_folder: Optional[str] = None,
        offload_index: Optional[int] = None,
        peft_config: Dict[str, Any] = None,
        adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None,
        low_cpu_mem_usage: bool = False,
        is_trainable: bool = False,
        adapter_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods,
        we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft

        Requires peft as a backend to load the adapter weights.
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        Args:
            peft_model_id (`str`, *optional*):
                The identifier of the model to look for on the Hub, or a local path to the saved adapter config file
                and adapter weights.
            adapter_name (`str`, *optional*):
                The adapter name to use. If not set, will use the default adapter.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.

                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
            token (`str`, *optional*):
                The authentication token used to load the remote folder. Useful to load private repositories that are
                on the Hugging Face Hub. You might need to call `huggingface-cli login` and paste your token to cache
                it.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
                like `1`) on which the model will be allocated, the device map will map the entire model to this
                device. Passing `device_map = 0` means put the whole model on GPU 0.
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`.
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory
                available for each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
            offload_index (`int`, *optional*):
                `offload_index` argument to be passed to `accelerate.dispatch_model` method.
            peft_config (`Dict[str, Any]`, *optional*):
                The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompt
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
                methods. This argument is used in case users directly pass PEFT state dicts.
            adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*):
                The state dict of the adapter to load. This argument is used in case users directly pass PEFT state
                dicts.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `False`):
                Reduce memory usage while loading the PEFT adapter. This should also speed up the loading process.
                Requires PEFT version 0.13.0 or higher.
            is_trainable (`bool`, *optional*, defaults to `False`):
                Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
                used for inference.
            adapter_kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and
                the `find_adapter_config_file` method.
        """
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
check_peft_version(min_version=MIN_PEFT_VERSION)
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        # peft only supports low_cpu_mem_usage starting from v0.13.0
        peft_load_kwargs = {}
        if low_cpu_mem_usage:
            min_version_lcmu = "0.13.0"
            if version.parse(importlib.metadata.version("peft")) >= version.parse(min_version_lcmu):
                peft_load_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
            else:
                raise ValueError(
                    "The version of PEFT you are using does not support `low_cpu_mem_usage` yet, "
                    f"please install PEFT >= {min_version_lcmu}."
                )

        adapter_name = adapter_name if adapter_name is not None else "default"
        if adapter_kwargs is None:
            adapter_kwargs = {}

        from peft import PeftConfig, inject_adapter_in_model, load_peft_weights
        from peft.utils import set_peft_model_state_dict
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        if self._hf_peft_config_loaded and adapter_name in self.peft_config:
            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")

        if peft_model_id is None and (adapter_state_dict is None and peft_config is None):
            raise ValueError(
                "You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter."
            )

        if "device" not in adapter_kwargs:
            device = self.device if not hasattr(self, "hf_device_map") else list(self.hf_device_map.values())[0]
        else:
            device = adapter_kwargs.pop("device")

        # To avoid PEFT errors later on with safetensors.
        if isinstance(device, torch.device):
            device = str(device)
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        # We keep `revision` in the signature for backward compatibility
        if revision is not None and "revision" not in adapter_kwargs:
            adapter_kwargs["revision"] = revision
        elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]:
            logger.error(
                "You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. "
                "The one in `adapter_kwargs` will be used."
            )

        # Override token with adapter_kwargs' token
        if "token" in adapter_kwargs:
            token = adapter_kwargs.pop("token")

        if peft_config is None:
            adapter_config_file = find_adapter_config_file(
                peft_model_id,
                token=token,
                **adapter_kwargs,
            )
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
            if adapter_config_file is None:
                raise ValueError(
                    f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the "
                    "adapter model."
                )

            peft_config = PeftConfig.from_pretrained(
                peft_model_id,
                token=token,
                **adapter_kwargs,
            )
            peft_config.inference_mode = not is_trainable

        # Create and add fresh new adapters into the model.
        inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs)

        if not self._hf_peft_config_loaded:
            self._hf_peft_config_loaded = True

        if peft_model_id is not None:
            adapter_state_dict = load_peft_weights(peft_model_id, token=token, device=device, **adapter_kwargs)
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility
        processed_adapter_state_dict = {}
        prefix = "base_model.model."
        for key, value in adapter_state_dict.items():
            if key.startswith(prefix):
                new_key = key[len(prefix) :]
            else:
                new_key = key
            processed_adapter_state_dict[new_key] = value

        # Load state dict
        incompatible_keys = set_peft_model_state_dict(
            self, processed_adapter_state_dict, adapter_name, **peft_load_kwargs
        )
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        if incompatible_keys is not None:
            err_msg = ""
            origin_name = peft_model_id if peft_model_id is not None else "state_dict"
            # Check for unexpected keys.
            if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0:
                err_msg = (
                    f"Loading adapter weights from {origin_name} led to unexpected keys not found in the model: "
                    f"{', '.join(incompatible_keys.unexpected_keys)}. "
                )
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
            # Check for missing keys.
            missing_keys = getattr(incompatible_keys, "missing_keys", None)
            if missing_keys:
                # Filter missing keys specific to the current adapter, as missing base model keys are expected.
                lora_missing_keys = [k for k in missing_keys if "lora_" in k and adapter_name in k]
                if lora_missing_keys:
                    err_msg += (
                        f"Loading adapter weights from {origin_name} led to missing keys in the model: "
                        f"{', '.join(lora_missing_keys)}"
                    )

            if err_msg:
                logger.warning(err_msg)

        if peft_config.inference_mode:
            self.eval()
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        # Re-dispatch model and hooks in case the model is offloaded to CPU / Disk.
        if (
            (getattr(self, "hf_device_map", None) is not None)
            and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
            and len(self.peft_config) == 1
        ):
            self._dispatch_accelerate_model(
                device_map=device_map,
                max_memory=max_memory,
                offload_folder=offload_folder,
                offload_index=offload_index,
            )

    def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None:
        r"""
        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        Adds a fresh new adapter to the current model for training purposes. If no adapter name is passed, a default
        name is assigned to the adapter to follow the convention of the PEFT library (in PEFT we use "default" as the
        default adapter name).

        Args:
            adapter_config (`~peft.PeftConfig`):
                The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompt
                methods.
            adapter_name (`str`, *optional*, defaults to `"default"`):
                The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        from peft import PeftConfig, inject_adapter_in_model

        adapter_name = adapter_name or "default"
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        if not self._hf_peft_config_loaded:
            self._hf_peft_config_loaded = True
        elif adapter_name in self.peft_config:
            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")

        if not isinstance(adapter_config, PeftConfig):
            raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.")

        # Retrieve the name or path of the model, one could also use self.config._name_or_path
        # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100
        adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None)
        inject_adapter_in_model(adapter_config, self, adapter_name)

        self.set_adapter(adapter_name)
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
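A hedged usage sketch of this mixin; the checkpoint and LoRA hyperparameters are hypothetical choices, and `peft` must be installed:

from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/xglm-564M")  # hypothetical choice
lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])

model.add_adapter(lora_config, adapter_name="my_lora")  # injects the adapter and activates it
print(model.active_adapters())  # ["my_lora"]
model.disable_adapters()        # fall back to the base model for inference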
    def set_adapter(self, adapter_name: Union[List[str], str]) -> None:
        """
        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft

        Sets a specific adapter by forcing the model to use that adapter and disabling the other adapters.
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        Args:
            adapter_name (`Union[List[str], str]`):
                The name of the adapter to set. Can also be a list of strings to set multiple adapters.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)
        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")
        elif isinstance(adapter_name, list):
            missing = set(adapter_name) - set(self.peft_config)
            if len(missing) > 0:
                raise ValueError(
                    f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
                    f" currently loaded adapters are: {list(self.peft_config.keys())}"
                )
        elif adapter_name not in self.peft_config:
            raise ValueError(
                f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}"
            )
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        from peft.tuners.tuners_utils import BaseTunerLayer
        from peft.utils import ModulesToSaveWrapper

        _adapters_has_been_set = False

        for _, module in self.named_modules():
            if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
                # For backward compatibility with previous PEFT versions
                if hasattr(module, "set_adapter"):
                    module.set_adapter(adapter_name)
                else:
                    module.active_adapter = adapter_name
                _adapters_has_been_set = True

        if not _adapters_has_been_set:
            raise ValueError(
                "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters."
            )

    def disable_adapters(self) -> None:
        r"""
        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        Disable all adapters that are attached to the model. This leads to inference with the base model only.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer
        from peft.utils import ModulesToSaveWrapper

        for _, module in self.named_modules():
            if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
                # Recent versions of PEFT need to call `enable_adapters` instead
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=False)
                else:
                    module.disable_adapters = True
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    def enable_adapters(self) -> None:
        """
        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft

        Enable adapters that are attached to the model.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                # Recent versions of PEFT need to call `enable_adapters` instead
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=True)
                else:
                    module.disable_adapters = False
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
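`disable_adapters` and `enable_adapters` form a non-destructive toggle: the adapter weights stay loaded, they are just bypassed. A minimal sketch comparing base and adapted outputs, assuming `model` has an adapter loaded as above and `inputs` is a tokenized batch:

```py
import torch

# `inputs` is assumed to be something like tokenizer("Hello", return_tensors="pt").
with torch.no_grad():
    model.disable_adapters()
    base_logits = model(**inputs).logits      # base weights only

    model.enable_adapters()
    adapted_logits = model(**inputs).logits   # adapter weights applied again

print(torch.allclose(base_logits, adapted_logits))  # typically False
```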
def active_adapters(self) -> List[str]:
    """
    If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
    official documentation: https://huggingface.co/docs/peft

    Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple
    adapters for inference), this returns the list of all active adapters so that users can deal with them
    accordingly. For previous PEFT versions (that do not support multi-adapter inference),
    `module.active_adapter` will return a single string.
    """
    check_peft_version(min_version=MIN_PEFT_VERSION)

    if not is_peft_available():
        raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")

    if not self._hf_peft_config_loaded:
        raise ValueError("No adapter loaded. Please load an adapter first.")

    from peft.tuners.tuners_utils import BaseTunerLayer
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    for _, module in self.named_modules():
        if isinstance(module, BaseTunerLayer):
            active_adapters = module.active_adapter
            break

    # For previous PEFT versions
    if isinstance(active_adapters, str):
        active_adapters = [active_adapters]

    return active_adapters

def active_adapter(self) -> str:
    warnings.warn(
        "The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning
    )

    return self.active_adapters()[0]

def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict:
    """
    If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
    official documentation: https://huggingface.co/docs/peft
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    Gets the adapter state dict, which should contain only the weight tensors of the specified adapter. If no
    adapter_name is passed, the active adapter is used.

    Args:
        adapter_name (`str`, *optional*):
            The name of the adapter to get the state dict from. If no name is passed, the active adapter is used.
    """
    check_peft_version(min_version=MIN_PEFT_VERSION)

    if not self._hf_peft_config_loaded:
        raise ValueError("No adapter loaded. Please load an adapter first.")

    from peft import get_peft_model_state_dict

    if adapter_name is None:
        adapter_name = self.active_adapters()[0]

    adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name)
    return adapter_state_dict
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
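`active_adapters` and `get_adapter_state_dict` compose naturally: query which adapter is live, then extract only its tensors. A minimal sketch, continuing the hypothetical model above:

```py
# List the currently active adapter(s).
print(model.active_adapters())  # e.g. ["adapter_b"]

# Only the adapter tensors (e.g. LoRA A/B matrices), not the base model weights.
adapter_state_dict = model.get_adapter_state_dict()
print(sum(t.numel() for t in adapter_state_dict.values()), "adapter parameters")
```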
def _dispatch_accelerate_model(
    self,
    device_map: str,
    max_memory: Optional[int] = None,
    offload_folder: Optional[str] = None,
    offload_index: Optional[int] = None,
) -> None:
    """
    Optionally re-dispatches the model and attaches new hooks to it in case the model has been loaded with
    accelerate (i.e. with `device_map=xxx`).
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    Args:
        device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
            A map that specifies where each submodule should go. It doesn't need to be refined to each
            parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
            same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
            like `1`) on which the model will be allocated, the device map will map the entire model to this
            device. Passing `device_map = 0` means put the whole model on GPU 0.
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
            To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`.
            For more information about each option see [designing a device
            map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
        max_memory (`Dict`, *optional*):
            A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory
            available for each GPU and the available CPU RAM if unset.
        offload_folder (`str` or `os.PathLike`, *optional*):
            If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
        offload_index (`int`, *optional*):
            The `offload_index` argument to be passed to the `accelerate.dispatch_model` method.
    """
    dispatch_model_kwargs = {}
    # Safety check for previous `accelerate` versions
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
    if "offload_index" in inspect.signature(dispatch_model).parameters:
        dispatch_model_kwargs["offload_index"] = offload_index
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
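The `inspect.signature` guard above is a general backward-compatibility pattern: only forward a keyword argument when the installed dependency actually declares it. A self-contained sketch of the same idea, with a stand-in function:

```py
import inspect

def dispatch(model, device_map, offload_index=None):  # stand-in for a newer dependency API
    return device_map, offload_index

kwargs = {}
# Pass `offload_index` only if the callable supports it.
if "offload_index" in inspect.signature(dispatch).parameters:
    kwargs["offload_index"] = 0

print(dispatch("model", "auto", **kwargs))  # ('auto', 0)
```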
    no_split_module_classes = self._no_split_modules

    if device_map != "sequential":
        max_memory = get_balanced_memory(
            self,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
            low_zero=(device_map == "balanced_low_0"),
        )

    if isinstance(device_map, str):
        device_map = infer_auto_device_map(
            self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
        )

    dispatch_model(
        self,
        device_map=device_map,
        offload_dir=offload_folder,
        **dispatch_model_kwargs,
    )

def delete_adapter(self, adapter_names: Union[List[str], str]) -> None:
    """
    Delete an adapter's LoRA layers from the underlying model.

    Args:
        adapter_names (`Union[List[str], str]`):
            The name(s) of the adapter(s) to delete.

    Example:
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    ```py
    from diffusers import AutoPipelineForText2Image
    import torch

    pipeline = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    ).to("cuda")
    pipeline.load_lora_weights(
        "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
    )
    pipeline.delete_adapters("cinematic")
    ```
    """
    check_peft_version(min_version=MIN_PEFT_VERSION)

    if not self._hf_peft_config_loaded:
        raise ValueError("No adapter loaded. Please load an adapter first.")

    from peft.tuners.tuners_utils import BaseTunerLayer

    if isinstance(adapter_names, str):
        adapter_names = [adapter_names]
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
    # Check that all adapter names are present in the config
    missing_adapters = [name for name in adapter_names if name not in self.peft_config]
    if missing_adapters:
        raise ValueError(
            f"The following adapter(s) are not present and cannot be deleted: {', '.join(missing_adapters)}"
        )

    for adapter_name in adapter_names:
        for module in self.modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "delete_adapter"):
                    module.delete_adapter(adapter_name)
                else:
                    raise ValueError(
                        "The version of PEFT you are using is not compatible; please use a version that is greater than 0.6.1"
                    )
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
        # For transformers integration - we need to pop the adapter from the config
        if getattr(self, "_hf_peft_config_loaded", False) and hasattr(self, "peft_config"):
            self.peft_config.pop(adapter_name, None)

    # In case all adapters are deleted, we need to delete the config
    # and make sure to set the flag to False
    if len(self.peft_config) == 0:
        del self.peft_config
        self._hf_peft_config_loaded = False
10,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/peft.py
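The docstring example above shows the analogous diffusers pipeline API; on a transformers model the call goes through this mixin directly. A minimal, hypothetical sketch:

```py
# Assuming `model` has adapters "adapter_a" and "adapter_b" loaded as above.
model.delete_adapter("adapter_a")    # a single name ...
model.delete_adapter(["adapter_b"])  # ... or a list of names
# Once the last adapter is deleted, `model.peft_config` is removed entirely
# and `_hf_peft_config_loaded` is reset to False.
```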
class FbgemmFp8Linear(torch.nn.Module):
    def __init__(self, in_features, out_features, bias, weight_dtype=torch.float32):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features

        self.register_buffer("weight", torch.zeros((out_features, in_features), dtype=torch.float8_e4m3fn))
        self.register_buffer("weight_scale", torch.zeros((out_features, 1), dtype=weight_dtype))
        self.register_buffer("input_scale_ub", torch.zeros([1], dtype=torch.float), persistent=False)

        if bias:
            self.register_buffer("bias", torch.zeros((self.out_features), dtype=weight_dtype))
        else:
            self.bias = None
10,561
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/fbgemm_fp8.py
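The buffers registered above store one FP8 (`e4m3`) byte per weight element plus a single scale per output row, so dequantization is a broadcasted multiply. A minimal sketch of that layout (shapes chosen for illustration; requires a PyTorch build with `torch.float8_e4m3fn` support):

```py
import torch

out_features, in_features = 8, 16
weight = torch.zeros(out_features, in_features, dtype=torch.float8_e4m3fn)  # 1 byte per element
weight_scale = torch.ones(out_features, 1)                                  # one scale per output row

# The dequantized weight the rowwise kernel effectively computes with.
weight_fp32 = weight.to(torch.float32) * weight_scale
print(weight_fp32.shape)  # torch.Size([8, 16])
```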
    def forward(self, x):
        num_tokens = None
        # quantize_fp8_per_row will squash the leading dimensions, so save the desired shape here
        output_shape = (*x.shape[:-1], -1)
        # x_quantized and x_scale are not necessarily on the same device as x, which is an issue.
        # https://github.com/pytorch/FBGEMM/blob/e08af8539c391437f447173863df0f3f6f6f1855/fbgemm_gpu/experimental/gen_ai/src/quantize/quantize.cu#L1237C3-L1237C45
        x_quantized, x_scale = torch.ops.fbgemm.quantize_fp8_per_row(
            x.view(-1, x.shape[-1]), num_tokens, self.input_scale_ub
        )
        # Moving x_quantized and x_scale here creates gibberish output. However, if we move the output, it works:
        # x_quantized, x_scale = x_quantized.to(x.device), x_scale.to(x.device)
10,561
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/fbgemm_fp8.py
        # The computation still happens on the device where self.weight is, even if x_quantized is not on the same device as self.weight
        output = torch.ops.fbgemm.f8f8bf16_rowwise(
            x_quantized, self.weight, x_scale, self.weight_scale, use_fast_accum=True
        )
        output = output + self.bias if self.bias is not None else output
        # Hacky for now: we move the output to the device of x
        output = output.to(x.device)
        output = output.reshape(output_shape)
        del x_quantized, x_scale
        return output
10,561
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/fbgemm_fp8.py
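`quantize_fp8_per_row` picks one scale per row so each row uses the full `e4m3` range (maximum representable value 448). A pure-PyTorch sketch of the same math — an illustrative approximation, not the fbgemm kernel itself:

```py
import torch

FP8_E4M3_MAX = 448.0

def quantize_fp8_per_row(x: torch.Tensor):
    # One scale per row, chosen so the row's max |value| maps to the fp8 max.
    amax = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-12)
    scale = amax / FP8_E4M3_MAX
    x_q = (x / scale).to(torch.float8_e4m3fn)
    return x_q, scale

x = torch.randn(4, 16)
x_q, scale = quantize_fp8_per_row(x)
x_deq = x_q.to(torch.float32) * scale  # approximate reconstruction
print((x - x_deq).abs().max())         # small quantization error
```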
class GGUFTokenizerSkeleton:
    def __init__(self, dict_):
        for k, v in dict_.items():
            setattr(self, k, v)

        if not hasattr(self, "merges"):
            if not hasattr(self, "tokens") or not hasattr(self, "scores"):
                raise ValueError(
                    "tokens and scores need to be passed for a LLaMa tokenizer without merges to be instantiated."
                )
            tokens = self.tokens
            scores = self.scores
            vocab = {t: scores[i] for i, t in enumerate(tokens)}
10,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/ggml.py
logger.warning("Merges were not in checkpoint, building merges on the fly.") merges = [] for merge, piece_score in tqdm(vocab.items()): local = [] for index in range(1, len(merge)): piece_l, piece_r = merge[:index], merge[index:] if piece_l in tokens and piece_r in tokens: local.append((piece_l, piece_r, piece_score)) local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]), reverse=True) merges.extend(local) merges = sorted(merges, key=lambda val: val[2], reverse=True) merges = [(val[0], val[1]) for val in merges] self.merges = merges else: self.merges = [tuple(merge.split(" ")) for merge in self.merges] if not hasattr(self, "scores"): self.scores = [None for _ in range(len(self.tokens))]
10,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/integrations/ggml.py
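The fallback above reconstructs BPE merges purely from the vocabulary and its scores: every split of a token whose two halves are themselves tokens becomes a candidate merge, ranked by score. A tiny self-contained demo of the same loop (illustrative toy vocabulary and scores):

```py
# Toy vocab with scores; higher score = higher merge priority.
tokens = ["h", "e", "he", "l", "ll", "hell"]
scores = [0.0, 0.0, -1.0, 0.0, -2.0, -3.0]
vocab = {t: scores[i] for i, t in enumerate(tokens)}

merges = []
for merge, piece_score in vocab.items():
    local = []
    for index in range(1, len(merge)):
        piece_l, piece_r = merge[:index], merge[index:]
        if piece_l in tokens and piece_r in tokens:
            local.append((piece_l, piece_r, piece_score))
    local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]), reverse=True)
    merges.extend(local)

merges = [(l, r) for l, r, _ in sorted(merges, key=lambda val: val[2], reverse=True)]
print(merges)  # [('h', 'e'), ('l', 'l'), ('he', 'll')]
```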