| | """PyTorch InternLM2 model.""" |
| | import copy |
| | import math |
| | import queue |
| | import threading |
| | import warnings |
| | from typing import List, Optional, Tuple, Union |
| |
|
| | import torch |
| | import torch.utils.checkpoint |
| | from einops import rearrange |
| | from PIL import Image |
| | from torch import nn |
| | from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
| | from torchvision import transforms |
| | from torchvision.transforms.functional import InterpolationMode |
| | from transformers.activations import ACT2FN |
| | from transformers.modeling_outputs import (BaseModelOutputWithPast, |
| | CausalLMOutputWithPast, |
| | SequenceClassifierOutputWithPast) |
| | from transformers.modeling_utils import PreTrainedModel |
| | from transformers.utils import (add_start_docstrings, |
| | add_start_docstrings_to_model_forward, logging, |
| | replace_return_docstrings) |
| |
|
try:
    from transformers.generation.streamers import BaseStreamer
except ImportError:
    BaseStreamer = None
| |
|
| | from .build_mlp import PLoRA, build_vision_projector, build_vision_tower |
| | from .configuration_internlm import InternLMConfig as InternLM2Config |
| |
|
| | logger = logging.get_logger(__name__) |
| |
|
| | _CONFIG_FOR_DOC = 'InternLM2Config' |
| |
|
| |
|
| | |
| | def _make_causal_mask(input_ids_shape: torch.Size, |
| | dtype: torch.dtype, |
| | device: torch.device, |
| | past_key_values_length: int = 0): |
| | """Make causal mask used for bi-directional self-attention.""" |
| | bsz, tgt_len = input_ids_shape |
| | mask = torch.full((tgt_len, tgt_len), |
| | torch.tensor(torch.finfo(dtype).min, device=device), |
| | device=device) |
| | mask_cond = torch.arange(mask.size(-1), device=device) |
| | mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) |
| | mask = mask.to(dtype) |
| |
|
| | if past_key_values_length > 0: |
| | mask = torch.cat([ |
| | torch.zeros( |
| | tgt_len, past_key_values_length, dtype=dtype, device=device), |
| | mask |
| | ], |
| | dim=-1) |
| | return mask[None, None, :, :].expand(bsz, 1, tgt_len, |
| | tgt_len + past_key_values_length) |
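
# For intuition: with tgt_len = 3 and no cache, the mask built above is the
# additive bias (0 = attend, min = blocked)
#   [[0, min, min],
#    [0, 0,   min],
#    [0, 0,   0  ]]
# broadcast to shape (bsz, 1, 3, 3); cached positions are prepended as zeros.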
| |
|
| |
|
| | |
| | def _expand_mask(mask: torch.Tensor, |
| | dtype: torch.dtype, |
| | tgt_len: Optional[int] = None): |
| | """Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, |
| | src_seq_len]`.""" |
| | bsz, src_len = mask.size() |
| | tgt_len = tgt_len if tgt_len is not None else src_len |
| |
|
| | expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, |
| | src_len).to(dtype) |
| |
|
| | inverted_mask = 1.0 - expanded_mask |
| |
|
| | return inverted_mask.masked_fill( |
| | inverted_mask.to(torch.bool), |
| | torch.finfo(dtype).min) |
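
# Example: an attention_mask of [[1, 1, 0]] (one padded position) expands to
# an additive bias of shape (1, 1, tgt_len, 3) whose last column equals
# torch.finfo(dtype).min, so softmax gives the padded key ~zero weight.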
| |
|
| |
|
| | class InternLM2RMSNorm(nn.Module): |
| |
|
| | def __init__(self, hidden_size, eps=1e-6): |
| | """InternLM2RMSNorm is equivalent to T5LayerNorm.""" |
| | super().__init__() |
| | self.weight = nn.Parameter(torch.ones(hidden_size)) |
| | self.variance_epsilon = eps |
| |
|
| | def forward(self, hidden_states): |
| | input_dtype = hidden_states.dtype |
| | hidden_states = hidden_states.to(torch.float32) |
| | variance = hidden_states.pow(2).mean(-1, keepdim=True) |
| | hidden_states = hidden_states * torch.rsqrt(variance + |
| | self.variance_epsilon) |
| | return self.weight * hidden_states.to(input_dtype) |
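
    # In symbols: y = weight * x / sqrt(mean(x ** 2, dim=-1) + eps), computed
    # in float32 and cast back to the input dtype. Unlike LayerNorm there is
    # no mean subtraction and no bias term.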
| |
|
| |
|
| | class InternLM2RotaryEmbedding(nn.Module): |
| |
|
| | def __init__(self, |
| | dim, |
| | max_position_embeddings=2048, |
| | base=10000, |
| | device=None): |
| | super().__init__() |
| |
|
| | self.dim = dim |
| | self.max_position_embeddings = max_position_embeddings |
| | self.base = base |
| | inv_freq = 1.0 / ( |
| | self.base |
| | **(torch.arange(0, self.dim, 2).float().to(device) / self.dim)) |
| | self.register_buffer('inv_freq', inv_freq, persistent=False) |
| |
|
| | |
| | self._set_cos_sin_cache( |
| | seq_len=max_position_embeddings, |
| | device=self.inv_freq.device, |
| | dtype=torch.get_default_dtype()) |
| |
|
| | def _set_cos_sin_cache(self, seq_len, device, dtype): |
| | self.max_seq_len_cached = seq_len |
| | t = torch.arange( |
| | self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) |
| |
|
| | freqs = torch.einsum('i,j->ij', t, self.inv_freq) |
        # Different from the paper, but it uses a different permutation
        # in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
| | self.register_buffer( |
| | 'cos_cached', emb.cos().to(dtype), persistent=False) |
| | self.register_buffer( |
| | 'sin_cached', emb.sin().to(dtype), persistent=False) |
| |
|
| | def forward(self, x, seq_len=None): |
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
| | self._set_cos_sin_cache( |
| | seq_len=seq_len, device=x.device, dtype=x.dtype) |
| |
|
| | return ( |
| | self.cos_cached[:seq_len].to(dtype=x.dtype), |
| | self.sin_cached[:seq_len].to(dtype=x.dtype), |
| | ) |
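
    # cos_cached / sin_cached have shape (max_seq_len_cached, dim); slicing
    # [:seq_len] selects the angles for the positions actually present, and
    # the cache is regrown lazily whenever a longer sequence arrives.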
| |
|
| |
|
| | class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding): |
| | """InternLM2RotaryEmbedding extended with linear scaling. |
| | |
| | Credits to the Reddit user /u/kaiokendev |
| | """ |
| |
|
| | def __init__(self, |
| | dim, |
| | max_position_embeddings=2048, |
| | base=10000, |
| | device=None, |
| | scaling_factor=1.0): |
| | self.scaling_factor = scaling_factor |
| | super().__init__(dim, max_position_embeddings, base, device) |
| |
|
| | def _set_cos_sin_cache(self, seq_len, device, dtype): |
| | self.max_seq_len_cached = seq_len |
| | t = torch.arange( |
| | self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) |
| | t = t / self.scaling_factor |
| |
|
| | freqs = torch.einsum('i,j->ij', t, self.inv_freq) |
        # Different from the paper, but it uses a different permutation
        # in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
| | self.register_buffer( |
| | 'cos_cached', emb.cos().to(dtype), persistent=False) |
| | self.register_buffer( |
| | 'sin_cached', emb.sin().to(dtype), persistent=False) |
| |
|
| |
|
| | class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding): |
| | """InternLM2RotaryEmbedding extended with Dynamic NTK scaling. |
| | |
| | Credits to the Reddit users /u/bloc97 and /u/emozilla. |
| | """ |
| |
|
| | def __init__(self, |
| | dim, |
| | max_position_embeddings=2048, |
| | base=10000, |
| | device=None, |
| | scaling_factor=1.0): |
| | self.scaling_factor = scaling_factor |
| | super().__init__(dim, max_position_embeddings, base, device) |
| |
|
| | def _set_cos_sin_cache(self, seq_len, device, dtype): |
| | self.max_seq_len_cached = seq_len |
| |
|
| | if seq_len > self.max_position_embeddings: |
| | base = self.base * ((self.scaling_factor * seq_len / |
| | self.max_position_embeddings) - |
| | (self.scaling_factor - 1))**( |
| | self.dim / (self.dim - 2)) |
| | inv_freq = 1.0 / ( |
| | base |
| | **(torch.arange(0, self.dim, 2).float().to(device) / self.dim)) |
| | self.register_buffer('inv_freq', inv_freq, persistent=False) |
| |
|
| | t = torch.arange( |
| | self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) |
| |
|
| | freqs = torch.einsum('i,j->ij', t, self.inv_freq) |
        # Different from the paper, but it uses a different permutation
        # in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
| | self.register_buffer( |
| | 'cos_cached', emb.cos().to(dtype), persistent=False) |
| | self.register_buffer( |
| | 'sin_cached', emb.sin().to(dtype), persistent=False) |
| |
|
| |
|
| | def rotate_half(x): |
| | """Rotates half the hidden dims of the input.""" |
| | x1 = x[..., :x.shape[-1] // 2] |
| | x2 = x[..., x.shape[-1] // 2:] |
| | return torch.cat((-x2, x1), dim=-1) |
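
# Example: for x = [x1, x2, x3, x4] along the last dim, rotate_half(x) is
# [-x3, -x4, x1, x2]. Combined with cos/sin in apply_rotary_pos_emb this
# realizes the 2-D rotations of rotary position embedding (RoPE).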
| |
|
| |
|
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    # cos/sin come in as [seq_len, dim]; squeeze any leading singleton dims,
    # then expand to [bsz, 1, seq_len, dim] so they broadcast against q and k
    cos = cos.squeeze(1).squeeze(0)
    sin = sin.squeeze(1).squeeze(0)
| | cos = cos.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1) |
| | sin = sin.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1) |
| | if q.size(2) == 1: |
| | q_embed = (q * cos[:, :, -1:, :]) + ( |
| | rotate_half(q) * sin[:, :, -1:, :]) |
| | else: |
| | q_embed = (q * cos) + (rotate_half(q) * sin) |
| |
|
| | if k.size(2) == 1: |
| | k_embed = (k * cos[:, :, -1:, :]) + ( |
| | rotate_half(k) * sin[:, :, -1:, :]) |
| | else: |
| | k_embed = (k * cos) + (rotate_half(k) * sin) |
| |
|
| | return q_embed, k_embed |
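
# The q.size(2) == 1 branch covers single-token decoding with a KV cache:
# only the cos/sin values at the last (current) position are applied to the
# new query/key, while the general branch rotates every position at once.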
| |
|
| |
|
| | class InternLM2MLP(nn.Module): |
| |
|
| | def __init__(self, config): |
| | super().__init__() |
| | self.config = config |
| | self.hidden_size = config.hidden_size |
| | self.intermediate_size = config.intermediate_size |
| | |
| | |
| | |
| |
|
| | self.w1 = PLoRA( |
| | self.hidden_size, |
| | self.intermediate_size, |
| | bias=False, |
| | lora_r=256, |
| | lora_alpha=256, |
| | lora_len=576) |
| | self.w3 = PLoRA( |
| | self.hidden_size, |
| | self.intermediate_size, |
| | bias=False, |
| | lora_r=256, |
| | lora_alpha=256, |
| | lora_len=576) |
| | self.w2 = PLoRA( |
| | self.intermediate_size, |
| | self.hidden_size, |
| | bias=False, |
| | lora_r=256, |
| | lora_alpha=256, |
| | lora_len=576) |
| |
|
| | self.act_fn = ACT2FN[config.hidden_act] |
| |
|
| | def forward(self, x, im_mask): |
| | down_proj = self.w2( |
| | self.act_fn(self.w1(x, im_mask)) * self.w3(x, im_mask), im_mask) |
| |
|
| | return down_proj |
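
    # This is the SwiGLU feed-forward used by LLaMA-style models,
    # w2(act(w1(x)) * w3(x)), with PLoRA layers so that visual tokens
    # (im_mask == True) are additionally routed through low-rank adapters.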
| |
|
| |
|
| | def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: |
| | """This is the equivalent of torch.repeat_interleave(x, dim=1, |
| | repeats=n_rep). |
| | |
| | The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to |
| | (batch, num_attention_heads, seqlen, head_dim) |
| | """ |
| | batch, num_key_value_heads, slen, head_dim = hidden_states.shape |
| | if n_rep == 1: |
| | return hidden_states |
| | hidden_states = hidden_states[:, :, |
| | None, :, :].expand(batch, |
| | num_key_value_heads, |
| | n_rep, slen, head_dim) |
| | return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, |
| | head_dim) |
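
# Example shapes for grouped-query attention: with num_key_value_heads=8,
# num_attention_heads=32 and n_rep=4, a (b, 8, s, d) key/value tensor becomes
# (b, 32, s, d) so it can attend against all 32 query heads.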
| |
|
| |
|
| | class InternLM2Attention(nn.Module): |
| | """Multi-headed attention from 'Attention Is All You Need' paper.""" |
| |
|
| | def __init__(self, config: InternLM2Config): |
| | super().__init__() |
| | self.config = config |
| | self.hidden_size = config.hidden_size |
| | self.num_heads = config.num_attention_heads |
| | self.head_dim = self.hidden_size // self.num_heads |
| | self.num_key_value_heads = config.num_key_value_heads |
| | self.num_key_value_groups = self.num_heads // self.num_key_value_heads |
| | self.max_position_embeddings = config.max_position_embeddings |
| | self.is_causal = True |
| |
|
| | if (self.head_dim * self.num_heads) != self.hidden_size: |
| | raise ValueError( |
| | f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}' |
| | f' and `num_heads`: {self.num_heads}).') |
| |
|
| | |
| | self.wqkv = PLoRA( |
| | self.hidden_size, |
| | (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim, |
| | bias=config.bias, |
| | lora_r=256, |
| | lora_alpha=256, |
| | lora_len=576) |
| |
|
| | |
| | self.wo = PLoRA( |
| | self.num_heads * self.head_dim, |
| | self.hidden_size, |
| | bias=config.bias, |
| | lora_r=256, |
| | lora_alpha=256, |
| | lora_len=576) |
| | self._init_rope() |
| |
|
| | def _init_rope(self): |
| | if self.config.rope_scaling is None: |
| | self.rotary_emb = InternLM2RotaryEmbedding( |
| | self.head_dim, |
| | max_position_embeddings=self.max_position_embeddings, |
| | base=self.config.rope_theta, |
| | ) |
| | else: |
| | scaling_type = self.config.rope_scaling['type'] |
| | scaling_factor = self.config.rope_scaling['factor'] |
| | if scaling_type == 'dynamic': |
| | self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding( |
| | self.head_dim, |
| | max_position_embeddings=self.max_position_embeddings, |
| | base=self.config.rope_theta, |
| | scaling_factor=scaling_factor) |
| | else: |
| | raise ValueError( |
| | "Currently we only support rotary embedding's type being 'dynamic'." |
| | ) |
| | return self.rotary_emb |
| |
|
| | def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
| | return tensor.view(bsz, seq_len, self.num_heads, |
| | self.head_dim).transpose(1, 2).contiguous() |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| | output_attentions: bool = False, |
| | use_cache: bool = False, |
| | im_mask: Optional[Tuple[torch.Tensor]] = None, |
| | **kwargs, |
| | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], |
| | Optional[Tuple[torch.Tensor]]]: |
| | if 'padding_mask' in kwargs: |
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in '
                'v4.37. Please make sure to use `attention_mask` instead.')
| |
|
| | bsz, q_len, _ = hidden_states.size() |
| |
|
| | qkv_states = self.wqkv(hidden_states, im_mask) |
| |
|
| | qkv_states = rearrange( |
| | qkv_states, |
| | 'b q (h gs d) -> b q h gs d', |
| | gs=2 + self.num_key_value_groups, |
| | d=self.head_dim, |
| | ) |
| |
|
| | query_states = qkv_states[..., :self.num_key_value_groups, :] |
| | query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d') |
| | key_states = qkv_states[..., -2, :] |
| | value_states = qkv_states[..., -1, :] |
| |
|
| | query_states = query_states.transpose(1, 2) |
| | key_states = key_states.transpose(1, 2) |
| | value_states = value_states.transpose(1, 2) |
| |
|
| | kv_seq_len = key_states.shape[-2] |
| | if past_key_value is not None: |
| | kv_seq_len += past_key_value[0].shape[-2] |
| | cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) |
| | query_states, key_states = apply_rotary_pos_emb( |
| | query_states, key_states, cos, sin, position_ids) |
| |
|
        if past_key_value is not None:
            # reuse cached key/value states from previous decoding steps
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
| |
|
| | past_key_value = (key_states, value_states) if use_cache else None |
| |
|
| | key_states = repeat_kv(key_states, self.num_key_value_groups) |
| | value_states = repeat_kv(value_states, self.num_key_value_groups) |
| |
|
| | attn_weights = torch.matmul(query_states, key_states.transpose( |
| | 2, 3)) / math.sqrt(self.head_dim) |
| |
|
| | if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): |
| | raise ValueError( |
| | f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is' |
| | f' {attn_weights.size()}') |
| |
|
| | if attention_mask is not None: |
| | if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): |
| | raise ValueError( |
| | f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' |
| | ) |
| | attn_weights = attn_weights + attention_mask |
| |
|
        # upcast attention weights to fp32 for numerical stability
        attn_weights = nn.functional.softmax(
            attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
| | attn_output = torch.matmul(attn_weights, value_states) |
| |
|
| | if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): |
| | raise ValueError( |
| | f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' |
| | f' {attn_output.size()}') |
| |
|
| | attn_output = attn_output.transpose(1, 2).contiguous() |
| | attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) |
| |
|
| | attn_output = self.wo(attn_output, im_mask) |
| |
|
| | if not output_attentions: |
| | attn_weights = None |
| |
|
| | return attn_output, attn_weights, past_key_value |
| |
|
| |
|
| | class InternLM2FlashAttention2(InternLM2Attention): |
| | """InternLM2 flash attention module. |
| | |
| | This module inherits from `InternLM2Attention` as the weights of the module |
| | stays untouched. The only required change would be on the forward pass |
| | where it needs to correctly call the public API of flash attention and deal |
| | with padding tokens in case the input contains any of them. |
| | """ |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | attention_mask: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| | output_attentions: bool = False, |
| | use_cache: bool = False, |
| | im_mask: Optional[Tuple[torch.Tensor]] = None, |
| | **kwargs, |
| | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], |
| | Optional[Tuple[torch.Tensor]]]: |
| | |
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in '
                'v4.37. Please make sure to use `attention_mask` instead.')

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')
| |
|
| | output_attentions = False |
| |
|
| | bsz, q_len, _ = hidden_states.size() |
| |
|
| | qkv_states = self.wqkv(hidden_states, im_mask) |
| |
|
        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )
| |
|
| | query_states = qkv_states[..., :self.num_key_value_groups, :] |
| | query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d') |
| | key_states = qkv_states[..., -2, :] |
| | value_states = qkv_states[..., -1, :] |
| |
|
| | kv_seq_len = key_states.shape[-2] |
| | if past_key_value is not None: |
| | kv_seq_len += past_key_value[0].shape[-2] |
| |
|
| | cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) |
| |
|
| | query_states, key_states = apply_rotary_pos_emb( |
| | query_states, key_states, cos, sin, position_ids) |
| |
|
        if past_key_value is not None:
            # reuse cached key/value states from previous decoding steps
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
| |
|
| | past_key_value = (key_states, value_states) if use_cache else None |
| |
|
| | query_states = query_states.transpose(1, 2) |
| | key_states = key_states.transpose(1, 2) |
| | value_states = value_states.transpose(1, 2) |
| |
|
        # `attention_dropout` is not defined on this module, so fall back to
        # the config value (default 0.0) to avoid an AttributeError in training
        dropout_rate = 0.0 if not self.training else getattr(
            self.config, 'attention_dropout', 0.0)

        # In PEFT, the layer norms are usually cast to float32 for training
        # stability, so the input hidden states may be silently cast to
        # float32. Cast them back to the expected half-precision dtype,
        # since flash attention only supports fp16/bf16 inputs.
| |
|
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            # Handle the case where the model is quantized
            if hasattr(self.config, '_pre_quantization_dtype'):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.wqkv.weight.dtype

            logger.warning_once(
                'The input hidden states seem to be silently cast to float32; '
                'this is probably because you upcasted embedding or layer '
                f'norm layers to float32. We will cast them back to {target_dtype}.'
            )
| |
|
| | query_states = query_states.to(target_dtype) |
| | key_states = key_states.to(target_dtype) |
| | value_states = value_states.to(target_dtype) |
| |
|
| | attn_output = self._flash_attention_forward( |
| | query_states, |
| | key_states, |
| | value_states, |
| | attention_mask, |
| | q_len, |
| | dropout=dropout_rate) |
| |
|
| | attn_output = attn_output.reshape(bsz, q_len, |
| | self.hidden_size).contiguous() |
| | attn_output = self.wo(attn_output, im_mask) |
| |
|
| | if not output_attentions: |
| | attn_weights = None |
| |
|
| | return attn_output, attn_weights, past_key_value |
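
    # NOTE: the forward above calls `self._flash_attention_forward`, which is
    # not defined in this file. The method below is a minimal sketch modeled
    # on the standard Hugging Face flash-attention wrapper; it assumes the
    # `flash-attn` package is installed and ignores padding (attention_mask),
    # handling only the dense causal case.
    def _flash_attention_forward(self,
                                 query_states,
                                 key_states,
                                 value_states,
                                 attention_mask,
                                 query_length,
                                 dropout=0.0,
                                 softmax_scale=None):
        from flash_attn import flash_attn_func

        # flash_attn_func expects tensors of shape
        # (batch, seq_len, num_heads, head_dim)
        attn_output = flash_attn_func(
            query_states,
            key_states,
            value_states,
            dropout,
            softmax_scale=softmax_scale,
            causal=self.is_causal)
        return attn_output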
| |
|
| |
|
| | class InternLM2DecoderLayer(nn.Module): |
| |
|
| | def __init__(self, config: InternLM2Config): |
| | super().__init__() |
| | self.hidden_size = config.hidden_size |
| | self.attention = ( |
| | InternLM2Attention(config=config) |
| | if not getattr(config, '_flash_attn_2_enabled', False) else |
| | InternLM2FlashAttention2(config=config)) |
| | self.feed_forward = InternLM2MLP(config) |
| | self.attention_norm = InternLM2RMSNorm( |
| | config.hidden_size, eps=config.rms_norm_eps) |
| | self.ffn_norm = InternLM2RMSNorm( |
| | config.hidden_size, eps=config.rms_norm_eps) |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| | output_attentions: Optional[bool] = False, |
| | use_cache: Optional[bool] = False, |
| | im_mask: Optional[Tuple[torch.Tensor]] = None, |
| | **kwargs, |
| | ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, |
| | torch.FloatTensor]]]: |
| | """ |
| | Args: |
| | hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
| | attention_mask (`torch.FloatTensor`, *optional*): |
| | attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, |
| | query_sequence_length, key_sequence_length)` if default attention is used. |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| | returned tensors for more detail. |
| | use_cache (`bool`, *optional*): |
| | If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding |
| | (see `past_key_values`). |
| | past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states |
| | """ |
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in '
                'v4.37. Please make sure to use `attention_mask` instead.')
| |
|
| | residual = hidden_states |
| |
|
| | hidden_states = self.attention_norm(hidden_states) |
| |
|
| | |
| | hidden_states, self_attn_weights, present_key_value = self.attention( |
| | hidden_states=hidden_states, |
| | attention_mask=attention_mask, |
| | position_ids=position_ids, |
| | past_key_value=past_key_value, |
| | output_attentions=output_attentions, |
| | use_cache=use_cache, |
| | im_mask=im_mask, |
| | **kwargs, |
| | ) |
| | hidden_states = residual + hidden_states |
| |
|
| | |
| | residual = hidden_states |
| | hidden_states = self.ffn_norm(hidden_states) |
| | hidden_states = self.feed_forward(hidden_states, im_mask) |
| | hidden_states = residual + hidden_states |
| |
|
| | outputs = (hidden_states, ) |
| |
|
| | if output_attentions: |
| | outputs += (self_attn_weights, ) |
| |
|
| | if use_cache: |
| | outputs += (present_key_value, ) |
| |
|
| | return outputs |
| |
|
| |
|
| | InternLM2_START_DOCSTRING = r""" |
| | This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the |
| | library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads |
| | etc.) |
| | |
| | This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
| | Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage |
| | and behavior. |
| | |
| | Parameters: |
| | config ([`InternLM2Config`]): |
| | Model configuration class with all the parameters of the model. Initializing with a config file does not |
| | load the weights associated with the model, only the configuration. Check out the |
| | [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| | """ |
| |
|
| |
|
| | @add_start_docstrings( |
| | 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.', |
| | InternLM2_START_DOCSTRING, |
| | ) |
| | class InternLM2PreTrainedModel(PreTrainedModel): |
| | config_class = InternLM2Config |
| | base_model_prefix = 'model' |
| | supports_gradient_checkpointing = True |
| | _no_split_modules = ['InternLM2DecoderLayer'] |
| | _skip_keys_device_placement = 'past_key_values' |
| | _supports_flash_attn_2 = True |
| |
|
| | def _init_weights(self, module): |
| | std = self.config.initializer_range |
| | if isinstance(module, nn.Linear): |
| | module.weight.data.normal_(mean=0.0, std=std) |
| | if module.bias is not None: |
| | module.bias.data.zero_() |
| | elif isinstance(module, nn.Embedding): |
| | module.weight.data.normal_(mean=0.0, std=std) |
| | if module.padding_idx is not None: |
| | module.weight.data[module.padding_idx].zero_() |
| |
|
| |
|
| | InternLM2_INPUTS_DOCSTRING = r""" |
| | Args: |
| | input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| | Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide |
| | it. |
| | |
| | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| | [`PreTrainedTokenizer.__call__`] for details. |
| | |
| | [What are input IDs?](../glossary#input-ids) |
| | attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| | |
| | - 1 for tokens that are **not masked**, |
| | - 0 for tokens that are **masked**. |
| | |
| | [What are attention masks?](../glossary#attention-mask) |
| | |
| | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| | [`PreTrainedTokenizer.__call__`] for details. |
| | |
| | If `past_key_values` is used, optionally only the last `input_ids` have to be input (see |
| | `past_key_values`). |
| | |
| | If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] |
| | and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more |
| | information on the default strategy. |
| | |
| | - 1 indicates the head is **not masked**, |
| | - 0 indicates the head is **masked**. |
| | position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| | config.n_positions - 1]`. |
| | |
| | [What are position IDs?](../glossary#position-ids) |
| | past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or |
| | when `config.use_cache=True`): |
| | Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape |
| | `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape |
| | `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`. |
| | |
| | Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention |
| | blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. |
| | |
| | If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't |
| | have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` |
| | of shape `(batch_size, sequence_length)`. |
| | inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| | Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| | is useful if you want more control over how to convert `input_ids` indices into associated vectors than the |
| | model's internal embedding lookup matrix. |
| | use_cache (`bool`, *optional*): |
| | If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| | `past_key_values`). |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| | tensors for more detail. |
| | output_hidden_states (`bool`, *optional*): |
| | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| | more detail. |
| | return_dict (`bool`, *optional*): |
| | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| | """ |
| |
|
| |
|
| | @add_start_docstrings( |
| | 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.', |
| | InternLM2_START_DOCSTRING, |
| | ) |
| | class InternLM2Model(InternLM2PreTrainedModel): |
| | """Transformer decoder consisting of *config.num_hidden_layers* layers. |
| | Each layer is a [`InternLM2DecoderLayer`] |
| | |
| | Args: |
| | config: InternLM2Config |
| | """ |
| |
|
| | _auto_class = 'AutoModel' |
| |
|
| | def __init__(self, config: InternLM2Config): |
| | super().__init__(config) |
| | self.padding_idx = config.pad_token_id |
| | self.vocab_size = config.vocab_size |
| |
|
| | self.tok_embeddings = nn.Embedding(config.vocab_size, |
| | config.hidden_size, |
| | self.padding_idx) |
| | self.layers = nn.ModuleList([ |
| | InternLM2DecoderLayer(config) |
| | for _ in range(config.num_hidden_layers) |
| | ]) |
| | self.norm = InternLM2RMSNorm( |
| | config.hidden_size, eps=config.rms_norm_eps) |
| |
|
| | self.gradient_checkpointing = False |
        # Initialize weights and apply final processing
        self.post_init()
| |
|
| | def get_input_embeddings(self): |
| | return self.tok_embeddings |
| |
|
| | def set_input_embeddings(self, value): |
| | self.tok_embeddings = value |
| |
|
    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape,
                                        inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
| | if input_shape[-1] > 1: |
| | combined_attention_mask = _make_causal_mask( |
| | input_shape, |
| | inputs_embeds.dtype, |
| | device=inputs_embeds.device, |
| | past_key_values_length=past_key_values_length, |
| | ) |
| |
|
| | if attention_mask is not None: |
| | |
| | expanded_attn_mask = _expand_mask( |
| | attention_mask, inputs_embeds.dtype, |
| | tgt_len=input_shape[-1]).to(inputs_embeds.device) |
| | combined_attention_mask = ( |
| | expanded_attn_mask if combined_attention_mask is None else |
| | expanded_attn_mask + combined_attention_mask) |
| |
|
| | return combined_attention_mask |
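
    # Both pieces are additive biases: the causal part blocks future
    # positions and the expanded padding part blocks padded keys, so their
    # sum keeps a position visible only if both masks allow it.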
| |
|
| | @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING) |
| | def forward(self, |
| | input_ids: torch.LongTensor = None, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[List[torch.FloatTensor]] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | **kwargs) -> Union[Tuple, BaseModelOutputWithPast]: |
| |
|
| | im_mask = kwargs.get('im_mask', None) |
| |
|
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else |
| | self.config.output_hidden_states) |
| | use_cache = use_cache if use_cache is not None else self.config.use_cache |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | |
| | if input_ids is not None and inputs_embeds is not None: |
| | raise ValueError( |
| | 'You cannot specify both input_ids and inputs_embeds at the same time' |
| | ) |
| | elif input_ids is not None: |
| | batch_size, seq_length = input_ids.shape[:2] |
| | elif inputs_embeds is not None: |
| | batch_size, seq_length = inputs_embeds.shape[:2] |
| | else: |
| | raise ValueError( |
| | 'You have to specify either input_ids or inputs_embeds') |
| |
|
| | seq_length_with_past = seq_length |
| | past_key_values_length = 0 |
| | if past_key_values is not None: |
| | past_key_values_length = past_key_values[0][0].shape[2] |
| | seq_length_with_past = seq_length_with_past + past_key_values_length |
| |
|
| | if position_ids is None: |
| | device = input_ids.device if input_ids is not None else inputs_embeds.device |
| | position_ids = torch.arange( |
| | past_key_values_length, |
| | seq_length + past_key_values_length, |
| | dtype=torch.long, |
| | device=device) |
| | position_ids = position_ids.unsqueeze(0) |
| |
|
| | if inputs_embeds is None: |
| | inputs_embeds = self.tok_embeddings(input_ids) |
| | im_mask = torch.zeros(inputs_embeds.shape[:2]).to( |
| | inputs_embeds.device).bool() |
| | |
| | if attention_mask is None: |
| | attention_mask = torch.ones((batch_size, seq_length_with_past), |
| | dtype=torch.bool, |
| | device=inputs_embeds.device) |
| | attention_mask = self._prepare_decoder_attention_mask( |
| | attention_mask, (batch_size, seq_length), inputs_embeds, |
| | past_key_values_length) |
| |
|
| | |
| | hidden_states = inputs_embeds |
| |
|
| | if self.gradient_checkpointing and self.training: |
| | if use_cache: |
| | logger.warning_once( |
| | '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' |
| | ) |
| | use_cache = False |
| |
|
| | |
| | all_hidden_states = () if output_hidden_states else None |
| | all_self_attns = () if output_attentions else None |
| | next_decoder_cache = () if use_cache else None |
| |
|
| | for idx, decoder_layer in enumerate(self.layers): |
| | if output_hidden_states: |
| | all_hidden_states += (hidden_states, ) |
| |
|
| | past_key_value = past_key_values[ |
| | idx] if past_key_values is not None else None |
| |
|
| | if self.gradient_checkpointing and self.training: |
| |
|
| | def create_custom_forward(module): |
| |
|
| | def custom_forward(*inputs): |
| | |
| | return module(*inputs, output_attentions, None, |
| | im_mask) |
| |
|
| | return custom_forward |
| |
|
| | layer_outputs = torch.utils.checkpoint.checkpoint( |
| | create_custom_forward(decoder_layer), |
| | hidden_states, |
| | attention_mask, |
| | position_ids, |
| | None, |
| | ) |
| | else: |
| | layer_outputs = decoder_layer( |
| | hidden_states, |
| | attention_mask=attention_mask, |
| | position_ids=position_ids, |
| | past_key_value=past_key_value, |
| | output_attentions=output_attentions, |
| | use_cache=use_cache, |
| | im_mask=im_mask, |
| | ) |
| |
|
| | hidden_states = layer_outputs[0] |
| |
|
| | if use_cache: |
| | next_decoder_cache += ( |
| | layer_outputs[2 if output_attentions else 1], ) |
| |
|
| | if output_attentions: |
| | all_self_attns += (layer_outputs[1], ) |
| |
|
| | hidden_states = self.norm(hidden_states) |
| |
|
| | |
| | if output_hidden_states: |
| | all_hidden_states += (hidden_states, ) |
| |
|
| | next_cache = next_decoder_cache if use_cache else None |
| | if not return_dict: |
| | return tuple( |
| | v for v in |
| | [hidden_states, next_cache, all_hidden_states, all_self_attns] |
| | if v is not None) |
| | return BaseModelOutputWithPast( |
| | last_hidden_state=hidden_states, |
| | past_key_values=next_cache, |
| | hidden_states=all_hidden_states, |
| | attentions=all_self_attns, |
| | ) |
| |
|
| |
|
| | class InternLM2ForCausalLM(InternLM2PreTrainedModel): |
| | _auto_class = 'AutoModelForCausalLM' |
| |
|
| | _tied_weights_keys = ['output.weight'] |
| |
|
| | def __init__(self, config): |
| | super().__init__(config) |
| | self.model = InternLM2Model(config) |
| | self.vocab_size = config.vocab_size |
| | self.output = nn.Linear( |
| | config.hidden_size, config.vocab_size, bias=False) |
        self.debug_flag = 1
        self.tokenizer = None

        self.max_length = config.max_length
        print(f'Set max length to {self.max_length}')
        # Initialize weights and apply final processing
| | self.post_init() |
| |
|
| | self.vit = build_vision_tower(config._name_or_path) |
| | self.vision_proj = build_vision_projector() |
| |
|
| | self.vis_processor = transforms.Compose([ |
| | transforms.Resize((336, 336), |
| | interpolation=InterpolationMode.BICUBIC), |
| | transforms.ToTensor(), |
| | transforms.Normalize((0.48145466, 0.4578275, 0.40821073), |
| | (0.26862954, 0.26130258, 0.27577711)), |
| | ]) |
| |
|
| | def _set_gradient_checkpointing(self, module, value=False): |
| | if isinstance(module, InternLM2Model): |
| | module.gradient_checkpointing = value |
| | |
| | |
| |
|
| | def get_input_embeddings(self): |
| | return self.model.tok_embeddings |
| |
|
| | def set_input_embeddings(self, value): |
| | self.model.tok_embeddings = value |
| |
|
| | def get_output_embeddings(self): |
| | return self.output |
| |
|
| | def set_output_embeddings(self, new_embeddings): |
| | self.output = new_embeddings |
| |
|
| | def set_decoder(self, decoder): |
| | self.model = decoder |
| |
|
| | def get_decoder(self): |
| | return self.model |
| |
|
| | def encode_text(self, t, add_special_tokens=False): |
| | t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n') |
| | t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n') |
| | t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]') |
| | t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]') |
| | t = t.replace('[UNUSED_TOKEN_0]', '[UNUSED_TOKEN_145]') |
| | t = t.replace('[UNUSED_TOKEN_1]', '[UNUSED_TOKEN_145]') |
| |
|
| | text = t |
| | token = self.tokenizer( |
| | text, return_tensors='pt', |
| | add_special_tokens=add_special_tokens).input_ids.to(self.device) |
| | embs = self.model.tok_embeddings(token) |
| | return embs |
| |
|
| | def encode_img(self, image): |
| | if image is None: |
| | return None |
| | if isinstance(image, str): |
| | image = Image.open(image).convert('RGB') |
| | image = self.vis_processor(image).unsqueeze(0).to(self.device) |
| | else: |
| | assert isinstance(image, torch.Tensor) |
| |
|
| | img_embeds, atts_img, img_target = self.img2emb(image) |
| | return img_embeds |
| |
|
    def img2emb(self, image):
        # encode the image with the vision tower and project the patch
        # features into the language model's embedding space
        img_embeds = self.vision_proj(self.vit(image))
        atts_img = torch.ones(
            img_embeds.size()[:-1], dtype=torch.long).to(img_embeds.device)
| |
|
| | img_target = torch.ones( |
| | img_embeds.size()[:2], dtype=torch.long).to( |
| | img_embeds.device) * -100 |
| |
|
| | return img_embeds, atts_img, img_target |
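
    # Note: lora_len=576 in the PLoRA layers matches the number of visual
    # tokens produced here, assuming a CLIP ViT-L/14-style tower at 336x336
    # resolution: (336 / 14) ** 2 = 576 patch embeddings.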
| |
|
| | def prompt_wrap(self, img_embeds, prompt): |
| | batch_size = img_embeds.shape[0] |
| | p_before, p_after = prompt.split('<ImageHere>') |
| | p_before_tokens = self.tokenizer( |
| | p_before, return_tensors='pt', |
| | add_special_tokens=True).to(img_embeds.device) |
| |
|
| | p_before_embeds = self.model.tok_embeddings( |
| | p_before_tokens.input_ids).expand(batch_size, -1, -1) |
| | wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds], dim=1) |
| |
|
| | wrapped_atts_img = torch.ones( |
| | wrapped_img_embeds.size()[:-1], |
| | dtype=torch.long).to(img_embeds.device) |
| |
|
| | wrapped_target = torch.ones( |
| | batch_size, wrapped_img_embeds.shape[1], dtype=torch.long).to( |
| | img_embeds.device) * -100 |
| |
|
| | return wrapped_img_embeds, wrapped_atts_img, wrapped_target |
| |
|
    def text2emb(self, text, add_special=False):
        # normalize to a list of strings, then map the legacy role tags onto
        # the tokenizer's special tokens
        if isinstance(text, str):
            text = [text]
        new_text = []
        for t in text:
            t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
            t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
            t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]')
            t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]')
            new_text.append(t)
        text = new_text
| | to_regress_tokens = self.tokenizer( |
| | text, |
| | return_tensors='pt', |
| | padding='longest', |
| | truncation=True, |
| | max_length=self.max_length, |
| | add_special_tokens=add_special).to(self.device) |
| |
|
| | targets = self.mask_human_targets(to_regress_tokens.input_ids) |
| | targets = targets.to(self.device) |
| |
|
| | return to_regress_tokens, targets |
| |
|
| | def interleav_wrap(self, img_list, text_list): |
| | wrap_embeds_list, wrap_atts_list = [], [] |
| | wrap_target_list, wrap_im_mask_list = [], [] |
| |
|
| | for image, text in zip(img_list, text_list): |
| | img_embeds, atts_img, img_target = self.img2emb(image) |
| | text = text[0] |
| | parts = text.split('<ImageHere>') |
| | wrap_tokens, wrap_embeds, wrap_atts, wrap_im_mask = [], [], [], [] |
| | temp_len = 0 |
| | image_nums, im_len = img_embeds.shape[:2] |
| | need_bos = True |
| | for idx, part in enumerate(parts): |
| | if len(part) > 0: |
| | part_tokens = self.tokenizer( |
| | part, |
| | return_tensors='pt', |
| | padding='longest', |
| | add_special_tokens=need_bos).to(self.device) |
| | if need_bos: |
| | need_bos = False |
| | wrap_tokens.append(part_tokens.input_ids) |
| | part_embeds = self.model.tok_embeddings( |
| | part_tokens.input_ids) |
| | wrap_embeds.append(part_embeds) |
| | wrap_atts.append(part_tokens.attention_mask) |
| | wrap_im_mask.append( |
| | torch.zeros(part_embeds.shape[:2]).to(self.device)) |
| |
|
| | temp_len += part_embeds.shape[1] |
| | if idx < image_nums: |
| | wrap_tokens.append(img_target[idx].unsqueeze(0)) |
| | wrap_embeds.append(img_embeds[idx].unsqueeze(0)) |
| | wrap_atts.append(atts_img[idx].unsqueeze(0)) |
| | wrap_im_mask.append( |
| | torch.ones_like(atts_img[idx].unsqueeze(0))) |
| |
|
| | temp_len += im_len |
| | if temp_len > self.max_length: |
| | break |
| |
|
| | wrap_tokens = torch.cat(wrap_tokens, dim=1) |
| | wrap_embeds = torch.cat(wrap_embeds, dim=1) |
| | wrap_atts = torch.cat(wrap_atts, dim=1) |
| | wrap_im_mask = torch.cat(wrap_im_mask, dim=1) |
| |
|
| | wrap_target = self.mask_human_targets(wrap_tokens).to(self.device) |
| |
|
| | wrap_embeds = wrap_embeds[:, :self.max_length].to(self.device) |
| | wrap_atts = wrap_atts[:, :self.max_length].to(self.device) |
| | wrap_target = wrap_target[:, :self.max_length].to(self.device) |
| | wrap_im_mask = wrap_im_mask[:, :self.max_length].to(self.device) |
| |
|
| | wrap_embeds_list.append(wrap_embeds) |
| | wrap_atts_list.append(wrap_atts) |
| | wrap_target_list.append(wrap_target) |
| | wrap_im_mask_list.append(wrap_im_mask) |
| |
|
| | wrap_embeds = torch.cat(wrap_embeds_list) |
| | wrap_atts = torch.cat(wrap_atts_list) |
| | wrap_target = torch.cat(wrap_target_list) |
| | wrap_im_mask = torch.cat(wrap_im_mask_list) |
| | return wrap_embeds, wrap_atts, wrap_target, wrap_im_mask |
| |
|
| | def mask_human_targets(self, input_ids, pure=False): |
| | target_batch = [] |
| | for bs in range(input_ids.shape[0]): |
| | cur_idx = 0 |
| | ids = input_ids[bs] |
| | targets = copy.deepcopy(ids) |
| | end_count = 0 |
| | last_eoa = 0 |
            for i, temp_id in enumerate(ids):
                # 92542 is the end-of-turn separator ([UNUSED_TOKEN_145])
                if temp_id == 92542:
                    if end_count % 2 == 0:
                        targets[last_eoa:i + 6] = -100
                    else:
                        last_eoa = i + 1
                    end_count += 1
                # 2 is the EOS token id: keep loss on EOS, mask what follows
                elif temp_id == 2:
                    targets[i + 1:] = -100
                    break
            # truncated sequence with no EOS: mask everything after the
            # last complete answer
            if temp_id != 2 and end_count % 2 == 0:
                targets[last_eoa + 1:] = -100
| |
|
| | target_batch.append(targets.unsqueeze(0)) |
            if self.debug_flag:
                print('#### Warning! System meta is not supported yet')
                targets_vis = targets.clone()
                # 92399 ([UNUSED_TOKEN_2]) stands in for the masked (-100)
                # positions so they can be decoded and printed
                targets_vis[targets_vis == -100] = 92399
                targets_vis_tokens = ''.join(
                    self.tokenizer.convert_ids_to_tokens(targets_vis)).replace(
                        '[UNUSED_TOKEN_2]', ' ')
| | |
| | print('-----------') |
| | print([targets_vis_tokens]) |
| | print('-----------------------------') |
| |
|
| | target_batch = torch.cat(target_batch, dim=0) |
| | return target_batch |
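
    # In effect, every user turn (plus the separator tokens around it) is set
    # to -100 so that CrossEntropyLoss ignores it, while assistant turns keep
    # their token ids as supervision targets.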
| |
|
| | @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING) |
| | @replace_return_docstrings( |
| | output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) |
| | def forward(self, |
| | input_ids: torch.LongTensor = None, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[List[torch.FloatTensor]] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | **kwargs) -> Union[Tuple, CausalLMOutputWithPast]: |
| | r""" |
| | Args: |
| | labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
| | config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
| | (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| | |
| | Returns: |
| | |
| | Example: |
| | |
| | ```python |
| | >>> from transformers import AutoTokenizer, InternLM2ForCausalLM |
| | |
| | >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) |
| | >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) |
| | |
| | >>> prompt = "Hey, are you conscious? Can you talk to me?" |
| | >>> inputs = tokenizer(prompt, return_tensors="pt") |
| | |
| | >>> # Generate |
| | >>> generate_ids = model.generate(inputs.input_ids, max_length=30) |
| | >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] |
| | "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." |
| | ```""" |
| | samples = kwargs.get('samples', None) |
| | if samples: |
| | if self.debug_flag: |
| | self.debug_flag += 1 |
| | if self.debug_flag > 5: |
| | self.debug_flag = 0 |
| |
|
| | if samples['data_type'][0] == 'text': |
| | has_img = False |
| | elif samples['data_type'][0] == 'multi': |
| | has_img = True |
| | else: |
| | raise NotImplementedError |
| |
|
| | |
| | text = samples['text_input'] |
| | if has_img: |
| | image = samples['image'] |
| | to_regress_embeds, attention_mask, targets, im_mask = self.interleav_wrap( |
| | image, text) |
| | else: |
| | to_regress_tokens, targets = self.text2emb( |
| | text, add_special=True) |
| | to_regress_embeds = self.model.tok_embeddings( |
| | to_regress_tokens.input_ids) |
| | attention_mask = to_regress_tokens.attention_mask |
                im_mask = torch.zeros(to_regress_embeds.shape[:2]).to(
                    self.device)
| |
|
| | inputs_embeds = to_regress_embeds[:, :self.max_length] |
| | attention_mask = attention_mask[:, :self.max_length] |
| | targets = targets[:, :self.max_length] |
| | im_mask = im_mask[:, :self.max_length].bool() |
| | labels = targets |
| | if self.debug_flag: |
| | print(targets.shape, inputs_embeds.shape, attention_mask.shape) |
| | le = len(samples['text_input']) |
| | data_type = samples['data_type'][0] |
| | print( |
| | f'DataType: {data_type}. Has Image: {has_img}. Current max length: {self.max_length}, BatchSize is {le}' |
| | ) |
| | |
| | |
| |
|
| | else: |
| | self.debug_flag = 0 |
| | im_mask = kwargs.get('im_mask', None) |
| |
|
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else |
| | self.config.output_hidden_states) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | |
| | outputs = self.model( |
| | input_ids=input_ids, |
| | attention_mask=attention_mask, |
| | position_ids=position_ids, |
| | past_key_values=past_key_values, |
| | inputs_embeds=inputs_embeds, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | im_mask=im_mask, |
| | ) |
| |
|
| | hidden_states = outputs[0] |
| | logits = self.output(hidden_states) |
| | logits = logits.float() |
| |
|
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
| |
|
| | if not return_dict: |
| | output = (logits, ) + outputs[1:] |
| | return (loss, ) + output if loss is not None else output |
| |
|
| | return CausalLMOutputWithPast( |
| | loss=loss, |
| | logits=logits, |
| | past_key_values=outputs.past_key_values, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | ) |
| |
|
| | def prepare_inputs_for_generation(self, |
| | input_ids, |
| | past_key_values=None, |
| | attention_mask=None, |
| | inputs_embeds=None, |
| | im_mask=None, |
| | **kwargs): |
| | if past_key_values is not None: |
| | past_length = past_key_values[0][0].shape[2] |
| |
|
            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only the final ID
                remove_prefix_length = input_ids.shape[1] - 1
| |
|
| | input_ids = input_ids[:, remove_prefix_length:] |
| |
|
        position_ids = kwargs.get('position_ids', None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]
| |
|
        # if `inputs_embeds` are passed, only use them in the first
        # generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}
| |
|
| | model_inputs.update({ |
| | 'position_ids': position_ids, |
| | 'past_key_values': past_key_values, |
| | 'use_cache': kwargs.get('use_cache'), |
| | 'attention_mask': attention_mask, |
| | 'im_mask': im_mask, |
| | }) |
| | return model_inputs |
| |
|
| | @staticmethod |
| | def _reorder_cache(past_key_values, beam_idx): |
| | reordered_past = () |
| | for layer_past in past_key_values: |
| | reordered_past += (tuple( |
| | past_state.index_select(0, beam_idx.to(past_state.device)) |
| | for past_state in layer_past), ) |
| | return reordered_past |
| |
|
| | def build_inputs(self, |
| | tokenizer, |
| | query: str, |
| | history: List[Tuple[str, str]] = []): |
| | prompt = '' |
| | for record in history: |
| | prompt += f"""<|User|>:{record[0]}\n<|Bot|>:{record[1]}[UNUSED_TOKEN_0]\n""" |
| | prompt += f"""<|User|>:{query}\n<|Bot|>:""" |
| | return tokenizer([prompt], return_tensors='pt') |
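
    # For example, with history=[('A', 'B')] and query='Hi', the prompt fed
    # to the tokenizer is:
    #   '<|User|>:A\n<|Bot|>:B[UNUSED_TOKEN_0]\n<|User|>:Hi\n<|Bot|>:'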
| |
|
| | @torch.no_grad() |
| | def chat( |
| | self, |
| | tokenizer, |
| | query: str, |
| | history: List[Tuple[str, str]] = [], |
| | streamer: Optional[BaseStreamer] = None, |
| | max_new_tokens: int = 1024, |
| | do_sample: bool = True, |
| | temperature: float = 0.8, |
| | top_p: float = 0.8, |
| | **kwargs, |
| | ): |
| | inputs = self.build_inputs(tokenizer, query, history) |
| | inputs = { |
| | k: v.to(self.device) |
| | for k, v in inputs.items() if torch.is_tensor(v) |
| | } |
| | outputs = self.generate( |
| | **inputs, |
| | streamer=streamer, |
| | max_new_tokens=max_new_tokens, |
| | do_sample=do_sample, |
| | temperature=temperature, |
| | top_p=top_p, |
| | **kwargs, |
| | ) |
| | outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):] |
| | response = tokenizer.decode(outputs, skip_special_tokens=True) |
| | response = response.split('[UNUSED_TOKEN_0]')[0] |
| | history = history + [(query, response)] |
| | return response, history |
| |
|
| | @torch.no_grad() |
| | def stream_chat( |
| | self, |
| | tokenizer, |
| | query: str, |
| | history: List[Tuple[str, str]] = [], |
| | max_new_tokens: int = 1024, |
| | do_sample: bool = True, |
| | temperature: float = 0.8, |
| | top_p: float = 0.8, |
| | **kwargs, |
| | ): |
| | """Return a generator in format: (response, history) Eg. |
| | |
| | ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')]) ('你好,有什么可以帮助您的吗?', [('你好', |
| | '你好,有什么可以帮助您的吗?')]) |
| | """ |
| | if BaseStreamer is None: |
| | raise ModuleNotFoundError( |
| | 'The version of `transformers` is too low. Please make sure ' |
| | 'that you have installed `transformers>=4.28.0`.') |
| |
|
| | response_queue = queue.Queue(maxsize=20) |
| |
|
| | class ChatStreamer(BaseStreamer): |
| |
|
| | def __init__(self, tokenizer) -> None: |
| | super().__init__() |
| | self.tokenizer = tokenizer |
| | self.queue = response_queue |
| | self.query = query |
| | self.history = history |
| | self.response = '' |
| | self.received_inputs = False |
| | self.queue.put( |
| | (self.response, history + [(self.query, self.response)])) |
| |
|
| | def put(self, value): |
| | if len(value.shape) > 1 and value.shape[0] > 1: |
| | raise ValueError('ChatStreamer only supports batch size 1') |
| | elif len(value.shape) > 1: |
| | value = value[0] |
| |
|
                if not self.received_inputs:
                    # the first value put() receives is the prompt tokens;
                    # skip them and stream only newly generated tokens
                    self.received_inputs = True
                    return
| |
|
| | token = self.tokenizer.decode([value[-1]], |
| | skip_special_tokens=True) |
| | if token.strip() != '[UNUSED_TOKEN_0]': |
| | self.response = self.response + token |
| | history = self.history + [(self.query, self.response)] |
| | self.queue.put((self.response, history)) |
| |
|
| | def end(self): |
| | self.queue.put(None) |
| |
|
| | def stream_producer(): |
| | return self.chat( |
| | tokenizer=tokenizer, |
| | query=query, |
| | streamer=ChatStreamer(tokenizer=tokenizer), |
| | history=history, |
| | max_new_tokens=max_new_tokens, |
| | do_sample=do_sample, |
| | temperature=temperature, |
| | top_p=top_p, |
| | **kwargs, |
| | ) |
| |
|
| | def consumer(): |
| | producer = threading.Thread(target=stream_producer) |
| | producer.start() |
| | while True: |
| | res = response_queue.get() |
| | if res is None: |
| | return |
| | yield res |
| |
|
| | return consumer() |
| |
|