# OpenSpark-13B-Chat / modeling_spark.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Union, List
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
CausalLMOutputWithPast,
BaseModelOutputWithPast,
)
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask,
)
from .configuration_spark import SparkConfig
def rotate_half(x):
    """Rotate half the hidden dims of the input (standard RoPE helper)."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply rotary position embeddings to q and k.

    position_ids is unused here (the positions are already baked into cos/sin)
    and is kept only for signature compatibility with other implementations.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
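# Minimal shape sketch for the RoPE helpers above (tensor sizes are illustrative,
# not taken from the config):
#
#     q = torch.randn(1, 8, 16, 64)     # (batch, num_heads, seq_len, head_dim)
#     k = torch.randn(1, 2, 16, 64)     # (batch, num_kv_heads, seq_len, head_dim)
#     cos = torch.randn(1, 16, 64)      # (batch, seq_len, head_dim), from the rotary embedding
#     sin = torch.randn(1, 16, 64)
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over heads
#     assert q_rot.shape == q.shape and k_rot.shape == k.shape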
class SparkLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6, use_bias=True):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.use_bias = use_bias
if use_bias:
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.register_parameter('bias', None)
self.eps = eps
self.normalized_shape = (hidden_size,)
def forward(self, hidden_states):
return F.layer_norm(hidden_states, self.normalized_shape, self.weight, self.bias, self.eps)
class SparkMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ffn_hidden_size = config.ffn_hidden_size
self.dense_h_to_4h = nn.Linear(self.hidden_size, self.ffn_hidden_size * 2, bias=True)
self.dense_4h_to_h = nn.Linear(self.ffn_hidden_size, self.hidden_size, bias=True)
        if config.hidden_act == "fast_gelu":
            # "fast_gelu": the tanh approximation of GELU.
            self.activation_func = lambda x: F.gelu(x, approximate="tanh")
        else:
            self.activation_func = F.gelu
def forward(self, hidden_states):
intermediate = self.dense_h_to_4h(hidden_states)
hshape = intermediate.shape[:-1]
intermediate = intermediate.view(hshape + (-1, 2))
intermediate_parallel1, intermediate_parallel2 = torch.chunk(intermediate, 2, dim=-1)
intermediate_parallel1 = intermediate_parallel1.squeeze(-1)
intermediate_parallel2 = intermediate_parallel2.squeeze(-1)
intermediate_parallel1 = self.activation_func(intermediate_parallel1)
intermediate = intermediate_parallel1 * intermediate_parallel2
output = self.dense_4h_to_h(intermediate)
return output
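# The MLP above is a GELU-gated linear unit (GeGLU-style): dense_h_to_4h emits
# an interleaved pair of projections per FFN channel, which the view(..., -1, 2)
# plus chunk/squeeze splits apart before gating. An equivalent, illustrative
# computation on a standalone tensor (shapes here are made up, not from the config):
#
#     x = torch.randn(2, 5, 8)                  # (batch, seq, 2 * ffn_hidden)
#     a, b = x.view(2, 5, -1, 2).unbind(-1)     # de-interleave into two (2, 5, 4) halves
#     gated = F.gelu(a) * b                     # one half, activated, gates the other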
class SparkAttention(nn.Module):
def __init__(self, config: SparkConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.num_heads = config.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.head_dim = self.hidden_size // self.num_heads
self.use_bias = config.use_bias
self.query_key_value = nn.Linear(
self.hidden_size,
self.num_heads * self.head_dim + 2 * self.num_key_value_heads * self.head_dim,
bias=self.use_bias
)
self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=self.use_bias)
self.attention_dropout = config.attention_dropout
def forward(self, hidden_states, attention_mask=None, position_ids=None, past_key_value=None,
output_attentions=False, use_cache=False, cache_position=None, position_embeddings=None):
bsz, q_len, _ = hidden_states.size()
qkv = self.query_key_value(hidden_states)
query_pos = self.num_heads * self.head_dim
key_value_pos = query_pos + self.num_key_value_heads * self.head_dim
query_states = qkv[..., :query_pos]
key_states = qkv[..., query_pos:key_value_pos]
value_states = qkv[..., key_value_pos:]
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
if isinstance(past_key_value, Cache):
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
else:
past_key, past_value = past_key_value
key_states = torch.cat([past_key, key_states], dim=2)
value_states = torch.cat([past_value, value_states], dim=2)
        if isinstance(past_key_value, Cache):
            # The Cache object already holds the updated key/value states.
            cached_key_states = cached_value_states = None
        else:
            # Legacy (tuple) or empty cache: keep the pre-repeat K/V for the present tuple.
            cached_key_states, cached_value_states = key_states, value_states
key_states = self.repeat_kv(key_states, self.num_key_value_groups)
value_states = self.repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.dense(attn_output)
if use_cache:
present_key_value = past_key_value if isinstance(past_key_value, Cache) else (cached_key_states, cached_value_states)
else:
present_key_value = None
        return attn_output, (attn_weights if output_attentions else None), present_key_value
def repeat_kv(self, hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1: return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
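# Grouped-query attention sketch for repeat_kv above: with num_heads=8 and
# num_key_value_heads=2, n_rep = 8 // 2 = 4, so each cached K/V head serves
# four query heads (illustrative shapes; `attn` is a hypothetical SparkAttention):
#
#     kv = torch.randn(1, 2, 16, 64)            # (batch, kv_heads, seq, head_dim)
#     expanded = attn.repeat_kv(kv, 4)          # -> (1, 8, 16, 64), matching the query heads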
class SparkDecoderLayer(nn.Module):
def __init__(self, config: SparkConfig, layer_idx: int):
super().__init__()
self.input_layernorm = SparkLayerNorm(config.hidden_size, eps=config.layernorm_epsilon)
self.self_attn = SparkAttention(config, layer_idx)
self.post_attention_layernorm = SparkLayerNorm(config.hidden_size, eps=config.layernorm_epsilon)
self.mlp = SparkMLP(config)
def forward(self, hidden_states, attention_mask=None, position_ids=None, past_key_value=None,
output_attentions=False, use_cache=False, cache_position=None, position_embeddings=None):
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids,
past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache,
cache_position=cache_position, position_embeddings=position_embeddings,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions: outputs += (self_attn_weights,)
if use_cache: outputs += (present_key_value,)
return outputs
class SparkPreTrainedModel(PreTrainedModel):
config_class = SparkConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["SparkDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_cache_class = True
def _init_weights(self, module):
        std = getattr(self.config, "init_std", 0.02)
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None: module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
class SparkModel(SparkPreTrainedModel):
def __init__(self, config: SparkConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([SparkDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)])
self.norm = SparkLayerNorm(config.hidden_size, eps=config.layernorm_epsilon)
self.rope_theta = config.rope_theta
        self.rotary_emb = SparkRotaryEmbedding(  # defined at the bottom of this module; resolved at instantiation time
            config.hidden_size // config.num_heads,
            max_position_embeddings=config.max_position_embeddings,
            base=self.rope_theta,
        )
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def forward(self, input_ids=None, attention_mask=None, position_ids=None, past_key_values=None,
inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None,
return_dict=None, cache_position=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        past_key_values_length = 0
        if past_key_values is not None:
            if cache_position is not None:
                past_key_values_length = int(cache_position[0])
            elif isinstance(past_key_values, Cache):
                past_key_values_length = past_key_values.get_seq_length()
            else:
                # Legacy cache: a tuple of per-layer (key, value) tensors.
                past_key_values_length = past_key_values[0][0].shape[2]
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = _prepare_4d_causal_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length)
hidden_states = inputs_embeds
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = None
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for layer_idx, decoder_layer in enumerate(self.layers):
if output_hidden_states: all_hidden_states += (hidden_states,)
layer_past_key_value = past_key_values if isinstance(past_key_values, Cache) else (past_key_values[layer_idx] if past_key_values is not None else None)
layer_outputs = decoder_layer(
hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=layer_past_key_value,
output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings,
)
hidden_states = layer_outputs[0]
            if use_cache:
                layer_present_key_value = layer_outputs[2 if output_attentions else 1]
                if next_decoder_cache is None:
                    # A Cache object is shared across layers; legacy tuples are collected per layer.
                    next_decoder_cache = layer_present_key_value if isinstance(layer_present_key_value, Cache) else []
                if not isinstance(next_decoder_cache, Cache): next_decoder_cache.append(layer_present_key_value)
if output_attentions: all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
if output_hidden_states: all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns)
class SparkForCausalLM(SparkPreTrainedModel):
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.model = SparkModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def forward(self, input_ids=None, attention_mask=None, position_ids=None, past_key_values=None,
inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None,
return_dict=None, cache_position=None):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
hidden_states = outputs[0]
logits = self.lm_head(hidden_states).float()
loss = None
if labels is not None:
shift_logits = logits[..., :-1, :].contiguous().view(-1, self.config.vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1).to(shift_logits.device)
loss = nn.CrossEntropyLoss()(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
if input_ids is not None: input_ids = input_ids.long()
if attention_mask is not None: attention_mask = attention_mask.long()
cache_position = kwargs.get("cache_position", None)
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                # get_seq_length() is the supported Cache API for the number of cached tokens.
                past_length = past_key_values.get_seq_length()
            else:
                past_length = past_key_values[0][0].shape[2]
        else:
            past_length = 0
if past_key_values is not None and input_ids is not None:
if cache_position is not None:
cache_position = cache_position.long()
input_ids = input_ids[:, cache_position] if cache_position.max() < input_ids.shape[1] else input_ids[:, past_length:]
else: input_ids = input_ids[:, past_length:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values is not None and past_length > 0: position_ids = position_ids[:, past_length:]
if cache_position is None and input_ids is not None:
cache_position = torch.arange(past_length, past_length + input_ids.shape[1], device=input_ids.device, dtype=torch.long)
model_inputs = {"use_cache": kwargs.get("use_cache", True)}
if inputs_embeds is not None and past_key_values is None: model_inputs["inputs_embeds"] = inputs_embeds
elif input_ids is not None: model_inputs["input_ids"] = input_ids
if position_ids is not None: model_inputs["position_ids"] = position_ids
if cache_position is not None: model_inputs["cache_position"] = cache_position
if past_key_values is not None: model_inputs["past_key_values"] = past_key_values
if attention_mask is not None: model_inputs["attention_mask"] = attention_mask
return model_inputs
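# Note on prepare_inputs_for_generation above: under generate(), the first call
# sees the full prompt (past_length == 0), while each later call receives the
# prompt plus all previously sampled tokens; the slicing on input_ids and
# position_ids trims the inputs down to the not-yet-cached suffix before the
# next forward pass.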
class SparkRotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=32768, base=1000000.0, device=None):
super().__init__()
self.dim, self.max_position_embeddings, self.base = dim, max_position_embeddings, base
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
self._set_cos_sin_cache(seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype())
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
freqs = torch.outer(t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
    def forward(self, x, position_ids=None, seq_len=None):
        if position_ids is not None:
            # Cover the largest requested position (it may exceed the window length).
            actual_seq_len = max(position_ids.max().item() + 1, position_ids.shape[-1])
        elif seq_len is not None:
            actual_seq_len = seq_len if isinstance(seq_len, int) else int(seq_len)
        else:
            actual_seq_len = x.shape[2]
if actual_seq_len > self.max_seq_len_cached:
self._set_cos_sin_cache(seq_len=actual_seq_len, device=x.device, dtype=x.dtype)
cos = self.cos_cached[:actual_seq_len].to(dtype=x.dtype)
sin = self.sin_cached[:actual_seq_len].to(dtype=x.dtype)
if position_ids is not None:
cos, sin = cos[position_ids], sin[position_ids]
return cos, sin
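# Example usage (an illustrative sketch: the repo id below is assumed from this
# folder's name, and trust_remote_code is needed because the architecture is custom):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("freedomking/OpenSpark-13B-Chat", trust_remote_code=True)
#     model = AutoModelForCausalLM.from_pretrained("freedomking/OpenSpark-13B-Chat", trust_remote_code=True)
#     inputs = tokenizer("Hello", return_tensors="pt")
#     output_ids = model.generate(**inputs, max_new_tokens=32)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))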