"""
BitSkip v1 with Early Exit Loss and Quadratic Dropout

- BitLinear quantization (8-bit)
- Quadratic layer dropout (0 to 0.5 progression, sum=1 constraint)
- Early exit loss from all layers
- HuggingFace compatible
"""
|
|
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel, PretrainedConfig, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast

from .bitlinear import BitLinear
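
# NOTE: BitLinear is assumed here (based on how it is used below) to be a
# drop-in replacement for nn.Linear: same (in_features, out_features)
# constructor, same forward signature, with quantization handled internally.
# See bitlinear.py for the actual implementation.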
|
|
class BitSkipV1EarlyExitConfig(PretrainedConfig):
    model_type = "bitskip_v1_earlyexit"

    def __init__(
        self,
        vocab_size=50257,
        hidden_size=2048,
        num_hidden_layers=24,
        num_attention_heads=32,
        num_key_value_heads=8,
        intermediate_size=4096,
        max_position_embeddings=2048,
        rms_norm_eps=1e-5,
        rope_theta=10000.0,
        early_exit_loss_weight=0.3,
        max_dropout_prob=0.5,
        inference_exit_layer=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta
        self.early_exit_loss_weight = early_exit_loss_weight
        self.max_dropout_prob = max_dropout_prob
        self.inference_exit_layer = inference_exit_layer
        super().__init__(**kwargs)
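
# Example (illustrative only; these sizes are arbitrary, not defaults):
#
#   config = BitSkipV1EarlyExitConfig(hidden_size=256, num_hidden_layers=4,
#                                     num_attention_heads=4, num_key_value_heads=2,
#                                     intermediate_size=512)
#   model = BitSkipV1ForCausalLMWithEarlyExit(config)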
|
|
class QuadraticLayerDropout(nn.Module):
    """
    Quadratic layer dropout: p_l = p_max * (l / (L - 1))^2 for layer l of L,
    then normalized so the probabilities sum to 1.
    """

    def __init__(self, num_layers, max_dropout_prob=0.5):
        super().__init__()
        self.num_layers = num_layers
        self.max_dropout_prob = max_dropout_prob

        # Quadratic progression: 0 for the first layer, rising to
        # max_dropout_prob for the last layer.
        dropout_probs = []
        for i in range(num_layers):
            prob = max_dropout_prob * ((i / max(num_layers - 1, 1)) ** 2)
            dropout_probs.append(prob)

        # Normalize so the per-layer probabilities sum to 1.
        total_prob = sum(dropout_probs)
        if total_prob > 0:
            dropout_probs = [p / total_prob for p in dropout_probs]

        self.dropout_probs = dropout_probs

    def should_drop_layer(self, layer_idx):
        """Returns True if the layer should be dropped during training."""
        # Never drop at eval time, and never drop the final layer.
        if not self.training or layer_idx >= self.num_layers - 1:
            return False
        return torch.rand(1).item() < self.dropout_probs[layer_idx]
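
# Worked example (illustrative): with num_layers=4 and max_dropout_prob=0.5 the
# raw schedule is [0.0, 0.056, 0.222, 0.5]; after normalization the per-layer
# drop probabilities become roughly [0.0, 0.071, 0.286, 0.643].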
|
|
class RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Compute the norm in float32 for numerical stability, then cast back.
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
|
|
class RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, x, position_ids):
        # (bsz, dim/2, 1) @ (bsz, 1, seq_len) -> transpose -> (bsz, seq_len, dim/2)
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        freqs = (inv_freq_expanded @ position_ids_expanded).transpose(1, 2)
        emb = torch.cat((freqs, freqs), dim=-1)
        return emb.cos().to(x.dtype), emb.sin().to(x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input: (x1, x2) -> (-x2, x1)."""
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin):
    # cos/sin come in as (bsz, seq_len, head_dim); unsqueeze a head axis so they
    # broadcast against q/k of shape (bsz, num_heads, seq_len, head_dim).
    cos = cos.unsqueeze(1)
    sin = sin.unsqueeze(1)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
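
# Sanity check (illustrative): rotate_half(torch.tensor([1., 2., 3., 4.]))
# returns tensor([-3., -4., 1., 2.]).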
|
|
class BitSkipV1Attention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads

        self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim)
        self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim)
        self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim)
        self.o_proj = BitLinear(self.hidden_size, self.hidden_size)

        self.rotary_emb = RotaryEmbedding(self.head_dim, config.max_position_embeddings, config.rope_theta)

    def forward(self, hidden_states, attention_mask=None, position_ids=None, past_key_value=None, use_cache=False):
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # Grouped-query attention: replicate each KV head across its query group.
        key_states = key_states.repeat_interleave(self.num_key_value_groups, dim=1)
        value_states = value_states.repeat_interleave(self.num_key_value_groups, dim=1)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        # Causal mask: query position i may attend only to cached positions and
        # to key positions j <= i (offset by the cache length).
        kv_len = key_states.shape[-2]
        causal_mask = torch.triu(
            torch.full((q_len, kv_len), torch.finfo(attn_weights.dtype).min,
                       dtype=attn_weights.dtype, device=attn_weights.device),
            diagonal=kv_len - q_len + 1,
        )
        attn_weights = attn_weights + causal_mask

        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value
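
# With the config defaults above (32 query heads, 8 key/value heads),
# num_key_value_groups is 4: each K/V head is shared by 4 query heads
# after repeat_interleave.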
|
|
class BitSkipV1MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.gate_proj = BitLinear(config.hidden_size, config.intermediate_size)
        self.up_proj = BitLinear(config.hidden_size, config.intermediate_size)
        self.down_proj = BitLinear(config.intermediate_size, config.hidden_size)

    def forward(self, x):
        # SwiGLU: down(silu(gate(x)) * up(x))
        return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
|
|
class BitSkipV1DecoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self_attn = BitSkipV1Attention(config)
        self.mlp = BitSkipV1MLP(config)
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(self, hidden_states, attention_mask=None, position_ids=None, past_key_value=None, use_cache=False):
        # Pre-norm attention block with residual connection.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _, present_key_value = self.self_attn(
            hidden_states, attention_mask, position_ids, past_key_value, use_cache
        )
        hidden_states = residual + hidden_states

        # Pre-norm MLP block with residual connection.
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return (hidden_states,) + ((present_key_value,) if use_cache else ())
|
|
class BitSkipV1PreTrainedModel(PreTrainedModel):
    config_class = BitSkipV1EarlyExitConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, BitLinear)):
            if hasattr(module, "weight"):
                module.weight.data.normal_(mean=0.0, std=0.02)
            if hasattr(module, "bias") and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=0.02)
|
|
class BitSkipV1Model(BitSkipV1PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([BitSkipV1DecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False

        self.layer_dropout = QuadraticLayerDropout(config.num_hidden_layers, config.max_dropout_prob)

        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, position_ids=None, past_key_values=None, inputs_embeds=None, use_cache=False, output_hidden_states=False, return_all_layer_outputs=False):
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        hidden_states = inputs_embeds

        past_length = past_key_values[0][0].shape[2] if past_key_values else 0
        if position_ids is None:
            # Offset by the cache length so cached generation sees correct positions.
            position_ids = torch.arange(
                past_length, past_length + hidden_states.shape[1],
                dtype=torch.long, device=hidden_states.device,
            )
            position_ids = position_ids.unsqueeze(0)

        if attention_mask is not None and attention_mask.dim() == 2:
            # Convert a (bsz, kv_len) padding mask of 1s/0s into an additive bias;
            # the causal part of the mask is applied inside the attention layers.
            attention_mask = (
                1.0 - attention_mask[:, None, None, :].to(hidden_states.dtype)
            ) * torch.finfo(hidden_states.dtype).min

        next_decoder_cache = () if use_cache else None
        all_layer_hidden_states = []

        # Optionally stop early at a fixed layer for faster inference.
        num_layers_to_run = self.config.inference_exit_layer if self.config.inference_exit_layer is not None else len(self.layers)
        num_layers_to_run = min(num_layers_to_run, len(self.layers))

        for idx in range(num_layers_to_run):
            layer = self.layers[idx]
            past_key_value = past_key_values[idx] if past_key_values else None

            # Quadratic layer dropout: stochastically skip this layer during training.
            if self.training and self.layer_dropout.should_drop_layer(idx):
                all_layer_hidden_states.append(hidden_states)
                continue

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_value,
                    use_cache,
                )
            else:
                layer_outputs = layer(hidden_states, attention_mask, position_ids, past_key_value, use_cache)

            hidden_states = layer_outputs[0]
            all_layer_hidden_states.append(hidden_states)

            if use_cache:
                next_decoder_cache += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)
        all_layer_hidden_states.append(hidden_states)

        if return_all_layer_outputs:
            return hidden_states, next_decoder_cache, all_layer_hidden_states
        else:
            return hidden_states, next_decoder_cache, None
|
|
class BitSkipV1ForCausalLMWithEarlyExit(BitSkipV1PreTrainedModel, GenerationMixin):
    """BitSkip v1 with early exit loss."""

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = BitSkipV1Model(config)
        self.vocab_size = config.vocab_size

        # Full-precision head; every early exit reuses this same head.
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def compute_early_exit_loss(self, all_layer_hidden_states, labels):
        """
        Compute early exit loss from all layers.
        Uses layer-proportional weighting, w_i = (i + 1) / N, normalized to sum to 1.
        """
        num_layers = len(all_layer_hidden_states)

        # Later layers get proportionally larger weights.
        weights = [(i + 1) / num_layers for i in range(num_layers)]
        weight_sum = sum(weights)
        weights = [w / weight_sum for w in weights]

        loss_fct = nn.CrossEntropyLoss()
        total_exit_loss = 0.0

        for i, hidden_states in enumerate(all_layer_hidden_states):
            # Project this layer's hidden states through the shared LM head.
            logits = self.lm_head(hidden_states)

            # Standard next-token shift.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            layer_loss = loss_fct(shift_logits.view(-1, self.vocab_size), shift_labels.view(-1))
            total_exit_loss += weights[i] * layer_loss

        return total_exit_loss
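
    # Weighting example (illustrative): with 4 contributing layers the normalized
    # weights are [0.1, 0.2, 0.3, 0.4], so deeper exits dominate the auxiliary loss.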
|
|
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        past_key_values=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Collect per-layer hidden states only when they are needed for the loss.
        return_all = self.training and labels is not None

        hidden_states, past_key_values_output, all_layer_hidden_states = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_hidden_states=output_hidden_states,
            return_all_layer_outputs=return_all,
        )

        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Main loss from the final (normalized) hidden states.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            main_loss = loss_fct(shift_logits.view(-1, self.vocab_size), shift_labels.view(-1))

            if all_layer_hidden_states is not None and len(all_layer_hidden_states) > 0:
                # Exclude the last entry: it is the post-norm output already used above.
                early_exit_loss = self.compute_early_exit_loss(all_layer_hidden_states[:-1], labels)
                loss = main_loss + self.config.early_exit_loss_weight * early_exit_loss
            else:
                loss = main_loss

        if not return_dict:
            output = (logits,) + (past_key_values_output,)
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=past_key_values_output,
            hidden_states=None,
            attentions=None,
        )
|
|
    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
        if past_key_values is not None:
            # Keep only the tokens the cache has not seen yet.
            past_length = past_key_values[0][0].shape[2]
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                remove_prefix_length = input_ids.shape[1] - 1
            input_ids = input_ids[:, remove_prefix_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # Derive position ids from the padding mask on the fly.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]

        # inputs_embeds are only usable on the first forward pass (no cache yet).
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update({
            "position_ids": position_ids,
            "past_key_values": past_key_values,
            "use_cache": kwargs.get("use_cache"),
            "attention_mask": attention_mask,
        })
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        # Reorder every layer's cached keys/values to follow the selected beams.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def set_exit_layer(self, exit_layer):
        """Set the early exit layer for inference (None restores the full depth)."""
        self.config.inference_exit_layer = exit_layer
        self.model.config.inference_exit_layer = exit_layer
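
# Usage note (illustrative): model.set_exit_layer(12) makes subsequent forward
# passes run only the first 12 decoder layers; set_exit_layer(None) restores
# the full depth.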
|
|
BitSkipV1EarlyExitConfig.register_for_auto_class()
BitSkipV1ForCausalLMWithEarlyExit.register_for_auto_class("AutoModelForCausalLM")
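
# End-to-end sketch (illustrative; `batch` and `prompt_ids` are assumed
# LongTensors of token ids, and this file must live in a package alongside
# bitlinear.py):
#
#   config = BitSkipV1EarlyExitConfig()
#   model = BitSkipV1ForCausalLMWithEarlyExit(config)
#
#   # Training: labels trigger the combined main + weighted early-exit loss.
#   out = model(input_ids=batch, labels=batch)
#   out.loss.backward()
#
#   # Inference: optionally truncate depth, then generate as usual.
#   model.eval()
#   model.set_exit_layer(12)
#   tokens = model.generate(prompt_ids, max_new_tokens=32)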