Update modeling_custom_seq2seq_llm.py
modeling_custom_seq2seq_llm.py
CHANGED
@@ -7,9 +7,9 @@ from flash_atten import MHA # Import the MHA class from the provided implementa
 from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss
 from liger_kernel.transformers.rms_norm import LigerRMSNorm
 from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
-from transformers import PreTrainedModel
+from transformers import PreTrainedModel, PretrainedConfig
+
 
-from configuration_custom_seq2seq_llm import Seq2SeqConfig
 
 
 class RMSNorm(nn.Module):
@@ -23,6 +23,60 @@ class RMSNorm(nn.Module):
         hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
         return self.weight * hidden_states.to(self.weight.dtype)
 
+class Seq2SeqConfig(PretrainedConfig):
+    def __init__(
+        self,
+        vocab_size=30522,
+        hidden_size=768,
+        num_encoder_layers=6,
+        num_decoder_layers=12,
+        num_attention_heads=12,
+        num_key_value_heads=4,
+        intermediate_size=3072,
+        hidden_act="silu",
+        hidden_dropout_prob=0.0,
+        attention_probs_dropout_prob=0.0,
+        max_position_embeddings=512,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        use_cache=True,
+        rotary_emb_dim=0,
+        rotary_emb_base=10000.0,
+        rotary_emb_scale_base=None,
+        rotary_emb_interleaved=False,
+        **kwargs
+    ):
+        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_encoder_layers = num_encoder_layers
+        self.num_decoder_layers = num_decoder_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.use_cache = use_cache
+        self.rotary_emb_base = rotary_emb_base
+        self.rotary_emb_scale_base = rotary_emb_scale_base
+        self.rotary_emb_interleaved = rotary_emb_interleaved
+
+        # Calculate head_dim and set rotary_emb_dim
+        self.head_dim = self.hidden_size // self.num_attention_heads
+        self.rotary_emb_dim = kwargs.get('rotary_emb_dim', self.head_dim // 2)
+
+        # Ensure rotary_emb_dim is not larger than head_dim
+        if self.rotary_emb_dim > self.head_dim:
+            print(f"Warning: rotary_emb_dim ({self.rotary_emb_dim}) is larger than head_dim ({self.head_dim}). Setting rotary_emb_dim to head_dim.")
+            self.rotary_emb_dim = self.head_dim
+
 
 class CustomSeq2SeqLLM(PreTrainedModel):
     config_class = Seq2SeqConfig