Fix KV cache handling in Attention layer

modeling_nebula.py  (+28 −20)
@@ -12,9 +12,17 @@ class NebulaConfig(PretrainedConfig):
     def __init__(self, dim=1280, n_layers=14, n_heads=10, n_kv_heads=10, vocab_size=60729,
                  multiple_of=256, ffn_dim_multiplier=8/3, norm_eps=1e-5, max_seq_len=2048,
                  dropout=0.1, use_cache=True, **kwargs):
-        self.dim …
-        self.…
-        self.…
+        self.dim = dim
+        self.n_layers = n_layers
+        self.n_heads = n_heads
+        self.n_kv_heads = n_kv_heads
+        self.vocab_size = vocab_size
+        self.multiple_of = multiple_of
+        self.ffn_dim_multiplier = ffn_dim_multiplier
+        self.norm_eps = norm_eps
+        self.max_seq_len = max_seq_len
+        self.dropout = dropout
+        self.use_cache = use_cache
         super().__init__(**kwargs)

 class RMSNorm(nn.Module):
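A quick sanity check of the new config wiring (a sketch, not part of the commit; the modeling_nebula import path is an assumption): attributes assigned on self are what PretrainedConfig serializes, so they must exist before anything reads the config back.

    from modeling_nebula import NebulaConfig

    config = NebulaConfig()
    assert config.n_heads % config.n_kv_heads == 0   # grouped-query repeat needs an even split
    assert config.to_dict()["max_seq_len"] == 2048   # round-trips now that the attribute is set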
@@ -40,11 +48,9 @@ class RoPE(nn.Module):
         self.register_buffer('cos_cached', freqs.cos(), persistent=False)
         self.register_buffer('sin_cached', freqs.sin(), persistent=False)
     def forward(self, x: torch.Tensor, start_pos: int = 0):
-        seq_len = x.shape[…
+        seq_len = x.shape[-2]  # Use -2 for sequence length dimension
         cos = self.cos_cached[start_pos : start_pos + seq_len]
         sin = self.sin_cached[start_pos : start_pos + seq_len]
-        cos = cos.unsqueeze(0).unsqueeze(2)
-        sin = sin.unsqueeze(0).unsqueeze(2)
         x1 = x[..., : self.dim // 2]
         x2 = x[..., self.dim // 2 :]
         rotated_x1 = x1 * cos - x2 * sin
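A toy-size sketch of why the index changes (independent of the module): judging by the broadcasting in this hunk, x arrives as (bs, n_heads, seq_len, head_dim) carrying only the new tokens, so shape[-2] reads the sequence axis where shape[1] would read n_heads, and the cached tables are sliced from start_pos.

    import torch

    cos_cached = torch.randn(2048, 64)  # stand-in for the (max_seq_len, dim // 2) buffer
    x = torch.randn(2, 10, 1, 128)      # (bs, n_heads, seq_len=1, head_dim): one decode step
    start_pos = 17                      # 17 positions already sit in the KV cache
    seq_len = x.shape[-2]               # 1; x.shape[1] would wrongly give n_heads here
    cos = cos_cached[start_pos : start_pos + seq_len]   # (1, 64)
    assert (x[..., :64] * cos).shape == (2, 10, 1, 64)  # broadcasts over bs and heads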
@@ -75,10 +81,10 @@ class Attention(nn.Module):
         self.wv = nn.Linear(config.dim, self.n_kv_heads * self.head_dim, bias=False)
         self.wo = nn.Linear(self.n_heads * self.head_dim, config.dim, bias=False)
         self.rope = RoPE(config)
-    def repeat_kv(self, x: torch.Tensor) -> torch.Tensor:
-        bs, …
-        if …
-        return x.unsqueeze(3).expand(bs, …
+    def repeat_kv(self, x: torch.Tensor, n_rep: int) -> torch.Tensor:
+        bs, n_kv_heads, seq_len, head_dim = x.shape
+        if n_rep == 1: return x
+        return x.unsqueeze(2).expand(bs, n_kv_heads, n_rep, seq_len, head_dim).reshape(bs, n_kv_heads * n_rep, seq_len, head_dim)
     def forward(self, x: torch.Tensor, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, attention_mask: Optional[torch.Tensor] = None):
         bs, seq_len_q, _ = x.shape
         start_pos = past_key_values[0].shape[2] if past_key_values is not None else 0
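How the grouped-query expansion has to line up (a toy-size sketch): n_rep is inserted next to the KV-head axis so the final reshape merges adjacent dimensions; expanding after seq_len instead would interleave positions across heads.

    import torch

    bs, n_kv_heads, seq_len, head_dim, n_rep = 2, 5, 7, 64, 2
    x = torch.randn(bs, n_kv_heads, seq_len, head_dim)
    y = (x.unsqueeze(2)                                          # (bs, kv, 1, seq, dim)
           .expand(bs, n_kv_heads, n_rep, seq_len, head_dim)     # a view, still no copy
           .reshape(bs, n_kv_heads * n_rep, seq_len, head_dim))  # (bs, n_heads, seq, dim)
    assert torch.equal(y[:, 0], x[:, 0]) and torch.equal(y[:, 1], x[:, 0])  # heads 0 and 1 share KV head 0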
@@ -93,9 +99,8 @@ class Attention(nn.Module):
         xk = torch.cat([past_k, xk], dim=2)
         xv = torch.cat([past_v, xv], dim=2)
         present_key_values = (xk, xv) if use_cache else None
-        xk_rep, xv_rep = self.repeat_kv(xk), self.repeat_kv(xv)
-
-        output = F.scaled_dot_product_attention(xq, xk_rep, xv_rep, attn_mask=attention_mask, is_causal=is_causal)
+        xk_rep, xv_rep = self.repeat_kv(xk, self.n_rep), self.repeat_kv(xv, self.n_rep)
+        output = F.scaled_dot_product_attention(xq, xk_rep, xv_rep, attn_mask=attention_mask)
         output = output.transpose(1, 2).contiguous().view(bs, seq_len_q, -1)
         return self.wo(output), present_key_values
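What the concatenation feeds into SDPA during cached decoding, in isolation (toy shapes): the single new query may attend to every cached position, so this step needs no causal mask.

    import torch
    import torch.nn.functional as F

    bs, n_heads, head_dim = 1, 10, 128
    past_k = torch.randn(bs, n_heads, 17, head_dim)    # 17 cached positions
    past_v = torch.randn(bs, n_heads, 17, head_dim)
    new_k = torch.randn(bs, n_heads, 1, head_dim)
    new_v = torch.randn(bs, n_heads, 1, head_dim)
    xq = torch.randn(bs, n_heads, 1, head_dim)         # the one token being decoded
    xk = torch.cat([past_k, new_k], dim=2)             # (1, 10, 18, 128)
    xv = torch.cat([past_v, new_v], dim=2)
    out = F.scaled_dot_product_attention(xq, xk, xv)   # (1, 10, 1, 128)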
@@ -130,18 +135,21 @@ class NebulaForCausalLM(PreTrainedModel, GenerationMixin):
     def _init_weights(self, module):
         if isinstance(module, (nn.Linear, nn.Embedding)): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
         if hasattr(module, 'is_residual_output'): torch.nn.init.normal_(module.weight, mean=0.0, std=(0.02 / math.sqrt(2 * self.config.n_layers)))
-    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[…
+    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, use_cache: Optional[bool] = None, labels: Optional[torch.Tensor] = None, **kwargs) -> CausalLMOutputWithPast:
         use_cache = use_cache if use_cache is not None else self.config.use_cache
         x = self.dropout(self.model.tok_embeddings(input_ids))
-        …
+        present_key_values_list = [] if use_cache else None
         for i, layer in enumerate(self.model.layers):
             past_kv = past_key_values[i] if past_key_values is not None else None
             x, present_kv = layer(x, past_key_values=past_kv, use_cache=use_cache, attention_mask=attention_mask)
-        if use_cache and …
+            if use_cache and present_key_values_list is not None:
+                present_key_values_list.append(present_kv)
         logits = self.model.output(self.model.norm(x))
         loss = None
-        if labels is not None:
-            …
-            …
-            …
+        if labels is not None:
+            loss = nn.CrossEntropyLoss()(logits.view(-1, self.config.vocab_size), labels.view(-1))
+        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=tuple(present_key_values_list) if present_key_values_list else None)
+    def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs) -> Dict[str, Any]:
+        if past_key_values:
+            input_ids = input_ids[:, -1:]
         return {"input_ids": input_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache", True), "attention_mask": attention_mask}
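Put together, the new forward and prepare_inputs_for_generation support a greedy decode loop along these lines (a sketch; it assumes the rest of modeling_nebula.py is importable, with class names taken from the diff):

    import torch
    from modeling_nebula import NebulaConfig, NebulaForCausalLM

    model = NebulaForCausalLM(NebulaConfig()).eval()
    input_ids = torch.randint(0, 60729, (1, 8))        # a fake 8-token prompt
    past = None
    with torch.no_grad():
        for _ in range(5):
            inputs = model.prepare_inputs_for_generation(input_ids, past_key_values=past)
            out = model(**inputs)                      # only the last token runs once past is set
            past = out.past_key_values                 # each layer's (k, v) grows by one position
            next_id = out.logits[:, -1].argmax(dim=-1, keepdim=True)
            input_ids = torch.cat([input_ids, next_id], dim=1)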