AlexAISandro committed on
Commit ca731b9 · verified · 1 Parent(s): d890812

Add custom modeling code

Files changed (1)
  1. modeling_nebula.py +147 -0
modeling_nebula.py ADDED
import math
from typing import Any, Dict, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GenerationMixin, PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

class NebulaConfig(PretrainedConfig):
    model_type = "nebula"

    def __init__(self, dim=1280, n_layers=14, n_heads=10, n_kv_heads=10, vocab_size=60729,
                 multiple_of=256, ffn_dim_multiplier=8/3, norm_eps=1e-5, max_seq_len=2048,
                 dropout=0.1, use_cache=True, **kwargs):
        self.dim = dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.n_kv_heads = n_kv_heads
        self.vocab_size = vocab_size
        self.multiple_of = multiple_of
        self.ffn_dim_multiplier = ffn_dim_multiplier
        self.norm_eps = norm_eps
        self.max_seq_len = max_seq_len
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(**kwargs)

class RMSNorm(nn.Module):
    """Root-mean-square LayerNorm: scales by 1/RMS(x), with no mean subtraction and no bias."""
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        # Normalize in float32 for stability, then cast back to the input dtype.
        return self._norm(x.float()).type_as(x) * self.weight

class RoPE(nn.Module):
    """Rotary position embeddings; expects inputs shaped (bs, seq_len, n_heads, head_dim)."""
    def __init__(self, config: NebulaConfig):
        super().__init__()
        self.dim = config.dim // config.n_heads  # per-head dimension
        self.max_seq_len = config.max_seq_len
        # Build the cache on CPU; the non-persistent buffers follow the module on .to(device).
        self._build_cache(torch.device("cpu"))

    def _build_cache(self, device, base=10000):
        theta = 1.0 / (base ** (torch.arange(0, self.dim, 2, device=device).float() / self.dim))
        t = torch.arange(self.max_seq_len, device=device, dtype=theta.dtype)
        freqs = torch.einsum("i,j->ij", t, theta)  # (max_seq_len, dim // 2)
        self.register_buffer("cos_cached", freqs.cos(), persistent=False)
        self.register_buffer("sin_cached", freqs.sin(), persistent=False)

    def forward(self, x: torch.Tensor, start_pos: int = 0):
        # start_pos offsets into the cache so cached decoding keeps absolute positions.
        seq_len = x.shape[1]
        cos = self.cos_cached[start_pos : start_pos + seq_len].unsqueeze(0).unsqueeze(2)
        sin = self.sin_cached[start_pos : start_pos + seq_len].unsqueeze(0).unsqueeze(2)
        x1 = x[..., : self.dim // 2]
        x2 = x[..., self.dim // 2 :]
        # Rotate-half form: (x1, x2) -> (x1*cos - x2*sin, x1*sin + x2*cos).
        return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1).type_as(x)

class SwiGLU(nn.Module):
    """Gated feed-forward block: w2(silu(w1(x)) * w3(x))."""
    def __init__(self, config: NebulaConfig):
        super().__init__()
        hidden_dim = int(config.dim * config.ffn_dim_multiplier)
        # Round the hidden size up to the nearest multiple of `multiple_of`.
        hidden_dim = config.multiple_of * ((hidden_dim + config.multiple_of - 1) // config.multiple_of)
        self.w1 = nn.Linear(config.dim, hidden_dim, bias=False)
        self.w2 = nn.Linear(hidden_dim, config.dim, bias=False)
        self.w3 = nn.Linear(config.dim, hidden_dim, bias=False)

    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))

class Attention(nn.Module):
    """Multi-head attention with RoPE, grouped-query KV heads, and a KV cache."""
    def __init__(self, config: NebulaConfig):
        super().__init__()
        self.config = config
        self.n_heads = config.n_heads
        self.n_kv_heads = config.n_kv_heads
        self.head_dim = config.dim // config.n_heads
        self.n_rep = self.n_heads // config.n_kv_heads  # query heads per KV head (1 = plain MHA)
        self.wq = nn.Linear(config.dim, self.n_heads * self.head_dim, bias=False)
        self.wk = nn.Linear(config.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(config.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(self.n_heads * self.head_dim, config.dim, bias=False)
        self.rope = RoPE(config)

    def repeat_kv(self, x: torch.Tensor) -> torch.Tensor:
        # (bs, n_kv_heads, seq_len_kv, head_dim) -> (bs, n_heads, seq_len_kv, head_dim)
        bs, n_kv_heads, seq_len_kv, head_dim = x.shape
        if self.n_rep == 1:
            return x
        return (x[:, :, None, :, :]
                .expand(bs, n_kv_heads, self.n_rep, seq_len_kv, head_dim)
                .reshape(bs, self.n_heads, seq_len_kv, head_dim))

    def forward(self, x: torch.Tensor, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache: bool = False, attention_mask: Optional[torch.Tensor] = None):
        bs, seq_len_q, _ = x.shape
        # Cached keys/values have shape (bs, n_kv_heads, seq_len, head_dim).
        start_pos = past_key_values[0].shape[2] if past_key_values is not None else 0
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
        xq = xq.view(bs, seq_len_q, self.n_heads, self.head_dim)
        xk = xk.view(bs, seq_len_q, self.n_kv_heads, self.head_dim)
        xv = xv.view(bs, seq_len_q, self.n_kv_heads, self.head_dim)
        # RoPE expects (bs, seq, heads, head_dim): rotate first, then move heads forward for SDPA.
        xq = self.rope(xq, start_pos=start_pos).transpose(1, 2)
        xk = self.rope(xk, start_pos=start_pos).transpose(1, 2)
        xv = xv.transpose(1, 2)
        if past_key_values is not None:
            past_k, past_v = past_key_values
            xk = torch.cat([past_k, xk], dim=2)
            xv = torch.cat([past_v, xv], dim=2)
        present_key_values = (xk, xv) if use_cache else None
        xk_rep, xv_rep = self.repeat_kv(xk), self.repeat_kv(xv)
        # An explicit mask already encodes causality, and with a cache the single new query
        # may attend to every cached position, so is_causal stays off in both cases
        # (SDPA rejects an explicit attn_mask combined with is_causal=True).
        is_causal = attention_mask is None and past_key_values is None
        output = F.scaled_dot_product_attention(xq, xk_rep, xv_rep, attn_mask=attention_mask, is_causal=is_causal)
        output = output.transpose(1, 2).contiguous().view(bs, seq_len_q, -1)
        return self.wo(output), present_key_values

class DecoderBlock(nn.Module):
    """Pre-norm transformer block: x + Attn(norm(x)), then h + FFN(norm(h))."""
    def __init__(self, config: NebulaConfig):
        super().__init__()
        self.attention = Attention(config)
        self.feed_forward = SwiGLU(config)
        self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.dropout = nn.Dropout(config.dropout)
        # Flag the residual-path projections so _init_weights can scale them down.
        self.attention.wo.is_residual_output = True
        self.feed_forward.w2.is_residual_output = True

    def forward(self, x: torch.Tensor, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache: bool = False, attention_mask: Optional[torch.Tensor] = None):
        attn_out, present_kv = self.attention(self.attention_norm(x), past_key_values=past_key_values,
                                              use_cache=use_cache, attention_mask=attention_mask)
        h = x + self.dropout(attn_out)
        ff_out = self.feed_forward(self.ffn_norm(h))
        out = h + self.dropout(ff_out)
        return out, present_kv

class NebulaForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = NebulaConfig
    _tied_weights_keys = ["model.output.weight"]  # the output head shares the embedding tensor

    def __init__(self, config: NebulaConfig):
        super().__init__(config)
        self.model = nn.ModuleDict({
            "tok_embeddings": nn.Embedding(config.vocab_size, config.dim),
            "layers": nn.ModuleList([DecoderBlock(config) for _ in range(config.n_layers)]),
            "norm": RMSNorm(config.dim, eps=config.norm_eps),
            "output": nn.Linear(config.dim, config.vocab_size, bias=False),
        })
        self.dropout = nn.Dropout(config.dropout)
        self.model.tok_embeddings.weight = self.model.output.weight  # weight tying
        self.post_init()

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        if hasattr(module, "is_residual_output"):  # GPT-2-style scaled init on residual projections
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layers))

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None,
                past_key_values: Optional[list] = None, use_cache: Optional[bool] = None,
                labels: Optional[torch.Tensor] = None, **kwargs) -> CausalLMOutputWithPast:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        x = self.dropout(self.model.tok_embeddings(input_ids))
        if attention_mask is not None and attention_mask.dim() == 2:
            # Fold the (bs, kv_len) padding mask and causality into one boolean
            # (bs, 1, q_len, kv_len) mask; SDPA rejects attn_mask plus is_causal=True.
            q_len, kv_len = input_ids.shape[1], attention_mask.shape[1]
            causal = torch.tril(torch.ones(q_len, kv_len, dtype=torch.bool, device=x.device),
                                diagonal=kv_len - q_len)
            attention_mask = causal[None, None, :, :] & attention_mask[:, None, None, :].bool()
        present_key_values = [] if use_cache else None
        for i, layer in enumerate(self.model.layers):
            past_kv = past_key_values[i] if past_key_values is not None else None
            x, present_kv = layer(x, past_key_values=past_kv, use_cache=use_cache, attention_mask=attention_mask)
            if use_cache and present_key_values is not None:
                present_key_values.append(present_kv)
        logits = self.model.output(self.model.norm(x))
        loss = None
        if labels is not None:
            # Shift so position t predicts token t + 1 (HF convention: labels == input_ids).
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = nn.CrossEntropyLoss()(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
        return CausalLMOutputWithPast(loss=loss, logits=logits,
                                      past_key_values=tuple(present_key_values) if present_key_values else None)

    def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[list] = None,
                                      attention_mask: Optional[torch.Tensor] = None, **kwargs) -> Dict[str, Any]:
        if past_key_values:  # with a cache, only the newest token needs to be fed forward
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache", True), "attention_mask": attention_mask}
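
For reviewers who want to sanity-check the commit, a minimal smoke test could look like the sketch below. It is illustrative only and not part of the commit: it assumes the file is importable as modeling_nebula, shrinks the config so the check runs on CPU in seconds, and relies on a transformers version that still accepts the legacy tuple KV-cache format this model returns.

# smoke_test.py -- hypothetical helper script, not part of this commit
import torch
from modeling_nebula import NebulaConfig, NebulaForCausalLM

# Deliberately tiny config (the real defaults are dim=1280, n_layers=14, vocab_size=60729).
config = NebulaConfig(dim=128, n_layers=2, n_heads=4, n_kv_heads=4,
                      vocab_size=1000, max_seq_len=64)
model = NebulaForCausalLM(config).eval()

# Token ids start at 1 so none collide with the pad_token_id passed to generate().
input_ids = torch.randint(1, config.vocab_size, (1, 16))
with torch.no_grad():
    out = model(input_ids, labels=input_ids)             # forward pass with loss
    gen = model.generate(input_ids, max_new_tokens=8,    # exercises the KV cache
                         do_sample=False, pad_token_id=0)
print(out.loss.item(), tuple(out.logits.shape), tuple(gen.shape))
# Expect logits of shape (1, 16, 1000) and generated ids of shape (1, 24).

On the Hub, these classes would normally be wired up through an auto_map entry in config.json so that AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True) can resolve them; that mapping lives in the repo's config, not in this file.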