Upload Llama2.py
Llama2.py
ADDED
@@ -0,0 +1,282 @@
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.regularizers import L2
from tensorflow.keras import Model
import math
from dataclasses import dataclass
from typing import Optional


@dataclass
class ModelArgs:
    # default hyperparameters for the Llama 7B model
    dim: int = 4096
    n_layers: int = 32
    n_heads: int = 32
    n_kv_heads: Optional[int] = None
    vocab_size: int = 32000
    hidden_dim: Optional[int] = None
    multiple_of: int = 256  # MLP hidden layer size will be a multiple of this value
    norm_eps: float = 1e-5
    max_seq_len: int = 2048
    dropout: float = 0.0
    weight_decay: float = 0.1


class RMSNorm(tf.keras.layers.Layer):
    # subclassing keras Layer so the scale variable is tracked by the enclosing Model
    def __init__(self, dim: int, eps: float):
        super().__init__()
        self.eps = eps
        self.weight = tf.Variable(tf.ones((dim,)))

    def _norm(self, x):
        return x * tf.math.rsqrt(tf.reduce_mean(tf.math.pow(x, 2), -1, keepdims=True) + self.eps)

    def call(self, x):
        # normalize in float32 for numerical stability, then cast back to the input dtype
        output = tf.cast(self._norm(tf.cast(x, 'float32')), x.dtype)
        return output * self.weight


def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
    freqs = 1.0 / (theta ** (tf.cast(tf.range(0, dim, 2)[: (dim // 2)], 'float32') / dim))
    t = tf.range(end, dtype=tf.float32)  # float32 so the outer product below is well-typed
    freqs = tf.cast(tf.experimental.numpy.outer(t, freqs), 'float32')
    freqs_cos = tf.math.cos(freqs)  # real part
    freqs_sin = tf.math.sin(freqs)  # imaginary part
    return freqs_cos, freqs_sin

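# Quick shape check (illustrative, not part of the original upload): with a
# per-head dimension of 128 and a 2048-token window, precompute_freqs_cis(128, 2048)
# returns two (2048, 64) tensors -- one cos and one sin value per (position,
# frequency) pair, consumed by apply_rotary_emb below.
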
def reshape_for_broadcast(freqs_cis, x):
    ndim = x.ndim
    assert 0 <= 1 < ndim
    assert freqs_cis.shape == (x.shape[1], x.shape[-1])
    # keep the seqlen and head-dim axes, set every other axis to 1 so the
    # (seqlen, head_dim) table broadcasts across batch and heads
    shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
    return tf.reshape(freqs_cis, shape)

+
def apply_rotary_emb(
|
| 56 |
+
xq,
|
| 57 |
+
xk,
|
| 58 |
+
freqs_cos,
|
| 59 |
+
freqs_sin
|
| 60 |
+
):
|
| 61 |
+
|
| 62 |
+
# reshape xq and xk to match the complex representation
|
| 63 |
+
xq_r, xq_i = tf.unstack(tf.reshape(tf.cast(xq, 'float32'), (xq.shape[:-1] + (xq.shape[-1] // 2, 2))), axis=-1)
|
| 64 |
+
xk_r, xk_i = tf.unstack(tf.reshape(tf.cast(xk, 'float32'), (xk.shape[:-1] + (xk.shape[-1] // 2, 2))), axis=-1)
|
| 65 |
+
|
| 66 |
+
# reshape freqs_cos and freqs_sin for broadcasting
|
| 67 |
+
freqs_cos = reshape_for_broadcast(freqs_cos, xq_r)
|
| 68 |
+
freqs_sin = reshape_for_broadcast(freqs_sin, xq_r)
|
| 69 |
+
|
| 70 |
+
# apply rotation using real numbers
|
| 71 |
+
xq_out_r = xq_r * freqs_cos - xq_i * freqs_sin
|
| 72 |
+
xq_out_i = xq_r * freqs_sin + xq_i * freqs_cos
|
| 73 |
+
xk_out_r = xk_r * freqs_cos - xk_i * freqs_sin
|
| 74 |
+
xk_out_i = xk_r * freqs_sin + xk_i * freqs_cos
|
| 75 |
+
|
| 76 |
+
# flatten last two dimensions
|
| 77 |
+
xq_out = tf.stack([xq_out_r, xq_out_i], axis=-1)
|
| 78 |
+
shape = xq_out.shape
|
| 79 |
+
xq_out = tf.reshape(xq_out, [-1, shape[1], shape[2], shape[3] * shape[4]])
|
| 80 |
+
xk_out = tf.stack([xk_out_r, xk_out_i], axis=-1)
|
| 81 |
+
shape = xk_out.shape
|
| 82 |
+
xk_out = tf.reshape(xk_out, [-1, shape[1], shape[2], shape[3] * shape[4]])
|
| 83 |
+
|
| 84 |
+
return tf.cast(xq_out, xq.dtype), tf.cast(xk_out, xk.dtype)
|
| 85 |
+
|
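# Reading the block above as complex arithmetic (explanatory note, not from the
# original): with q = q_r + i*q_i and e^{i*theta} = cos(theta) + i*sin(theta),
# the four product lines compute q * e^{i*theta}, i.e. each (even, odd) feature
# pair is rotated by a position-dependent angle, which is the RoPE scheme.
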
def repeat_kv(x, n_rep: int):
    bs, slen, n_kv_heads, head_dim = x.shape
    if n_rep == 1:
        return x
    # insert a new axis, tile each kv head n_rep times, then fold it into the head axis
    return tf.reshape(tf.tile(x[:, :, :, None, :], [1, 1, 1, n_rep, 1]), (bs, slen, n_kv_heads * n_rep, head_dim))

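# e.g. (illustrative): a key tensor of shape (2, 16, 4, 64) with n_rep=2 becomes
# (2, 16, 8, 64), duplicating each kv head so that 8 query heads can attend
# against 4 underlying kv heads, as grouped-query attention requires.
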
class Attention(tf.keras.layers.Layer):
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        assert args.n_heads % self.n_kv_heads == 0
        model_parallel_size = 1
        self.n_local_heads = args.n_heads // model_parallel_size
        self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.dim // args.n_heads
        self.wq = Dense(args.n_heads * self.head_dim, kernel_initializer=RandomNormal(stddev=0.02),
                        kernel_regularizer=L2(args.weight_decay), use_bias=False)
        self.wk = Dense(self.n_kv_heads * self.head_dim, kernel_initializer=RandomNormal(stddev=0.02),
                        kernel_regularizer=L2(args.weight_decay), use_bias=False)
        self.wv = Dense(self.n_kv_heads * self.head_dim, kernel_initializer=RandomNormal(stddev=0.02),
                        kernel_regularizer=L2(args.weight_decay), use_bias=False)
        self.wo = Dense(args.dim, kernel_initializer=RandomNormal(stddev=0.02 / math.sqrt(2 * args.n_layers)),
                        kernel_regularizer=L2(args.weight_decay), use_bias=False)
        self.attn_dropout = Dropout(args.dropout)
        self.resid_dropout = Dropout(args.dropout)
        # causal mask: -inf strictly above the diagonal, 0 on and below it
        self.mask = tf.fill((args.max_seq_len, args.max_seq_len), float("-inf"))
        self.mask = tf.linalg.band_part(self.mask, 0, -1)
        self.mask = tf.linalg.set_diag(self.mask, tf.zeros(args.max_seq_len))
        self.mask = tf.reshape(self.mask, (1, 1, *self.mask.shape))

    def call(
        self,
        x,
        freqs_cos,
        freqs_sin,
    ):
        bsz, seqlen, _ = x.shape

        # QKV
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
        xq = tf.reshape(xq, (bsz, seqlen, self.n_local_heads, self.head_dim))
        xk = tf.reshape(xk, (bsz, seqlen, self.n_local_kv_heads, self.head_dim))
        xv = tf.reshape(xv, (bsz, seqlen, self.n_local_kv_heads, self.head_dim))

        # RoPE relative positional embeddings
        xq, xk = apply_rotary_emb(xq, xk, freqs_cos, freqs_sin)

        # grouped multiquery attention: expand out keys and values
        xk = repeat_kv(xk, self.n_rep)  # (bs, seqlen, n_local_heads, head_dim)
        xv = repeat_kv(xv, self.n_rep)  # (bs, seqlen, n_local_heads, head_dim)

        # make heads into a batch dimension
        xq = tf.transpose(xq, (0, 2, 1, 3))  # (bs, n_local_heads, seqlen, head_dim)
        xk = tf.transpose(xk, (0, 2, 1, 3))
        xv = tf.transpose(xv, (0, 2, 1, 3))

        scores = tf.matmul(xq, tf.transpose(xk, (0, 1, 3, 2))) / math.sqrt(self.head_dim)
        assert hasattr(self, 'mask')
        scores = scores + self.mask[:, :, :seqlen, :seqlen]  # (bs, n_local_heads, seqlen, seqlen)
        scores = tf.cast(tf.nn.softmax(tf.cast(scores, 'float32'), axis=-1), xq.dtype)
        scores = self.attn_dropout(scores)
        output = tf.matmul(scores, xv)  # (bs, n_local_heads, seqlen, head_dim)

        # restore time as batch dimension and concat heads
        output = tf.reshape(tf.transpose(output, (0, 2, 1, 3)), (bsz, seqlen, -1))

        # final projection into the residual stream
        output = self.wo(output)
        output = self.resid_dropout(output)
        return output


class FeedForward(tf.keras.layers.Layer):
    def __init__(self, dim: int, hidden_dim: Optional[int], multiple_of: int, drop_rate: float,
                 weight_decay: float, n_layers: int):
        super().__init__()
        if hidden_dim is None:
            hidden_dim = 4 * dim
            hidden_dim = int(2 * hidden_dim / 3)
            hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        # weight_decay and n_layers are passed in explicitly rather than read off the
        # ModelArgs class defaults, so per-instance overrides are respected
        self.w1 = Dense(hidden_dim, kernel_initializer=RandomNormal(stddev=0.02),
                        kernel_regularizer=L2(weight_decay), use_bias=False)
        self.w2 = Dense(dim, kernel_initializer=RandomNormal(stddev=0.02),
                        kernel_regularizer=L2(weight_decay), use_bias=False)
        self.w3 = Dense(hidden_dim, kernel_initializer=RandomNormal(stddev=0.02 / math.sqrt(2 * n_layers)),
                        kernel_regularizer=L2(weight_decay), use_bias=False)
        self.dropout = Dropout(drop_rate)

    def call(self, x):
        # SwiGLU: silu-gated linear unit
        return self.dropout(self.w2(tf.nn.silu(self.w1(x)) * self.w3(x)))


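# Worked example of the hidden_dim arithmetic above (illustrative, not from the
# original): with dim=4096 and multiple_of=256, 4*4096 = 16384, int(2*16384/3) =
# 10922, and rounding up to a multiple of 256 gives 11008 -- the FFN width used
# by Llama 7B.
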
class TransformerBlock(tf.keras.layers.Layer):
    def __init__(self, layer_id: int, args: ModelArgs):
        super().__init__()
        self.n_heads = args.n_heads
        self.dim = args.dim
        self.head_dim = args.dim // args.n_heads
        self.attention = Attention(args)
        self.feed_forward = FeedForward(
            dim=args.dim,
            hidden_dim=args.hidden_dim,
            multiple_of=args.multiple_of,
            drop_rate=args.dropout,
            weight_decay=args.weight_decay,
            n_layers=args.n_layers,
        )
        self.layer_id = layer_id
        self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)

    def call(self, x, freqs_cos, freqs_sin):
        # pre-norm residual connections around attention and the MLP
        h = x + self.attention(self.attention_norm(x), freqs_cos, freqs_sin)
        out = h + self.feed_forward(self.ffn_norm(h))
        return out


class Llama2(Model):
    def __init__(self, params: ModelArgs):
        super(Llama2, self).__init__()
        self.params = params
        self.vocab_size = params.vocab_size
        self.n_layers = params.n_layers

        self.dropout = Dropout(params.dropout)
        # trailing underscore because `layers` is a read-only property on keras Model
        self.layers_ = []
        for layer_id in range(params.n_layers):
            self.layers_.append(TransformerBlock(layer_id, params))
        self.norm = RMSNorm(params.dim, eps=params.norm_eps)
        # named output_proj because `output` is a read-only property on keras Model
        self.output_proj = Dense(params.vocab_size, kernel_initializer=RandomNormal(stddev=0.02),
                                 kernel_regularizer=L2(params.weight_decay), use_bias=False)
        # build eagerly so the kernel exists for the tied token embedding below
        self.output_proj.build((None, params.dim))

        # some useful precompute for the RoPE relative positional embeddings
        self.freqs_cos, self.freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)

    def call(self, tokens, training=False):
        _bsz, seqlen = tokens.shape
        # token embedding shares weights with the output projection (weight tying)
        h = tf.gather(tf.transpose(self.output_proj.kernel), tokens)
        h = self.dropout(h, training=training)
        freqs_cos = self.freqs_cos[:seqlen]
        freqs_sin = self.freqs_sin[:seqlen]

        for layer in self.layers_:
            h = layer(h, freqs_cos, freqs_sin)
        h = self.norm(h)

        if training:
            # during training, compute logits at every position (e.g. for the loss)
            logits = self.output_proj(h)
        else:
            # inference-time mini-optimization: only forward the output on the very last position
            logits = self.output_proj(h[:, -1:, :])  # note: slicing with -1: preserves the time dim

        return logits

    def estimate_mfu(self, fwdbwd_per_iter, dt):
        """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
        # first estimate the number of flops we do per iteration.
        # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
        N = sum(int(tf.size(p)) for p in self.trainable_variables)
        cfg = self.params
        L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim // cfg.n_heads, cfg.max_seq_len
        flops_per_token = 6 * N + 12 * L * H * Q * T
        flops_per_fwdbwd = flops_per_token * T
        flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
        # express our flops throughput as ratio of A100 bfloat16 peak flops
        flops_achieved = flops_per_iter * (1.0 / dt)  # per second
        flops_promised = 312e12  # A100 GPU bfloat16 peak flops is 312 TFLOPS
        mfu = flops_achieved / flops_promised
        return mfu

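    # Sanity check of the arithmetic above (illustrative, not from the original):
    # with N ~ 7e9 and L=32, H=32, Q=128, T=2048, flops_per_token is about
    # 6*7e9 + 12*32*32*128*2048 ~ 45.2e9, so a single fwd+bwd over one
    # 2048-token sequence costs roughly 45.2e9 * 2048 ~ 9.3e13 FLOPs.
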
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """
        Take a conditioning sequence of indices idx (int tensor of shape (b,t)) and complete
        the sequence max_new_tokens times, feeding the predictions back into the model each time.
        Most likely you'll want to make sure to be in inference mode (training=False) for this.
        Also note this is a super inefficient version of sampling with no key/value cache.
        """
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long we must crop it at max_seq_len
            idx_cond = idx if idx.shape[1] <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]
            # forward the model to get the logits for the index in the sequence
            logits = self(idx_cond)
            logits = logits[:, -1, :]  # crop to just the final time step
            if temperature == 0.0:
                # "sample" the single most likely index
                idx_next = tf.math.argmax(logits, axis=-1)[:, None]
            else:
                # pluck the logits at the final step and scale by desired temperature
                logits = logits / temperature
                # optionally crop the logits to only the top k options
                if top_k is not None:
                    k = tf.minimum(top_k, logits.shape[-1])
                    v, _ = tf.math.top_k(logits, k=k, sorted=True)
                    # tensors don't support item assignment, so mask with tf.where instead
                    logits = tf.where(logits < v[:, -1:], float('-inf'), logits)
                # apply softmax to convert logits to (normalized) probabilities
                probs = tf.nn.softmax(logits, axis=-1)
                # tf.random.categorical expects log-probabilities, i.e. logits
                idx_next = tf.random.categorical(tf.math.log(probs), num_samples=1)
            # append sampled index to the running sequence and continue
            # (cast because argmax / categorical return int64)
            idx = tf.concat((idx, tf.cast(idx_next, idx.dtype)), axis=1)

        return idx
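

# A minimal smoke test, assuming eager execution; the toy hyperparameters below
# are illustrative and not part of the original upload.
if __name__ == "__main__":
    args = ModelArgs(dim=64, n_layers=2, n_heads=4, vocab_size=128,
                     max_seq_len=32, dropout=0.0)
    model = Llama2(args)
    tokens = tf.random.uniform((1, 8), maxval=args.vocab_size, dtype=tf.int32)
    logits = model(tokens)  # inference path: logits only for the last position
    print("logits shape:", logits.shape)  # expected (1, 1, 128)
    out = model.generate(tokens, max_new_tokens=4, temperature=1.0, top_k=16)
    print("generated shape:", out.shape)  # expected (1, 12)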