Upload folder using huggingface_hub
- __init__.py +1 -0
- config.json +27 -0
- configuration_qmoe.py +21 -0
- generation_config.json +8 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- modeling_qmoe.py +183 -0
- special_tokens_map.json +6 -0
- tokenizer.json +0 -0
- tokenizer_config.json +22 -0
- vocab.json +0 -0
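The commit title indicates the folder was pushed with the huggingface_hub client. Below is a minimal sketch of the kind of call that produces such a commit; the local folder path and repo id are placeholders, not values taken from this upload, and an access token is assumed to be configured already.

# Sketch: push a local export directory to the Hub (placeholder paths/repo id).
from huggingface_hub import HfApi

api = HfApi()  # relies on a previously configured access token
api.upload_folder(
    folder_path="./qmoe-export",                 # local directory with the files listed above
    repo_id="your-username/your-qmoe-repo",      # hypothetical target repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)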
__init__.py
ADDED
@@ -0,0 +1 @@
(a single blank line)
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "architectures": [
    "QMoEForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "configuration_qmoe.QMoEConfig",
    "AutoModel": "modeling_qmoe.QMoEForCausalLM",
    "AutoModelForCausalLM": "modeling_qmoe.QMoEForCausalLM"
  },
  "bos_token_id": 50256,
  "d_model": 768,
  "dtype": "float32",
  "eos_token_id": 50256,
  "ffn_dim": 2048,
  "is_decoder": true,
  "max_seq_len": 512,
  "model_type": "qmoe",
  "moe_top_k": 2,
  "num_experts": 8,
  "num_heads": 16,
  "num_layers": 12,
  "pad_token_id": 50256,
  "tie_word_embeddings": false,
  "transformers_version": "4.57.3",
  "use_cache": false,
  "vocab_size": 50257
}
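Because config.json registers the custom classes through auto_map (pointing at configuration_qmoe.py and modeling_qmoe.py), loading through the transformers Auto classes requires trust_remote_code=True. A minimal loading sketch; the repo id is a placeholder.

# Sketch: auto_map resolves to custom code in this repo, so trust_remote_code=True is needed.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "your-username/your-qmoe-repo"  # hypothetical repo id
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)            # -> QMoEConfig
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)   # -> QMoEForCausalLM
tokenizer = AutoTokenizer.from_pretrained(repo)  # GPT2Tokenizer per tokenizer_config.json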
configuration_qmoe.py
ADDED
@@ -0,0 +1,21 @@
from transformers import PretrainedConfig


class QMoEConfig(PretrainedConfig):
    model_type = 'qmoe'

    def __init__(self, vocab_size=50257, d_model=768, num_layers=12, num_heads=16, max_seq_len=512, num_experts=8, moe_top_k=2, ffn_dim=2048, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.max_seq_len = max_seq_len
        self.num_experts = num_experts
        self.moe_top_k = moe_top_k
        self.ffn_dim = ffn_dim
        self.is_decoder = True
        self.add_cross_attention = False
        self.use_cache = False
        self.tie_word_embeddings = False
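The keyword defaults in QMoEConfig.__init__ mirror the architecture fields stored in config.json, so instantiating the class with only the token ids (which pass through **kwargs to PretrainedConfig) reproduces the uploaded configuration. A small sketch, assuming configuration_qmoe.py is importable from the working directory:

# Sketch: the default QMoEConfig matches the architecture fields in config.json.
from configuration_qmoe import QMoEConfig  # assumes the file is on the import path

cfg = QMoEConfig(bos_token_id=50256, eos_token_id=50256, pad_token_id=50256)
assert cfg.d_model == 768 and cfg.num_layers == 12 and cfg.num_heads == 16
assert cfg.num_experts == 8 and cfg.moe_top_k == 2 and cfg.ffn_dim == 2048
print(cfg.to_json_string())  # PretrainedConfig serializes these fields much like config.json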
generation_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "_from_model_config": true,
  "bos_token_id": 50256,
  "eos_token_id": 50256,
  "pad_token_id": 50256,
  "transformers_version": "4.57.3",
  "use_cache": false
}
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f0233e34c8a3d454ed6bf3c1f4bb4ee461611a7c1f36be1e7bd3b2d4dfa0b013
size 1633024192
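The weights are stored through Git LFS, so only this pointer (SHA-256 and byte size, roughly 1.63 GB of float32 parameters) is versioned in git; the tensor data itself is resolved at download time. A sketch of fetching just that file with hf_hub_download; the repo id is a placeholder.

# Sketch: download only the LFS-backed weights file (placeholder repo id).
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="your-username/your-qmoe-repo", filename="model.safetensors")
print(path)  # local cache path; the file should be 1633024192 bytes (~1.63 GB)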
modeling_qmoe.py
ADDED
@@ -0,0 +1,183 @@
import math
from typing import Tuple

import torch
import torch.nn as nn
from transformers import GenerationMixin, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions

from .configuration_qmoe import QMoEConfig


class RMSNorm(nn.Module):
    def __init__(self, d_model: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.scale = nn.Parameter(torch.ones(d_model, dtype=torch.float32))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        rms = x.pow(2).mean(dim=-1, keepdim=True).add(self.eps).sqrt()
        return (x / rms) * self.scale


class DenseNoBias(nn.Module):
    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.kernel = nn.Parameter(torch.empty(in_features, out_features, dtype=torch.float32))
        nn.init.normal_(self.kernel, std=0.02)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x @ self.kernel


def causal_mask(t: int, *, device: torch.device) -> torch.Tensor:
    return torch.tril(torch.ones((t, t), dtype=torch.bool, device=device))


class MultiHeadAttention(nn.Module):
    def __init__(self, d_model: int, num_heads: int):
        super().__init__()
        if d_model % num_heads != 0:
            raise ValueError('d_model must be divisible by num_heads')
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads
        self.q_proj = DenseNoBias(d_model, d_model)
        self.k_proj = DenseNoBias(d_model, d_model)
        self.v_proj = DenseNoBias(d_model, d_model)
        self.out_proj = DenseNoBias(d_model, d_model)

    def forward(self, x: torch.Tensor, *, attn_mask: torch.Tensor) -> torch.Tensor:
        b, t, d = x.shape
        q = self.q_proj(x).view(b, t, self.num_heads, self.head_dim)
        k = self.k_proj(x).view(b, t, self.num_heads, self.head_dim)
        v = self.v_proj(x).view(b, t, self.num_heads, self.head_dim)
        scale = 1.0 / math.sqrt(self.head_dim)
        att = torch.einsum('bthd,bshd->bhts', q, k) * scale
        att = att.masked_fill(~attn_mask.view(1, 1, t, t), -1e30)
        att = torch.softmax(att, dim=-1)
        out = torch.einsum('bhts,bshd->bthd', att, v).contiguous()
        out = out.view(b, t, d)
        return self.out_proj(out)


class Router(nn.Module):
    def __init__(self, d_model: int, num_experts: int, top_k: int):
        super().__init__()
        self.num_experts = num_experts
        self.top_k = top_k
        self.gate = DenseNoBias(d_model, num_experts)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        logits = self.gate(x)
        probs = torch.softmax(logits, dim=-1)
        topk_vals, topk_idx = torch.topk(probs, k=self.top_k, dim=-1)
        denom = topk_vals.sum(dim=-1, keepdim=True).clamp_min(1e-6)
        gates = topk_vals / denom
        return topk_idx, gates


class ExpertMLPBank(nn.Module):
    def __init__(self, d_model: int, hidden_dim: int, num_experts: int):
        super().__init__()
        self.w1 = nn.Parameter(torch.empty(num_experts, d_model, hidden_dim, dtype=torch.float32))
        self.b1 = nn.Parameter(torch.zeros(num_experts, hidden_dim, dtype=torch.float32))
        self.w2 = nn.Parameter(torch.empty(num_experts, hidden_dim, d_model, dtype=torch.float32))
        self.b2 = nn.Parameter(torch.zeros(num_experts, d_model, dtype=torch.float32))
        nn.init.normal_(self.w1, std=0.02)
        nn.init.normal_(self.w2, std=0.02)

    def forward(self, x: torch.Tensor, expert_idx: torch.Tensor) -> torch.Tensor:
        w1 = self.w1.index_select(0, expert_idx)
        b1 = self.b1.index_select(0, expert_idx)
        w2 = self.w2.index_select(0, expert_idx)
        b2 = self.b2.index_select(0, expert_idx)
        h = torch.einsum('nd,ndh->nh', x, w1) + b1
        h = torch.nn.functional.silu(h)
        y = torch.einsum('nh,nhd->nd', h, w2) + b2
        return y


class MoEFeedForward(nn.Module):
    def __init__(self, d_model: int, hidden_dim: int, num_experts: int, top_k: int):
        super().__init__()
        self.router = Router(d_model=d_model, num_experts=num_experts, top_k=top_k)
        self.experts = ExpertMLPBank(d_model=d_model, hidden_dim=hidden_dim, num_experts=num_experts)
        self.top_k = top_k

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, t, d = x.shape
        topk_idx, gates = self.router(x)
        x_flat = x.reshape(b * t, d)
        idx_flat = topk_idx.reshape(b * t, self.top_k)
        gates_flat = gates.reshape(b * t, self.top_k)
        y = torch.zeros_like(x_flat)
        for j in range(self.top_k):
            e_idx = idx_flat[:, j]
            y_j = self.experts(x_flat, e_idx)
            y = y + y_j * gates_flat[:, j : j + 1]
        return y.reshape(b, t, d)


class Block(nn.Module):
    def __init__(self, d_model: int, num_heads: int, hidden_dim: int, num_experts: int, top_k: int):
        super().__init__()
        self.rmsnorm_0 = RMSNorm(d_model)
        self.attn = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
        self.rmsnorm_1 = RMSNorm(d_model)
        self.moe = MoEFeedForward(d_model=d_model, hidden_dim=hidden_dim, num_experts=num_experts, top_k=top_k)

    def forward(self, x: torch.Tensor, *, attn_mask: torch.Tensor) -> torch.Tensor:
        h = self.rmsnorm_0(x)
        x = x + self.attn(h, attn_mask=attn_mask)
        h = self.rmsnorm_1(x)
        x = x + self.moe(h)
        return x


class QMoEForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = QMoEConfig
    main_input_name = 'input_ids'

    def __init__(self, config: QMoEConfig):
        super().__init__(config)
        self.tok_emb = nn.Embedding(config.vocab_size, config.d_model)
        self.pos_emb = nn.Embedding(config.max_seq_len, config.d_model)
        self.blocks = nn.ModuleList([Block(config.d_model, config.num_heads, config.ffn_dim, config.num_experts, config.moe_top_k) for _ in range(config.num_layers)])
        self.rmsnorm_f = RMSNorm(config.d_model)
        self.lm_head = DenseNoBias(config.d_model, config.vocab_size)
        self.post_init()

    def get_input_embeddings(self):
        return self.tok_emb

    def set_input_embeddings(self, value):
        self.tok_emb = value

    def get_output_embeddings(self):
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
        return {'input_ids': input_ids, 'attention_mask': attention_mask}

    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        if input_ids is None:
            raise ValueError('input_ids is required')
        b, t = input_ids.shape
        device = input_ids.device
        tok = self.tok_emb(input_ids)
        pos_idx = torch.arange(t, device=device).unsqueeze(0)
        pos = self.pos_emb(pos_idx)
        x = tok + pos
        attn_mask = causal_mask(t, device=device)
        for blk in self.blocks:
            x = blk(x, attn_mask=attn_mask)
        x = self.rmsnorm_f(x)
        logits = self.lm_head(x)
        loss = None
        if labels is not None:
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = torch.nn.functional.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), ignore_index=-100)
        return CausalLMOutputWithCrossAttentions(logits=logits, loss=loss)
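Taken together with generation_config.json, this defines a 12-layer decoder-only model with learned absolute position embeddings (capped at max_seq_len=512) and top-2 routing over 8 experts per block; use_cache is false and prepare_inputs_for_generation always re-feeds the full prefix, so decoding recomputes attention over the whole sequence at every step. A minimal end-to-end sketch with a placeholder repo id:

# End-to-end sketch: tokenize, generate, decode. Repo id is a placeholder.
# Note: no KV cache is used, so each decoding step reprocesses the full prefix.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-username/your-qmoe-repo"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).eval()

inputs = tokenizer("Once upon a time", return_tensors="pt")
with torch.no_grad():
    out = model.generate(
        **inputs,
        max_new_tokens=40,                     # keep prompt + output within max_seq_len=512
        do_sample=True,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,   # pad == eos == <|endoftext|> (50256)
    )
print(tokenizer.decode(out[0], skip_special_tokens=True))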
special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
{
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "pad_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>"
}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,22 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "50256": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "extra_special_tokens": {},
  "model_max_length": 512,
  "name_or_path": "Q-MoE-400-90000",
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.