lccurious committed
Commit 1b8bc9e · 1 Parent(s): 09ff5ec

First model version

config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "architectures": [
+    "LLaDA2MoeModelLM"
+  ],
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_llada2_moe.LLaDA2MoeConfig",
+    "AutoModel": "modeling_llada2_moe.LLaDA2MoeModel",
+    "AutoModelForCausalLM": "modeling_llada2_moe.LLaDA2MoeModelLM"
+  },
+  "dtype": "bfloat16",
+  "embedding_dropout": 0.0,
+  "first_k_dense_replace": 1,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "llada2_moe",
+  "moe_intermediate_size": 512,
+  "moe_router_enable_expert_bias": true,
+  "n_group": 8,
+  "norm_head": false,
+  "norm_softmax": false,
+  "norm_topk_prob": true,
+  "num_attention_heads": 16,
+  "num_experts": 256,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 20,
+  "num_key_value_heads": 4,
+  "num_shared_experts": 1,
+  "output_dropout": 0.0,
+  "output_router_logits": false,
+  "pad_token_id": 156892,
+  "partial_rotary_factor": 0.5,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 600000,
+  "rotary_dim": 64,
+  "routed_scaling_factor": 2.5,
+  "router_dtype": "fp32",
+  "score_function": "sigmoid",
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "topk_group": 4,
+  "transformers_version": "4.57.1",
+  "use_bias": false,
+  "use_cache": false,
+  "use_qkv_bias": false,
+  "use_rmsnorm": true,
+  "use_sliding_window": false,
+  "using_split_qkv_in_self_attention": false,
+  "vocab_size": 157184
+}
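Given the `auto_map` entries above, this checkpoint is meant to be loaded with `trust_remote_code=True` so that the custom configuration and modeling files in this commit are picked up. A minimal sketch (the local path is a placeholder, not part of this commit):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local path to a checkout of this repository; adjust as needed.
model_path = "./LLaDA2-MoE"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,        # resolves the auto_map classes above
    torch_dtype=torch.bfloat16,    # matches "dtype": "bfloat16" in config.json
)
```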
configuration_llada2_moe.py ADDED
@@ -0,0 +1,89 @@
+"""LLaDA2 MoE model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class LLaDA2MoeConfig(PretrainedConfig):
+    model_type = "llada2_moe"
+
+    def __init__(
+        self,
+        vocab_size=30592,
+        hidden_size=1024,
+        intermediate_size=None,
+        num_hidden_layers=24,
+        num_attention_heads=16,
+        num_key_value_heads=0,
+        hidden_act="silu",
+        use_qkv_bias=False,  # llada2 only
+        use_qk_norm=True,
+        use_bias=True,  # llada2 only
+        rms_norm_eps=1e-05,
+        norm_head=False,  # llada2 only
+        tie_word_embeddings=False,  # PretrainedConfig key; the default value is overridden here.
+        embedding_dropout=0.1,
+        attention_dropout=0.1,
+        output_dropout=0.1,
+        initializer_range=0.02,
+        max_position_embeddings=16384,
+        rope_theta=10000.0,
+        use_cache=True,
+        use_sliding_window=False,
+        sliding_window=4096,
+        max_window_layers=28,
+        rope_scaling=None,
+        pad_token_id=126081,
+        num_experts=16,
+        num_shared_experts=0,
+        num_experts_per_tok=2,
+        n_group=8,
+        topk_group=4,
+        routed_scaling_factor=2.5,
+        moe_intermediate_size=None,
+        first_k_dense_replace=0,
+        head_dim=None,
+        output_router_logits=False,
+        partial_rotary_factor=0.5,
+        **kwargs,
+    ):
+        self.num_hidden_layers = num_hidden_layers
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_attention_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.use_qkv_bias = use_qkv_bias
+        self.use_qk_norm = use_qk_norm
+        self.use_bias = use_bias
+        self.norm_head = norm_head
+        self.rms_norm_eps = rms_norm_eps
+        self.embedding_dropout = embedding_dropout
+        self.attention_dropout = attention_dropout
+        self.output_dropout = output_dropout
+        self.initializer_range = initializer_range
+        self.max_position_embeddings = max_position_embeddings
+        self.rope_theta = rope_theta
+        self.use_cache = use_cache
+        self.use_sliding_window = use_sliding_window
+        self.sliding_window = sliding_window
+        self.max_window_layers = max_window_layers
+        self.head_dim = head_dim or self.hidden_size // self.num_attention_heads
+        self.rope_scaling = rope_scaling
+
+        # MoE configs
+        self.num_experts = num_experts
+        self.num_shared_experts = num_shared_experts
+        self.num_experts_per_tok = num_experts_per_tok
+        self.n_group = n_group
+        self.topk_group = topk_group
+        self.moe_intermediate_size = moe_intermediate_size
+        self.first_k_dense_replace = first_k_dense_replace
+        self.output_router_logits = output_router_logits
+        self.routed_scaling_factor = routed_scaling_factor
+        self.partial_rotary_factor = partial_rotary_factor
+
+        super().__init__(
+            pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
+        )
+
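The constructor defaults above are generic; the shipped config.json overrides most of them. A small sketch of how the derived `head_dim` falls out of the constructor, assuming the file is importable from the working directory (values copied from the config.json in this commit):

```python
from configuration_llada2_moe import LLaDA2MoeConfig

# Any field omitted here keeps the default defined in the constructor above.
config = LLaDA2MoeConfig(
    hidden_size=2048,
    num_attention_heads=16,
    num_key_value_heads=4,
    num_experts=256,
    num_experts_per_tok=8,
    head_dim=128,
)

# With head_dim=None it would fall back to hidden_size // num_attention_heads
# (2048 // 16 = 128), so the explicit value in config.json matches the derived one.
assert config.head_dim == 128
```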
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb97beb957df1551ee6508f0d0ccec9b79fdc8b4adf988bcd72a8697e87154c
+size 4999887840
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecd1c445bd190a966bd8731a957b32d3416946284999ade15f171d85dffe7305
+size 4998880752
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e2fa25a6f1e5385e27ce07be192158c3fad7b9120402c7152dd958bd50bc33e
+size 4998880776
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:562f7aaf6f5a5dfd99298e985b46163ed0383d63462d1a6678adcaa4e781074f
+size 4998882960
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fac238eb1f4283efb0733754943e2b5a2c53665be6fa5ea355c1224b03d2e1e
+size 4998883176
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f059d81227766cddc9de337aaa448a86163d6145f956009d14f48557af8298be
+size 4998883200
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31f4862396580c6e3de277746234b8f120196c0b5741aaea92b46cd4b5ce59df
+size 2518823800
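The seven shards above are git-lfs pointer files; only the sha256 and byte size are stored in the repository. A quick sanity check on the sizes listed in this commit (approximate, since safetensors headers and non-weight buffers are included):

```python
# Shard sizes copied from the LFS pointers above, in bytes.
shard_sizes = [
    4_999_887_840, 4_998_880_752, 4_998_880_776, 4_998_882_960,
    4_998_883_176, 4_998_883_200, 2_518_823_800,
]
total = sum(shard_sizes)
print(f"{total / 1e9:.1f} GB of weights")        # ~32.5 GB
print(f"~{total / 2 / 1e9:.1f}B parameters")     # 2 bytes per bfloat16 value -> roughly 16B
```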
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_llada2_moe.py ADDED
@@ -0,0 +1,1434 @@
1
+ # Copyright 2025 Antgroup and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
4
+ # and OPT implementations in this library. It has been modified from its
5
+ # original forms to accommodate minor architectural differences compared
6
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+ """PyTorch LLaDA2MoE model."""
20
+
21
+ import math
22
+ from typing import List, Callable, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ from torch import nn
27
+ from torch.nn import CrossEntropyLoss
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.modeling_attn_mask_utils import (
32
+ _prepare_4d_causal_attention_mask,
33
+ _prepare_4d_causal_attention_mask_for_sdpa,
34
+ )
35
+ from transformers.modeling_outputs import (
36
+ MoeModelOutputWithPast,
37
+ MoeCausalLMOutputWithPast,
38
+ )
39
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
40
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
41
+ from transformers.processing_utils import Unpack
42
+ from transformers.pytorch_utils import (
43
+ ALL_LAYERNORM_LAYERS,
44
+ is_torch_greater_or_equal_than_1_13,
45
+ )
46
+ from transformers.utils import (
47
+ TransformersKwargs,
48
+ add_start_docstrings,
49
+ add_start_docstrings_to_model_forward,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from transformers.utils.import_utils import is_torch_fx_available
54
+ from .configuration_llada2_moe import LLaDA2MoeConfig
55
+ from transformers.generation.utils import GenerationMixin
56
+
57
+
58
+ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
59
+ # It means that the function will not be traced through and simply appear as a node in the graph.
60
+ if is_torch_fx_available():
61
+ if not is_torch_greater_or_equal_than_1_13:
62
+ import torch.fx
63
+
64
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
65
+
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+ _CONFIG_FOR_DOC = "LLaDA2MoeConfig"
70
+
71
+
72
+ def _get_unpad_data(attention_mask):
73
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
74
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
75
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
76
+ cu_seqlens = F.pad(
77
+ torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)
78
+ )
79
+ return (
80
+ indices,
81
+ cu_seqlens,
82
+ max_seqlen_in_batch,
83
+ )
84
+
85
+
86
+ class LLaDA2MoeRMSNorm(nn.Module):
87
+ def __init__(self, hidden_size, eps=1e-6):
88
+ """
89
+ LLaDA2MoeRMSNorm is equivalent to T5LayerNorm
90
+ """
91
+ super().__init__()
92
+ self.weight = nn.Parameter(torch.ones(hidden_size))
93
+ self.variance_epsilon = eps
94
+
95
+ def forward(self, hidden_states):
96
+ input_dtype = hidden_states.dtype
97
+ hidden_states = hidden_states.to(torch.float32)
98
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
99
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
100
+ return self.weight * hidden_states.to(input_dtype)
101
+
102
+
103
+ ALL_LAYERNORM_LAYERS.append(LLaDA2MoeRMSNorm)
104
+
105
+
106
+ class LLaDA2MoeRotaryEmbedding(nn.Module):
107
+ def __init__(self, config: LLaDA2MoeConfig, device=None):
108
+ super().__init__()
109
+ # BC: "rope_type" was originally "type"
110
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
111
+ self.rope_type = config.rope_scaling.get(
112
+ "rope_type", config.rope_scaling.get("type")
113
+ )
114
+ else:
115
+ self.rope_type = "default"
116
+ self.max_seq_len_cached = config.max_position_embeddings
117
+ self.original_max_seq_len = config.max_position_embeddings
118
+
119
+ self.config = config
120
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
121
+
122
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
123
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
124
+ self.original_inv_freq = self.inv_freq
125
+
126
+ @torch.no_grad()
127
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
128
+ def forward(self, x, position_ids):
129
+ inv_freq_expanded = (
130
+ self.inv_freq[None, :, None]
131
+ .float()
132
+ .expand(position_ids.shape[0], -1, 1)
133
+ .to(x.device)
134
+ )
135
+ position_ids_expanded = position_ids[:, None, :].float()
136
+
137
+ device_type = (
138
+ x.device.type
139
+ if isinstance(x.device.type, str) and x.device.type != "mps"
140
+ else "cpu"
141
+ )
142
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
143
+ freqs = (
144
+ inv_freq_expanded.float() @ position_ids_expanded.float()
145
+ ).transpose(1, 2)
146
+ emb = torch.cat((freqs, freqs), dim=-1)
147
+ cos = emb.cos() * self.attention_scaling
148
+ sin = emb.sin() * self.attention_scaling
149
+
150
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
151
+
152
+
153
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
154
+ def rotate_half(x):
155
+ """Rotates half the hidden dims of the input."""
156
+ x1 = x[..., : x.shape[-1] // 2]
157
+ x2 = x[..., x.shape[-1] // 2 :]
158
+ return torch.cat((-x2, x1), dim=-1)
159
+
160
+
161
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
162
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
163
+ """Applies Rotary Position Embedding to the query and key tensors.
164
+
165
+ Args:
166
+ q (`torch.Tensor`): The query tensor.
167
+ k (`torch.Tensor`): The key tensor.
168
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
169
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
170
+ position_ids (`torch.Tensor`):
171
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
172
+ used to pass offsetted position ids when working with a KV-cache.
173
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
174
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
175
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
176
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
177
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
178
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
179
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
180
+ Returns:
181
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
182
+ """
183
+ cos = cos.unsqueeze(unsqueeze_dim)
184
+ sin = sin.unsqueeze(unsqueeze_dim)
185
+
186
+ # Keep half or full tensor for later concatenation
187
+ rotary_dim = cos.shape[-1]
188
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
189
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
190
+
191
+ # Apply rotary embeddings on the first half or full tensor
192
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
193
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
194
+
195
+ # Concatenate back to full shape
196
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
197
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
198
+ return q_embed, k_embed
199
+
200
+
201
+ class LLaDA2MoeMLP(nn.Module):
202
+ def __init__(self, config: LLaDA2MoeConfig, intermediate_size: int):
203
+ super().__init__()
204
+ self.config = config
205
+ self.hidden_size = config.hidden_size
206
+ self.intermediate_size = intermediate_size
207
+
208
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
209
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
210
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
211
+ self.act_fn = ACT2FN[config.hidden_act]
212
+
213
+ def forward(self, x):
214
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
215
+
216
+
217
+ class LLaDA2MoeGate(nn.Module):
218
+ def __init__(self, config):
219
+ super().__init__()
220
+ self.config = config
221
+ self.top_k = config.num_experts_per_tok
222
+ self.num_experts = config.num_experts
223
+
224
+ self.n_group = config.n_group
225
+ self.topk_group = config.topk_group
226
+
227
+ # topk selection algorithm
228
+ self.gating_dim = config.hidden_size
229
+ self.weight = nn.Parameter(torch.empty((self.num_experts, self.gating_dim)))
230
+ self.routed_scaling_factor = config.routed_scaling_factor
231
+
232
+ self.register_buffer("expert_bias", torch.zeros(self.num_experts))
233
+ self.reset_parameters()
234
+
235
+ def reset_parameters(self) -> None:
236
+ import torch.nn.init as init
237
+
238
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
239
+
240
+ def group_limited_topk(
241
+ self,
242
+ scores: torch.Tensor,
243
+ ):
244
+ num_tokens, _ = scores.size()
245
+ # Organize the experts into groups
246
+ group_scores = (
247
+ scores.view(num_tokens, self.n_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
248
+ )
249
+ group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
250
+ group_mask = torch.zeros_like(group_scores)
251
+ group_mask.scatter_(1, group_idx, 1)
252
+
253
+ # Mask the experts based on selection groups
254
+ score_mask = (
255
+ group_mask.unsqueeze(-1)
256
+ .expand(num_tokens, self.n_group, self.num_experts // self.n_group)
257
+ .reshape(num_tokens, -1)
258
+ )
259
+
260
+ masked_scores = scores.masked_fill(~score_mask.bool(), float("-inf"))
261
+ probs, top_indices = torch.topk(masked_scores, k=self.top_k, dim=-1)
262
+
263
+ return probs, top_indices
264
+
265
+ def forward(self, hidden_states):
266
+ # compute gating score
267
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
268
+ logits = F.linear(
269
+ hidden_states.type(torch.float32), self.weight.type(torch.float32)
270
+ )
271
+
272
+ scores = torch.sigmoid(logits.float()).type_as(logits)
273
+
274
+ scores_for_routing = scores + self.expert_bias
275
+ _, topk_idx = self.group_limited_topk(scores_for_routing)
276
+
277
+ scores = torch.gather(scores, dim=1, index=topk_idx).type_as(logits)
278
+
279
+ topk_weight = (
280
+ scores / (scores.sum(dim=-1, keepdim=True) + 1e-20)
281
+ if self.top_k > 1
282
+ else scores
283
+ )
284
+ topk_weight = topk_weight * self.routed_scaling_factor
285
+
286
+ return topk_idx, topk_weight, logits
287
+
288
+
289
+ class LLaDA2MoeSparseMoeBlock(nn.Module):
290
+ """
291
+ A mixture-of-experts module that also contains shared experts.
292
+ """
293
+
294
+ def __init__(self, config: LLaDA2MoeConfig):
295
+ super().__init__()
296
+ self.config = config
297
+ self.num_experts_per_tok = config.num_experts_per_tok
298
+ self._setup_experts()
299
+ self.gate = LLaDA2MoeGate(config)
300
+ if config.num_shared_experts is not None:
301
+ self.shared_experts = LLaDA2MoeMLP(
302
+ config=config,
303
+ intermediate_size=config.moe_intermediate_size
304
+ * config.num_shared_experts,
305
+ )
306
+
307
+ def _setup_experts(self):
308
+ self.experts = nn.ModuleList(
309
+ [
310
+ LLaDA2MoeMLP(
311
+ config=self.config,
312
+ intermediate_size=self.config.moe_intermediate_size,
313
+ )
314
+ for _ in range(self.config.num_experts)
315
+ ]
316
+ )
317
+
318
+ def forward(self, hidden_states):
319
+ identity = hidden_states
320
+ bsz, seq_len, h = hidden_states.shape
321
+ topk_idx, topk_weight, router_logits = self.gate(hidden_states)
322
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
323
+ flat_topk_idx = topk_idx.view(-1)
324
+ if self.training:
325
+ hidden_states = hidden_states.repeat_interleave(
326
+ self.num_experts_per_tok, dim=0
327
+ )
328
+ y = torch.empty_like(hidden_states)
329
+ for i, expert in enumerate(self.experts):
330
+ y[flat_topk_idx == i] = expert(hidden_states[flat_topk_idx == i])
331
+ y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
332
+ y = y.to(hidden_states.dtype).view(bsz, seq_len, h)
333
+ else:
334
+ y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(
335
+ bsz, seq_len, h
336
+ )
337
+ if self.config.num_shared_experts is not None:
338
+ y = y + self.shared_experts(identity)
339
+ return y, (
340
+ router_logits.view(bsz, seq_len, -1),
341
+ topk_idx.view(bsz, seq_len, -1),
342
+ )
343
+
344
+ @torch.no_grad()
345
+ def moe_infer(self, x, topk_ids, topk_weight):
346
+ cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts)))
347
+ cnts.scatter_(1, topk_ids, 1)
348
+ tokens_per_expert = cnts.sum(dim=0)
349
+ idxs = topk_ids.view(-1).argsort()
350
+ sorted_tokens = x[idxs // topk_ids.shape[1]]
351
+ tokens_per_expert = tokens_per_expert.cpu().numpy()
352
+ outputs = []
353
+ start_idx = 0
354
+ for i, num_tokens_tensor in enumerate(tokens_per_expert):
355
+ num_tokens = num_tokens_tensor.item()
356
+ if num_tokens == 0:
357
+ continue
358
+ end_idx = start_idx + num_tokens
359
+ expert = self.experts[i]
360
+ tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
361
+ expert_out = expert(tokens_for_this_expert)
362
+ outputs.append(expert_out.to(x.device))
363
+ start_idx = end_idx
364
+
365
+ outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
366
+ new_x = torch.empty_like(outs)
367
+ new_x[idxs] = outs
368
+ final_out = (
369
+ new_x.view(*topk_ids.shape, -1)
370
+ .type(topk_weight.dtype)
371
+ .mul_(topk_weight.unsqueeze(dim=-1))
372
+ .sum(dim=1)
373
+ .type(new_x.dtype)
374
+ )
375
+ return final_out
376
+
377
+
378
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
379
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
380
+ """
381
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
382
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
383
+ """
384
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
385
+ if n_rep == 1:
386
+ return hidden_states
387
+ hidden_states = hidden_states[:, :, None, :, :].expand(
388
+ batch, num_key_value_heads, n_rep, slen, head_dim
389
+ )
390
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
391
+
392
+
393
+ def eager_attention_forward(
394
+ module: nn.Module,
395
+ query: torch.Tensor,
396
+ key: torch.Tensor,
397
+ value: torch.Tensor,
398
+ attention_mask: Optional[torch.Tensor],
399
+ scaling: float,
400
+ dropout: float = 0.0,
401
+ **kwargs: Unpack[TransformersKwargs],
402
+ ):
403
+ key_states = repeat_kv(key, module.num_key_value_groups)
404
+ value_states = repeat_kv(value, module.num_key_value_groups)
405
+
406
+ attn_weights = (
407
+ torch.matmul(query, key_states.transpose(2, 3)) * scaling
408
+ )
409
+ if attention_mask is not None:
410
+ attn_weights = attn_weights + attention_mask[:, :, :, : key_states.shape[-2]]
411
+
412
+ # upcast attention to fp32
413
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
414
+ query.dtype
415
+ )
416
+ attn_weights = nn.functional.dropout(
417
+ attn_weights, p=dropout, training=module.training
418
+ )
419
+ attn_output = torch.matmul(attn_weights, value_states)
420
+ attn_output = attn_output.transpose(1, 2).contiguous()
421
+
422
+ return attn_output, attn_weights
423
+
424
+
425
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention with Llama->LLaDA2Moe
426
+ class LLaDA2MoeAttention(nn.Module):
427
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
428
+
429
+ def __init__(self, config: LLaDA2MoeConfig, layer_idx: Optional[int] = None):
430
+ super().__init__()
431
+ self.config = config
432
+ self.layer_idx = layer_idx
433
+ if layer_idx is None:
434
+ logger.warning_once(
435
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
436
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
437
+ "when creating this class."
438
+ )
439
+ self.attention_dropout = config.attention_dropout
440
+ self.hidden_size = config.hidden_size
441
+ self.num_heads = config.num_attention_heads
442
+ self.head_dim = config.head_dim or self.hidden_size // self.num_heads
443
+ partial_rotary_factor = (
444
+ config.partial_rotary_factor
445
+ if hasattr(config, "partial_rotary_factor")
446
+ else 1.0
447
+ )
448
+ self.rope_dim = int(self.head_dim * partial_rotary_factor)
449
+ self.num_key_value_heads = config.num_key_value_heads
450
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
451
+ self.max_position_embeddings = config.max_position_embeddings
452
+ self.rope_theta = config.rope_theta
453
+ self.scaling = self.head_dim**-0.5
454
+ self.is_causal = False
455
+
456
+ self.query_key_value = nn.Linear(
457
+ self.hidden_size,
458
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
459
+ bias=config.use_qkv_bias,
460
+ )
461
+
462
+ if self.config.use_qk_norm:
463
+ self.query_layernorm = LLaDA2MoeRMSNorm(
464
+ self.head_dim, eps=config.rms_norm_eps
465
+ )
466
+ self.key_layernorm = LLaDA2MoeRMSNorm(
467
+ self.head_dim, eps=config.rms_norm_eps
468
+ )
469
+ self.dense = nn.Linear(
470
+ self.num_heads * self.head_dim, self.hidden_size, bias=config.use_bias
471
+ )
472
+ self.sliding_window = getattr(config, "sliding_window", None)
473
+
474
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
475
+ return (
476
+ tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
477
+ .transpose(1, 2)
478
+ .contiguous()
479
+ )
480
+
481
+ def forward(
482
+ self,
483
+ hidden_states: torch.Tensor,
484
+ attention_mask: Optional[torch.Tensor] = None,
485
+ position_ids: Optional[torch.LongTensor] = None,
486
+ past_key_value: Optional[Cache] = None,
487
+ output_attentions: bool = False,
488
+ use_cache: bool = False,
489
+ position_embeddings: Optional[
490
+ Tuple[torch.Tensor, torch.Tensor]
491
+ ] = None, # necessary, but kept here for BC
492
+ **kwargs,
493
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
494
+ input_shape = hidden_states.shape[:-1]
495
+
496
+ bsz, q_len, _ = hidden_states.size()
497
+
498
+ qkv = self.query_key_value(hidden_states)
499
+ qkv = qkv.view(
500
+ bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim
501
+ )
502
+
503
+ query_states, key_states, value_states = qkv.split(
504
+ [self.num_heads, self.num_key_value_heads, self.num_key_value_heads], dim=-2
505
+ )
506
+ query_states = query_states.transpose(1, 2)
507
+ key_states = key_states.transpose(1, 2)
508
+ value_states = value_states.transpose(1, 2)
509
+
510
+ if self.config.use_qk_norm:
511
+ query_states = self.query_layernorm(query_states)
512
+ key_states = self.key_layernorm(key_states)
513
+
514
+ cos, sin = position_embeddings
515
+ query_states, key_states = apply_rotary_pos_emb(
516
+ query_states, key_states, cos, sin
517
+ )
518
+
519
+ if past_key_value is not None:
520
+ if self.layer_idx is None:
521
+ raise ValueError(
522
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
523
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
524
+ "with a layer index."
525
+ )
526
+ cache_kwargs = {"sin": sin, "cos": cos}
527
+ key_states, value_states = past_key_value.update(
528
+ key_states, value_states, self.layer_idx, cache_kwargs
529
+ )
530
+
531
+ attention_interface: Callable = eager_attention_forward
532
+ if self.config._attn_implementation != "eager":
533
+ attention_interface = ALL_ATTENTION_FUNCTIONS[
534
+ self.config._attn_implementation
535
+ ]
536
+
537
+ attn_output, attn_weights = attention_interface(
538
+ self,
539
+ query_states,
540
+ key_states,
541
+ value_states,
542
+ attention_mask,
543
+ dropout=0.0 if not self.training else self.attention_dropout,
544
+ scaling=self.scaling,
545
+ sliding_window=self.sliding_window, # diff with Llama
546
+ **kwargs,
547
+ )
548
+
549
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
550
+ attn_output = self.dense(attn_output)
551
+
552
+ return attn_output, attn_weights, past_key_value
553
+
554
+
555
+ class LLaDA2MoeDecoderLayer(nn.Module):
556
+ def __init__(self, config: LLaDA2MoeConfig, layer_idx: int):
557
+ super().__init__()
558
+ self.hidden_size = config.hidden_size
559
+
560
+ self.attention = LLaDA2MoeAttention(config=config, layer_idx=layer_idx)
561
+
562
+ self.mlp = (
563
+ LLaDA2MoeSparseMoeBlock(config)
564
+ if (
565
+ config.num_experts is not None
566
+ and layer_idx >= config.first_k_dense_replace
567
+ )
568
+ else LLaDA2MoeMLP(config=config, intermediate_size=config.intermediate_size)
569
+ )
570
+ self.input_layernorm = LLaDA2MoeRMSNorm(
571
+ config.hidden_size, eps=config.rms_norm_eps
572
+ )
573
+ self.post_attention_layernorm = LLaDA2MoeRMSNorm(
574
+ config.hidden_size, eps=config.rms_norm_eps
575
+ )
576
+
577
+ def forward(
578
+ self,
579
+ hidden_states: torch.Tensor,
580
+ attention_mask: Optional[torch.Tensor] = None,
581
+ position_ids: Optional[torch.LongTensor] = None,
582
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
583
+ output_attentions: Optional[bool] = False,
584
+ output_router_logits: Optional[bool] = False,
585
+ use_cache: Optional[bool] = False,
586
+ position_embeddings: Optional[
587
+ Tuple[torch.Tensor, torch.Tensor]
588
+ ] = None, # necessary, but kept here for BC
589
+ **kwargs,
590
+ ) -> Tuple[
591
+ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
592
+ ]:
593
+ """
594
+ Args:
595
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
596
+ attention_mask (`torch.FloatTensor`, *optional*):
597
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
598
+ query_sequence_length, key_sequence_length)` if default attention is used.
599
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
600
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
601
+ config.n_positions - 1]`.
602
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
603
+ cached past key and value projection states
604
+ output_attentions (`bool`, *optional*):
605
+ Whether to return the attentions tensors of all attention layers. See `attentions` under
606
+ returned tensors for more detail.
607
+ output_router_logits (`bool`, *optional*):
608
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
609
+ and should not be returned during inference.
610
+ use_cache (`bool`, *optional*):
611
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
612
+ (see `past_key_values`).
613
+ """
614
+ residual = hidden_states
615
+
616
+ hidden_states = self.input_layernorm(hidden_states)
617
+
618
+ # Self Attention
619
+ hidden_states, self_attn_weights, present_key_value = self.attention(
620
+ hidden_states=hidden_states,
621
+ attention_mask=attention_mask,
622
+ position_ids=position_ids,
623
+ past_key_value=past_key_value,
624
+ output_attentions=output_attentions,
625
+ position_embeddings=position_embeddings,
626
+ use_cache=use_cache,
627
+ )
628
+ hidden_states = residual + hidden_states
629
+
630
+ # Fully Connected
631
+ residual = hidden_states
632
+ hidden_states = self.post_attention_layernorm(hidden_states)
633
+ hidden_states = self.mlp(hidden_states)
634
+ if isinstance(hidden_states, tuple):
635
+ hidden_states, router_logits = hidden_states
636
+ else:
637
+ router_logits = None
638
+ hidden_states = residual + hidden_states.to(residual.device)
639
+
640
+ outputs = (hidden_states,)
641
+
642
+ if output_attentions:
643
+ outputs += (self_attn_weights,)
644
+
645
+ if use_cache:
646
+ outputs += (present_key_value,)
647
+
648
+ if output_router_logits:
649
+ outputs += (router_logits,)
650
+
651
+ return outputs
652
+
653
+
654
+ LLADA2MOE_START_DOCSTRING = r"""
655
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
656
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
657
+ etc.)
658
+
659
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
660
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
661
+ and behavior.
662
+
663
+ Parameters:
664
+ config ([`LLaDA2MoeConfig`]):
665
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
666
+ load the weights associated with the model, only the configuration. Check out the
667
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
668
+ """
669
+
670
+
671
+ @add_start_docstrings(
672
+ "The bare LLaDA2Moe Model outputting raw hidden-states without any specific head on top.",
673
+ LLADA2MOE_START_DOCSTRING,
674
+ )
675
+ class LLaDA2MoePreTrainedModel(PreTrainedModel):
676
+ config_class = LLaDA2MoeConfig
677
+ base_model_prefix = "model"
678
+ supports_gradient_checkpointing = True
679
+ _no_split_modules = ["LLaDA2MoeDecoderLayer"]
680
+ _skip_keys_device_placement = ["past_key_values"]
681
+ _supports_flash_attn_2 = False
682
+ _supports_sdpa = True
683
+ _supports_flex_attn = True
684
+ _supports_cache_class = True
685
+
686
+ def _init_weights(self, module):
687
+ std = self.config.initializer_range
688
+ if isinstance(module, nn.Linear):
689
+ module.weight.data.normal_(mean=0.0, std=std)
690
+ if module.bias is not None:
691
+ module.bias.data.zero_()
692
+ elif isinstance(module, nn.Embedding):
693
+ module.weight.data.normal_(mean=0.0, std=std)
694
+ if module.padding_idx is not None:
695
+ module.weight.data[module.padding_idx].zero_()
696
+
697
+
698
+ LLADA2MOE_INPUTS_DOCSTRING = r"""
699
+ Args:
700
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
701
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
702
+ it.
703
+
704
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
705
+ [`PreTrainedTokenizer.__call__`] for details.
706
+
707
+ [What are input IDs?](../glossary#input-ids)
708
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
709
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
710
+
711
+ - 1 for tokens that are **not masked**,
712
+ - 0 for tokens that are **masked**.
713
+
714
+ [What are attention masks?](../glossary#attention-mask)
715
+
716
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
717
+ [`PreTrainedTokenizer.__call__`] for details.
718
+
719
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
720
+ `past_key_values`).
721
+
722
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
723
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
724
+ information on the default strategy.
725
+
726
+ - 1 indicates the head is **not masked**,
727
+ - 0 indicates the head is **masked**.
728
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
729
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
730
+ config.n_positions - 1]`.
731
+
732
+ [What are position IDs?](../glossary#position-ids)
733
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
734
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
735
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
736
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
737
+
738
+ Two formats are allowed:
739
+ - a [`~cache_utils.Cache`] instance;
740
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
741
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
742
+ cache format.
743
+
744
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
745
+ legacy cache format will be returned.
746
+
747
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
748
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
749
+ of shape `(batch_size, sequence_length)`.
750
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
751
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
752
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
753
+ model's internal embedding lookup matrix.
754
+ use_cache (`bool`, *optional*):
755
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
756
+ `past_key_values`).
757
+ output_attentions (`bool`, *optional*):
758
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
759
+ tensors for more detail.
760
+ output_hidden_states (`bool`, *optional*):
761
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
762
+ more detail.
763
+ return_dict (`bool`, *optional*):
764
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
765
+ """
766
+
767
+
768
+ @add_start_docstrings(
769
+ "The bare LLaDA2Moe Model outputting raw hidden-states without any specific head on top.",
770
+ LLADA2MOE_START_DOCSTRING,
771
+ )
772
+ class LLaDA2MoeModel(LLaDA2MoePreTrainedModel):
773
+ """
774
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LLaDA2MoeDecoderLayer`]
775
+
776
+ Args:
777
+ config: LLaDA2MoeConfig
778
+ """
779
+
780
+ def __init__(self, config: LLaDA2MoeConfig):
781
+ super().__init__(config)
782
+ self.padding_idx = config.pad_token_id
783
+ self.vocab_size = config.vocab_size
784
+
785
+ self.word_embeddings = nn.Embedding(
786
+ config.vocab_size, config.hidden_size, self.padding_idx
787
+ )
788
+ self.layers = nn.ModuleList(
789
+ [
790
+ LLaDA2MoeDecoderLayer(config, layer_idx)
791
+ for layer_idx in range(config.num_hidden_layers)
792
+ ]
793
+ )
794
+ self._use_sdpa = config._attn_implementation == "sdpa"
795
+ self._use_flex_attention = config._attn_implementation == "flex_attention"
796
+ self.norm = LLaDA2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
797
+ self.rotary_emb = LLaDA2MoeRotaryEmbedding(config=config)
798
+ self.gradient_checkpointing = False
799
+ # Initialize weights and apply final processing
800
+ self.post_init()
801
+
802
+ def get_input_embeddings(self):
803
+ return self.word_embeddings
804
+
805
+ def set_input_embeddings(self, value):
806
+ self.word_embeddings = value
807
+
808
+ @add_start_docstrings_to_model_forward(LLADA2MOE_INPUTS_DOCSTRING)
809
+ def forward(
810
+ self,
811
+ input_ids: torch.LongTensor = None,
812
+ attention_mask: Optional[torch.Tensor] = None,
813
+ position_ids: Optional[torch.LongTensor] = None,
814
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
815
+ inputs_embeds: Optional[torch.FloatTensor] = None,
816
+ use_cache: Optional[bool] = None,
817
+ output_attentions: Optional[bool] = None,
818
+ output_hidden_states: Optional[bool] = None,
819
+ output_router_logits: Optional[bool] = None,
820
+ return_dict: Optional[bool] = None,
821
+ **kwargs,
822
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
823
+ output_attentions = (
824
+ output_attentions
825
+ if output_attentions is not None
826
+ else self.config.output_attentions
827
+ )
828
+ output_hidden_states = (
829
+ output_hidden_states
830
+ if output_hidden_states is not None
831
+ else self.config.output_hidden_states
832
+ )
833
+ output_router_logits = (
834
+ output_router_logits
835
+ if output_router_logits is not None
836
+ else self.config.output_router_logits
837
+ )
838
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
839
+
840
+ return_dict = (
841
+ return_dict if return_dict is not None else self.config.use_return_dict
842
+ )
843
+
844
+ # retrieve input_ids and inputs_embeds
845
+ if input_ids is not None and inputs_embeds is not None:
846
+ raise ValueError(
847
+ "You cannot specify both input_ids and inputs_embeds at the same time"
848
+ )
849
+ elif input_ids is not None:
850
+ batch_size, seq_length = input_ids.shape[:2]
851
+ elif inputs_embeds is not None:
852
+ batch_size, seq_length = inputs_embeds.shape[:2]
853
+ else:
854
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
855
+
856
+ if self.gradient_checkpointing and self.training:
857
+ if use_cache:
858
+ logger.warning_once(
859
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`transformers."
860
+ )
861
+ use_cache = False
862
+
863
+ if use_cache and past_key_values is None:
864
+ past_key_values = DynamicCache()
865
+
866
+ if inputs_embeds is None:
867
+ inputs_embeds = self.word_embeddings(input_ids)
868
+
869
+ past_seen_tokens = (
870
+ past_key_values.get_seq_length() if past_key_values is not None else 0
871
+ )
872
+
873
+ if position_ids is None:
874
+ position_ids = torch.arange(
875
+ past_seen_tokens,
876
+ past_seen_tokens + inputs_embeds.shape[1],
877
+ device=inputs_embeds.device,
878
+ )
879
+ position_ids = position_ids.unsqueeze(0)
880
+
881
+ if self._use_flex_attention:
882
+ if attention_mask is not None and isinstance(attention_mask, torch.Tensor):
883
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
884
+ attention_mask,
885
+ (batch_size, seq_length),
886
+ inputs_embeds,
887
+ past_seen_tokens,
888
+ )
889
+ elif self._use_sdpa and not output_attentions:
890
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
891
+ # the manual implementation that requires a 4D causal mask in all cases.
892
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
893
+ attention_mask,
894
+ (batch_size, seq_length),
895
+ inputs_embeds,
896
+ past_seen_tokens,
897
+ )
898
+ else:
899
+ # 4d mask is passed through the layers
900
+ attention_mask = _prepare_4d_causal_attention_mask(
901
+ attention_mask,
902
+ (batch_size, seq_length),
903
+ inputs_embeds,
904
+ past_seen_tokens,
905
+ )
906
+
907
+ # embed positions
908
+ hidden_states = inputs_embeds
909
+
910
+ # create position embeddings to be shared across the decoder layers
911
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
912
+
913
+ # decoder layers
914
+ all_hidden_states = () if output_hidden_states else None
915
+ all_self_attns = () if output_attentions else None
916
+ all_router_logits = () if output_router_logits else None
917
+ next_decoder_cache = None
918
+
919
+ for decoder_layer in self.layers:
920
+ if output_hidden_states:
921
+ all_hidden_states += (hidden_states,)
922
+
923
+ if self.gradient_checkpointing and self.training:
924
+ layer_outputs = self._gradient_checkpointing_func(
925
+ decoder_layer.__call__,
926
+ hidden_states,
927
+ attention_mask,
928
+ position_ids,
929
+ past_key_values,
930
+ output_attentions,
931
+ output_router_logits,
932
+ use_cache,
933
+ position_embeddings,
934
+ )
935
+ else:
936
+ layer_outputs = decoder_layer(
937
+ hidden_states,
938
+ attention_mask=attention_mask,
939
+ position_ids=position_ids,
940
+ past_key_value=past_key_values,
941
+ output_attentions=output_attentions,
942
+ output_router_logits=output_router_logits,
943
+ use_cache=use_cache,
944
+ position_embeddings=position_embeddings,
945
+ )
946
+ hidden_states = layer_outputs[0]
947
+
948
+ if use_cache:
949
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
950
+
951
+ if output_attentions:
952
+ all_self_attns += (layer_outputs[1],)
953
+
954
+ if output_router_logits and layer_outputs[-1] is not None:
955
+ all_router_logits += (layer_outputs[-1],)
956
+
957
+ hidden_states = self.norm(hidden_states)
958
+
959
+ # add hidden states from the last decoder layer
960
+ if output_hidden_states:
961
+ all_hidden_states += (hidden_states,)
962
+
963
+ next_cache = None
964
+ if use_cache:
965
+ next_cache = next_decoder_cache
966
+ if not return_dict:
967
+ return tuple(
968
+ v
969
+ for v in [
970
+ hidden_states,
971
+ next_cache,
972
+ all_hidden_states,
973
+ all_self_attns,
974
+ all_router_logits,
975
+ ]
976
+ if v is not None
977
+ )
978
+ return MoeModelOutputWithPast(
979
+ last_hidden_state=hidden_states,
980
+ past_key_values=next_cache,
981
+ hidden_states=all_hidden_states,
982
+ attentions=all_self_attns,
983
+ router_logits=all_router_logits,
984
+ )
985
+
986
+
987
+ class LLaDA2MoeModelLM(LLaDA2MoePreTrainedModel, GenerationMixin):
988
+ _tied_weights_keys = ["lm_head.weight"]
989
+
990
+ def __init__(self, config: LLaDA2MoeConfig):
991
+ super().__init__(config)
992
+ self.model = LLaDA2MoeModel(config)
993
+ self.vocab_size = config.vocab_size
994
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
995
+
996
+ # Initialize weights and apply final processing
997
+ self.post_init()
998
+
999
+ def get_input_embeddings(self):
1000
+ return self.model.word_embeddings
1001
+
1002
+ def set_input_embeddings(self, value):
1003
+ self.model.word_embeddings = value
1004
+
1005
+ def get_output_embeddings(self):
1006
+ return self.lm_head
1007
+
1008
+ def set_output_embeddings(self, new_embeddings):
1009
+ self.lm_head = new_embeddings
1010
+
1011
+ def set_decoder(self, decoder):
1012
+ self.model = decoder
1013
+
1014
+ def get_decoder(self):
1015
+ return self.model
1016
+
1017
+ @add_start_docstrings_to_model_forward(LLADA2MOE_INPUTS_DOCSTRING)
1018
+ @replace_return_docstrings(
1019
+ output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
1020
+ )
1021
+ def forward(
1022
+ self,
1023
+ input_ids: torch.LongTensor = None,
1024
+ attention_mask: Optional[torch.Tensor] = None,
1025
+ position_ids: Optional[torch.LongTensor] = None,
1026
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1027
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1028
+ labels: Optional[torch.LongTensor] = None,
1029
+ use_cache: Optional[bool] = None,
1030
+ output_attentions: Optional[bool] = None,
1031
+ output_hidden_states: Optional[bool] = None,
1032
+ output_router_logits: Optional[bool] = None,
1033
+ return_dict: Optional[bool] = None,
1034
+ **kwargs,
1035
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1036
+ r"""
1037
+ Args:
1038
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1039
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1040
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1041
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1042
+
1043
+ Returns:
1044
+
1045
+ Example:
1046
+
1047
+ ```python
1048
+ >>> from transformers import AutoTokenizer
1049
+
1050
+ >>> model = LLaDA2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1051
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1052
+
1053
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1054
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1055
+
1056
+ >>> # Generate
1057
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1058
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1059
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1060
+ ```"""
1061
+ output_attentions = (
1062
+ output_attentions
1063
+ if output_attentions is not None
1064
+ else self.config.output_attentions
1065
+ )
1066
+ output_hidden_states = (
1067
+ output_hidden_states
1068
+ if output_hidden_states is not None
1069
+ else self.config.output_hidden_states
1070
+ )
1071
+ output_router_logits = (
1072
+ output_router_logits
1073
+ if output_router_logits is not None
1074
+ else self.config.output_router_logits
1075
+ )
1076
+ return_dict = (
1077
+ return_dict if return_dict is not None else self.config.use_return_dict
1078
+ )
1079
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1080
+ outputs = self.model(
1081
+ input_ids=input_ids,
1082
+ attention_mask=attention_mask,
1083
+ position_ids=position_ids,
1084
+ past_key_values=past_key_values,
1085
+ inputs_embeds=inputs_embeds,
1086
+ use_cache=use_cache,
1087
+ output_attentions=output_attentions,
1088
+ output_hidden_states=output_hidden_states,
1089
+ output_router_logits=output_router_logits,
1090
+ return_dict=return_dict,
1091
+ **kwargs,
1092
+ )
1093
+
1094
+ loss = None
1095
+ aux_loss = None
1096
+ hidden_states = outputs[0]
1097
+
1098
+ logits = self.lm_head(hidden_states)
1099
+ logits = logits.float()
1100
+
1101
+ if labels is not None:
1102
+ # LLaDA2.0 computes the loss on logits at the same positions as the labels (no next-token shift)
1103
+ shift_logits = logits
1104
+ shift_labels = labels
1105
+ # Flatten the tokens
1106
+ loss_fct = CrossEntropyLoss()
1107
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1108
+ shift_labels = shift_labels.view(-1)
1109
+ # Enable model parallelism
1110
+ shift_labels = shift_labels.to(shift_logits.device)
1111
+ loss = loss_fct(shift_logits, shift_labels)
1112
+
1113
+ if not return_dict:
1114
+ output = (logits,) + outputs[1:]
1115
+ if output_router_logits:
1116
+ output = (aux_loss,) + output
1117
+ return (loss,) + output if loss is not None else output
1118
+
1119
+ return MoeCausalLMOutputWithPast(
1120
+ loss=loss,
1121
+ aux_loss=aux_loss,
1122
+ logits=logits,
1123
+ past_key_values=outputs.past_key_values,
1124
+ hidden_states=outputs.hidden_states,
1125
+ attentions=outputs.attentions,
1126
+ router_logits=outputs.router_logits,
1127
+ )
1128
+
1129
+ def prepare_inputs_for_generation(
1130
+ self,
1131
+ input_ids,
1132
+ past_key_values=None,
1133
+ attention_mask=None,
1134
+ inputs_embeds=None,
1135
+ token_type_ids=None,
1136
+ **kwargs,
1137
+ ):
1138
+ if past_key_values is not None:
1139
+ if isinstance(past_key_values, Cache):
1140
+ cache_length = past_key_values.get_seq_length()
1141
+ past_length = past_key_values.seen_tokens
1142
+ max_cache_length = (
1143
+ past_key_values.get_max_length()
1144
+ if hasattr(past_key_values, "get_max_length")
1145
+ else past_key_values.get_max_cache_shape()
1146
+ )
1147
+ else:
1148
+ cache_length = past_length = past_key_values[0][0].shape[2]
1149
+ max_cache_length = None
1150
+
1151
+ # Keep only the unprocessed tokens:
1152
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1153
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as input)
1154
+ if (
1155
+ attention_mask is not None
1156
+ and attention_mask.shape[1] > input_ids.shape[1]
1157
+ ):
1158
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1159
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1160
+ # input_ids based on the past_length.
1161
+ elif past_length < input_ids.shape[1]:
1162
+ input_ids = input_ids[:, past_length:]
1163
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1164
+
1165
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1166
+ if (
1167
+ max_cache_length is not None
1168
+ and attention_mask is not None
1169
+ and cache_length + input_ids.shape[1] > max_cache_length
1170
+ ):
1171
+ attention_mask = attention_mask[:, -max_cache_length:]
1172
+
1173
+ position_ids = kwargs.get("position_ids", None)
1174
+ if attention_mask is not None and position_ids is None:
1175
+ # create position_ids on the fly for batch generation
1176
+ position_ids = attention_mask.long().cumsum(-1) - 1
1177
+ position_ids.masked_fill_(attention_mask == 0, 1)
1178
+ if past_key_values:
1179
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1180
+
1181
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1182
+ if inputs_embeds is not None and past_key_values is None:
1183
+ model_inputs = {"inputs_embeds": inputs_embeds}
1184
+ else:
1185
+ model_inputs = {"input_ids": input_ids}
1186
+
1187
+ model_inputs.update(
1188
+ {
1189
+ "position_ids": position_ids,
1190
+ "past_key_values": past_key_values,
1191
+ "use_cache": kwargs.get("use_cache"),
1192
+ "attention_mask": attention_mask,
1193
+ }
1194
+ )
1195
+ return model_inputs
1196
+
1197
+ @staticmethod
1198
+ def _reorder_cache(past_key_values, beam_idx):
1199
+ reordered_past = ()
1200
+ for layer_past in past_key_values:
1201
+ reordered_past += (
1202
+ tuple(
1203
+ past_state.index_select(0, beam_idx.to(past_state.device))
1204
+ for past_state in layer_past
1205
+ ),
1206
+ )
1207
+ return reordered_past
1208
+
1209
+ @staticmethod
1210
+ def _top_k_logits(logits, k):
1211
+ if k is None or k <= 0:
1212
+ return logits
1213
+ else:
1214
+ values, _ = torch.topk(logits, k)
1215
+ min_values = values[..., -1, None]
1216
+ return torch.where(
1217
+ logits < min_values, torch.full_like(logits, float("-inf")), logits
1218
+ )
1219
+
+     @staticmethod
+     def _top_p_logits(logits, p):
+         if p is None or p >= 1.0:
+             return logits
+         sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+         cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+         # Shift the mask right by one position so that the first token whose
+         # cumulative probability crosses `p` is still kept.
+         sorted_mask = cumulative_probs > p
+         sorted_mask[..., 1:] = sorted_mask[..., :-1].clone()
+         sorted_mask[..., 0] = False
+         mask_indices = torch.scatter(
+             torch.full_like(logits, False, dtype=torch.bool),
+             -1,
+             sorted_indices,
+             sorted_mask,
+         )
+         return logits.masked_fill(mask_indices, float("-inf"))
+
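Tracing the mask construction on arbitrary logits shows why the shift is needed; with `p = 0.9` the token that first crosses the threshold is kept and only the tail is dropped:

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.0, 0.0, -1.0]])
p = 0.9
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# cumulative_probs is roughly [0.64, 0.88, 0.97, 1.00]
sorted_mask = cumulative_probs > p
sorted_mask[..., 1:] = sorted_mask[..., :-1].clone()
sorted_mask[..., 0] = False
print(sorted_mask)  # tensor([[False, False, False, True]]): only the last token is masked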
+     def _sample_with_temperature_topk_topp(
+         self, logits, temperature=1.0, top_k=0, top_p=1.0
+     ):
+         orig_shape = logits.shape[:-1]
+         vocab_size = logits.shape[-1]
+         logits = logits.reshape(-1, vocab_size)
+         if temperature is None or temperature <= 0:
+             # temperature == 0 means greedy decoding (see the `generate` docstring),
+             # so take the argmax instead of sampling.
+             probs = F.softmax(logits, dim=-1)
+             token = torch.argmax(probs, dim=-1, keepdim=True)
+         else:
+             if temperature != 1.0:
+                 logits = logits / temperature
+             logits = self._top_k_logits(logits, top_k)
+             logits = self._top_p_logits(logits, top_p)
+             probs = F.softmax(logits, dim=-1)
+             token = torch.multinomial(probs, num_samples=1)
+         token_prob = torch.gather(probs, -1, token)
+         return token.view(*orig_shape), token_prob.view(*orig_shape)
+
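The pipeline in this helper is temperature scaling, then top-k, then top-p, then a multinomial draw. A standalone sketch of the tempered draw on arbitrary logits (no filtering, hypothetical values):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.tensor([[2.0, 1.0, 0.0, -1.0]]) / 0.7  # temperature 0.7 sharpens
probs = F.softmax(logits, dim=-1)                     # roughly [0.76, 0.18, 0.04, 0.01]
token = torch.multinomial(probs, num_samples=1)
token_prob = torch.gather(probs, -1, token)
print(token.item(), token_prob.item())  # token 0 is drawn about 76% of the time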
+     @staticmethod
+     def _get_num_transfer_tokens(block_length, steps):
+         if steps == 0:
+             return torch.tensor([], dtype=torch.int64)
+         # Spread `block_length` tokens as evenly as possible over `steps` iterations;
+         # the first `remainder` steps reveal one extra token each.
+         base = block_length // steps
+         remainder = block_length % steps
+         num_transfer_tokens = torch.full((steps,), base, dtype=torch.int64)
+         num_transfer_tokens[:remainder] += 1
+         return num_transfer_tokens
+
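For example, revealing a 32-token block over 5 steps gives a base of 32 // 5 = 6 tokens per step, with the 32 % 5 = 2 leftover tokens assigned to the first two steps:

import torch

block_length, steps = 32, 5
base, remainder = block_length // steps, block_length % steps
schedule = torch.full((steps,), base, dtype=torch.int64)
schedule[:remainder] += 1
print(schedule, schedule.sum())  # tensor([7, 7, 6, 6, 6]) tensor(32)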
+     @torch.no_grad()
+     def generate(
+         self,
+         inputs: Optional[torch.Tensor] = None,
+         temperature: float = 0.0,
+         block_length: int = 32,
+         steps: int = 32,
+         gen_length: int = 2048,
+         top_p: Optional[float] = None,
+         top_k: Optional[int] = None,
+         eos_early_stop: bool = False,
+         minimal_topk: int = 1,
+         threshold: float = 0.95,
+         eos_id: int = 156892,
+         mask_id: int = 156895,
+     ):
+         r"""
+         Generates tokens using a block-wise, iterative refinement strategy.
+
+         This method operates differently from standard autoregressive generation. It first creates a template of the
+         full desired length, filled with a special `mask_id`. It then processes this template in segments (`blocks`)
+         and iteratively "denoises" or "refines" the `mask_id` tokens into actual tokens over a series of `steps` for
+         each block. A custom block-diagonal causal attention mask ensures that generation within a block can attend
+         to all previous blocks but not future ones.
+
+         <Tip warning={true}>
+
+         This is a specialized generation method. The quality and speed of the output are highly dependent on the
+         interplay between `block_length`, `steps`, and `threshold`. It aims to achieve faster generation through
+         parallel decoding within blocks, which is a departure from the token-by-token generation of standard
+         `.generate()` methods.
+
+         </Tip>
+
+         Parameters:
+             inputs (`torch.Tensor`):
+                 The token sequence used as a prompt for the generation.
+             temperature (`float`, *optional*, defaults to 0.0):
+                 The value used to modulate the next token probabilities. A value of 0.0 corresponds to greedy
+                 decoding.
+             block_length (`int`, *optional*, defaults to 32):
+                 The size of each generation block. The model generates text in parallel within these blocks. This is
+                 a key parameter for controlling the granularity of the generation process.
+             steps (`int`, *optional*, defaults to 32):
+                 The number of iterative refinement (or "denoising") steps to perform for each block. Within each
+                 block, the model will try to replace `mask_id` tokens with real tokens for this many iterations.
+             gen_length (`int`, *optional*, defaults to 2048):
+                 The maximum number of tokens to generate, excluding the prompt.
+             top_p (`float`, *optional*):
+                 If set to a float value between 0 and 1, only the most probable tokens whose probabilities add up to
+                 `top_p` or higher are kept for generation (nucleus sampling).
+             top_k (`int`, *optional*):
+                 The number of highest-probability vocabulary tokens to keep for top-k filtering.
+             eos_early_stop (`bool`, *optional*, defaults to `False`):
+                 If `True`, generation stops as soon as a valid end-of-sequence token is generated and confirmed,
+                 even if `gen_length` has not been reached.
+             minimal_topk (`int`, *optional*, defaults to 1):
+                 A parameter used to dynamically adjust the number of refinement `steps`. The effective number of
+                 steps is capped at `gen_length // minimal_topk`.
+             threshold (`float`, *optional*, defaults to 0.95):
+                 The confidence probability threshold for accepting a sampled token. During each refinement step, a
+                 sampled token is only kept if its probability is above this threshold. If not enough tokens meet the
+                 threshold, the ones with the highest confidence are chosen.
+             eos_id (`int`, *optional*, defaults to 156892):
+                 The token ID of the end-of-sequence token. Used for `eos_early_stop`.
+             mask_id (`int`, *optional*, defaults to 156895):
+                 The token ID used as a placeholder for tokens that are yet to be generated. This is central to the
+                 iterative refinement algorithm.
+
+         Return:
+             `torch.Tensor`: A tensor containing the generated token IDs, starting
+             after the prompt and stopping at the first `eos_id` or after `gen_length` tokens.
+         """
+         steps = min(steps, gen_length // minimal_topk)
+         input_ids = inputs.to(self.device)
+
+         prompt_length = input_ids.shape[1]
+         num_blocks = (prompt_length + gen_length + block_length - 1) // block_length
+         total_length = num_blocks * block_length
+
+         # Block-diagonal causal mask: positions attend within their own block and
+         # to all earlier blocks; log() maps the 0/1 mask to the additive -inf/0 form.
+         block_mask = torch.tril(torch.ones(num_blocks, num_blocks, device=self.device))
+         block_diffusion_attention_mask = (
+             (
+                 block_mask.repeat_interleave(block_length, dim=0)
+                 .repeat_interleave(block_length, dim=1)
+                 .unsqueeze(0)
+                 .unsqueeze(0)
+             )
+             .log()
+             .to(torch.bfloat16)
+         )
+
+         position_ids = torch.arange(total_length, device=self.device).unsqueeze(0)
+         x = torch.full((1, total_length), mask_id, dtype=torch.long, device=self.device)
+         x[:, :prompt_length] = input_ids.clone()
+
+         prompt_index_full = torch.zeros_like(x, dtype=torch.bool)
+         prompt_index_full[:, :prompt_length] = True
+
+         prefill_blocks = prompt_length // block_length
+
+         denoising_steps_per_block = steps
+         num_transfer_tokens_schedule = self._get_num_transfer_tokens(
+             block_length, denoising_steps_per_block
+         )
+         for num_block in range(prefill_blocks, num_blocks):
+             current_window_end = (num_block + 1) * block_length
+             cur_x = x[:, :current_window_end]
+             cur_attn_mask = block_diffusion_attention_mask[
+                 :, :, :current_window_end, :current_window_end
+             ]
+             cur_position_ids = position_ids[:, :current_window_end]
+
+             for step in range(denoising_steps_per_block):
+                 active_block_mask = cur_x[:, -block_length:] == mask_id
+                 if active_block_mask.sum() == 0:
+                     break
+
+                 logits = self.forward(
+                     cur_x,
+                     attention_mask=cur_attn_mask,
+                     position_ids=cur_position_ids,
+                 ).logits
+
+                 active_logits = logits[:, -block_length:, :]
+                 x0, x0_p = self._sample_with_temperature_topk_topp(
+                     active_logits, temperature=temperature, top_k=top_k, top_p=top_p
+                 )
+
+                 num_to_transfer = num_transfer_tokens_schedule[step].item()
+                 transfer_index = torch.zeros_like(x0, dtype=torch.bool)
+
+                 # Accept every token above the confidence threshold; if fewer than the
+                 # scheduled number qualify, fall back to the most confident candidates.
+                 confidence = torch.where(active_block_mask, x0_p, -torch.inf)
+                 high_conf_mask = confidence[0] > threshold
+                 num_high_confidence = high_conf_mask.sum().item()
+
+                 if num_high_confidence >= num_to_transfer:
+                     transfer_index[0] = high_conf_mask
+                 else:
+                     _, idx = torch.topk(
+                         confidence[0],
+                         k=min(num_to_transfer, active_block_mask.sum().item()),
+                     )
+                     transfer_index[0, idx] = True
+
+                 if transfer_index.any():
+                     cur_x[:, -block_length:][transfer_index] = x0[transfer_index]
+                     if eos_early_stop and (x0[transfer_index] == eos_id).any():
+                         eos_pos_in_x = (cur_x[0] == eos_id).nonzero(as_tuple=True)
+                         if len(eos_pos_in_x[0]) > 0:
+                             eos_pos = eos_pos_in_x[0][0].item()
+                             if (cur_x[0, prompt_length:eos_pos] != mask_id).all():
+                                 # Return only the generated tokens, consistent with the
+                                 # docstring (the prompt is stripped).
+                                 return x[:, prompt_length : eos_pos + 1]
+
+             x[:, :current_window_end] = cur_x
+             if (
+                 eos_id is not None
+                 and (x[0, prompt_length:current_window_end] == eos_id).any()
+             ):
+                 break
+
+         generated_answer = x[:, : prompt_length + gen_length]
+
+         # Truncate at the first end-of-sequence token in the generated region, if any.
+         eos_positions = (generated_answer[0][prompt_length:] == eos_id).nonzero(
+             as_tuple=True
+         )[0]
+         if len(eos_positions) > 0:
+             first_eos_position = eos_positions[0].item()
+         else:
+             first_eos_position = gen_length
+         return generated_answer[
+             :, prompt_length : prompt_length + first_eos_position + 1
+         ]
+
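End-to-end, the method above would be driven roughly as follows. This is a sketch, not a tested recipe: the repository path is a placeholder, and the generation hyperparameters are just the defaults discussed in the docstring:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/LLaDA2-moe"  # placeholder for a local checkout of this repository
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, trust_remote_code=True, torch_dtype=torch.bfloat16
).eval()

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Explain block diffusion in one sentence."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs=prompt, gen_length=256, block_length=32, steps=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))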
special_tokens_map.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "bos_token": "<|startoftext|>",
+   "cls_token": "[CLS]",
+   "eos_token": "<|endoftext|>",
+   "gmask_token": "[gMASK]",
+   "pad_token": "<|endoftext|>",
+   "mask_token": "<|mask|>"
+ }
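Note that `eos_token` and `pad_token` both map to `<|endoftext|>`, so end-of-sequence and padding share one token ID, which is consistent with the `eos_id=156892` default in `generate` above. A quick check (repository path again a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/LLaDA2-moe", trust_remote_code=True)
print(tok.eos_token, tok.pad_token)           # both <|endoftext|>
print(tok.eos_token_id == tok.pad_token_id)   # True if the map above is in effect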
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "bos_token": "<|startoftext|>",
+   "chat_template": "{% set thinking_option = 'off' %}\n{{- '<role>SYSTEM</role>' }}\n{%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n' }}\n{%- endif %}\n{%- if tools %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call>\\n\" }}\n{%- endif %}\n{{- 'detailed thinking ' + thinking_option + '<|role_end|>' }}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if message.role == \"user\" %}\n {{- '<role>HUMAN</role>' + message.content + '<|role_end|>' }}\n {%- elif message.role == \"system\" and not loop.first %}\n {{- '<role>SYSTEM</role>' + message.content + '<|role_end|>' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if reasoning_content %}\n {{- '<role>ASSISTANT</role>' + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<role>ASSISTANT</role>' + content }}\n {%- endif %}\n {%- else %}\n {{- '<role>ASSISTANT</role>' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|role_end|>' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<role>OBSERVATION</role>' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|role_end|>' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<role>ASSISTANT</role>' }}\n{%- endif %}",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "eos_token": "<|endoftext|>",
+   "mask_token": "<|mask|>",
+   "fast_tokenizer": true,
+   "gmask_token": "[gMASK]",
+   "merges_file": null,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "trust_remote_code": true,
+   "vocab_file": null
+ }
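The `chat_template` above wraps turns in `<role>...</role>` markers and closes them with `<|role_end|>`. A sketch of rendering it (placeholder path; the expected output is abridged and based on reading the template, not on running this exact checkpoint):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/LLaDA2-moe", trust_remote_code=True)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <role>SYSTEM</role>You are a helpful assistant.
# detailed thinking off<|role_end|><role>HUMAN</role>Hi!<|role_end|><role>ASSISTANT</role>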