# coding=utf-8
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
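"""Ascend NPU inference implementation of the openPangu Ultra MoE model.

The classes below mirror the usual transformers decoder stack but route the
hot paths through fused torch_npu kernels (RMSNorm, rotary cache, grouped
expert GEMMs, MoE token routing) and support tensor-parallel sharding of the
attention, MoE, and embedding/lm_head weights.

Minimal usage sketch (the runner_config layout is inferred from the .get()
calls in this file, and the construction step assumes the standard
transformers config API; adapt to the actual runner):

    runner_config = {
        "parallel_config": {"attn_tp_size": 1, "moe_tp_size": 1, "embed_tp_size": 1},
        "data_config": {"batch_size": 1},
    }
    config = PanguUltraMoEConfig.from_pretrained(model_dir)  # hypothetical path
    model = PanguUltraMoEForCausalLM(config, runner_config).npu().eval()
"""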
import os
import sys
import warnings
from typing import Dict, List, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torch.utils.checkpoint
import torch_npu
from torch import nn
from torch.distributed.distributed_c10d import _world
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13
from transformers.utils.import_utils import is_torch_fx_available
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from configuration_openpangu_moe import PanguUltraMoEConfig
if is_torch_fx_available():
if not is_torch_greater_or_equal_than_1_13:
import torch.fx
_prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
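# RMSNorm backed by fused Ascend kernels. The variadic forward supports the
# three call patterns used by the decoder layers below:
#   norm(x)           -> normalized x
#   norm(x, None)     -> (normalized x, x), keeping x as the running residual
#   norm(x, residual) -> (normalized(x + residual), x + residual) via npu_add_rms_norm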
class PanguUltraMoERMSNorm(nn.Module):
def __init__(self, hidden_dim, epsilon=1e-5):
super().__init__()
self.weight = nn.Parameter(torch.empty(hidden_dim))
self.epsilon = epsilon
def forward(self, hidden_states, *args):
if len(args) == 0:
result = torch_npu.npu_rms_norm(hidden_states, self.weight, self.epsilon)[0]
return result
elif len(args) == 1 and args[0] is None:
result = torch_npu.npu_rms_norm(hidden_states, self.weight, self.epsilon)[0]
residual = hidden_states
return (result, residual)
elif len(args) == 1:
residual = args[0]
y, _, x = torch_npu.npu_add_rms_norm(
residual, hidden_states, self.weight, self.epsilon
)
return (y, x)
else:
            raise NotImplementedError("PanguUltraMoERMSNorm received unsupported extra arguments")
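# Rotary position embedding with cos/sin tables precomputed once and cached up
# to max_position_embeddings; decode steps (seq_len == 1) gather the row at the
# current kv_len, while prefill slices the first seq_len rows.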
class PanguUltraMoERotaryEmbedding(nn.Module):
def __init__(
self, dim, max_position_embeddings=131072, base=25600000.0, device=None
):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
self._set_cache(
seq_len=max_position_embeddings,
device=device,
dtype=torch.get_default_dtype(),
)
def _set_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
dim = self.dim
inv_freq = 1.0 / (
self.base
** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)
)
self.register_buffer("inv_freq", inv_freq, persistent=False)
t = torch.arange(seq_len, device=device, dtype=torch.float32)
freqs = torch.outer(t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def forward(self, x, kv_len, max_seq_len=None):
if max_seq_len is None:
self._set_cache(seq_len=kv_len, device=x.device, dtype=x.dtype)
elif max_seq_len > self.max_seq_len_cached:
self._set_cache(seq_len=max_seq_len, device=x.device, dtype=x.dtype)
batch_size = x.shape[0]
seq_len = x.shape[1]
if seq_len == 1:
cos = (
torch.index_select(self.cos_cached, dim=0, index=kv_len)
.unsqueeze(1)
.unsqueeze(1)
)
sin = (
torch.index_select(self.sin_cached, dim=0, index=kv_len)
.unsqueeze(1)
.unsqueeze(1)
)
else:
cos = (
self.cos_cached[:seq_len]
.unsqueeze(0)
.unsqueeze(2)
.repeat(batch_size, 1, 1, 1)
)
sin = (
self.sin_cached[:seq_len]
.unsqueeze(0)
.unsqueeze(2)
.repeat(batch_size, 1, 1, 1)
)
cos = cos[0, :, 0, :]
sin = sin[0, :, 0, :]
return (
cos.to(dtype=x.dtype),
sin.to(dtype=x.dtype),
)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offset position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
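    # Reorder the last dim from interleaved pairs (x0, y0, x1, y1, ...) to the
    # half-split layout (x0, x1, ..., y0, y1, ...) expected by rotate_half.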
b, h, s, d = q.shape
q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
b, h, s, d = k.shape
k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
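# Dense feed-forward block. The gate and up projections are merged into one
# GEMM, and the intermediate dimension is sharded over moe_tp_size ranks with
# an all_reduce after down_proj when sharded.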
class MLP(nn.Module):
def __init__(self, config, runner_config, hidden_size=None, intermediate_size=None):
super().__init__()
self.runner_config = runner_config
self.moe_tp_size = self.runner_config.get("parallel_config").get(
"moe_tp_size", 1
)
self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
self.intermediate_size = (
config.intermediate_size if intermediate_size is None else intermediate_size
)
self.intermediate_size_per_rank = self.intermediate_size // self.moe_tp_size
self.merge_up_gate_proj = nn.Linear(
self.hidden_size, self.intermediate_size_per_rank * 2, bias=False
)
self.down_proj = nn.Linear(
self.intermediate_size_per_rank, self.hidden_size, bias=False
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
merged_x = self.merge_up_gate_proj(x)
gate_state, up_state = merged_x.chunk(2, dim=-1)
intermediate_hidden_states = self.act_fn(gate_state) * up_state
down_proj = self.down_proj(intermediate_hidden_states)
if self.moe_tp_size > 1:
dist.all_reduce(down_proj)
return down_proj
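# Grouped expert FFN: tokens arrive already sorted by expert, and expert_tokens
# gives the cumulative per-expert boundaries, so npu_grouped_matmul runs all
# expert GEMMs in a single call. Expert weights are frozen parameters filled
# from the checkpoint.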
class MoE(nn.Module):
def __init__(self, config, runner_config, hidden_size=None, intermediate_size=None):
super().__init__()
self.runner_config = runner_config
self.moe_tp_size = self.runner_config.get("parallel_config").get(
"moe_tp_size", 1
)
self.num_experts = config.num_routed_experts
self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
self.intermediate_size = (
config.intermediate_size if intermediate_size is None else intermediate_size
)
self.intermediate_size_per_rank = self.intermediate_size // self.moe_tp_size
self.act_fn = ACT2FN[config.hidden_act]
self.group_w1_w3 = nn.Parameter(
torch.ones(
self.num_experts, self.intermediate_size_per_rank * 2, self.hidden_size
),
requires_grad=False,
)
self.group_w2 = nn.Parameter(
torch.ones(
self.num_experts, self.hidden_size, self.intermediate_size_per_rank
),
requires_grad=False,
)
def forward(self, hidden_states, expert_tokens, seq_len=None):
mm1_mm3 = torch_npu.npu_grouped_matmul(
[hidden_states],
[torch.transpose(self.group_w1_w3, 1, 2)],
group_list=expert_tokens,
group_type=0,
split_item=3,
)[0]
mm1, mm3 = mm1_mm3.chunk(2, dim=-1)
intermediate_hidden_states = self.act_fn(mm1) * mm3
hidden_states = torch_npu.npu_grouped_matmul(
[intermediate_hidden_states],
[torch.transpose(self.group_w2, 1, 2)],
group_list=expert_tokens,
group_type=0,
split_item=3,
)[0]
return hidden_states
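# Router: scores each token against every routed expert with a sigmoid gate,
# keeps the top-k experts per token, optionally renormalizes the kept weights
# to sum to one, then scales them by routed_scaling_factor.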
class MoEGate(nn.Module):
def __init__(self, config):
super().__init__()
self.top_k = config.num_experts_per_tok
self.routed_scaling_factor = config.routed_scaling_factor
self.norm_topk_prob = config.norm_topk_prob
self.weight = nn.Parameter(
torch.empty((config.num_routed_experts, config.hidden_size))
)
def forward(self, hidden_states):
bsz, seq_len, h = hidden_states.shape
hidden_states = hidden_states.view(-1, h)
logits = F.linear(
hidden_states.to(torch.float32), self.weight.to(torch.float32), None
)
scores = logits.sigmoid()
scores_for_choice = scores.view(bsz * seq_len, -1)
_, topk_idx = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)
topk_weight = scores.gather(1, topk_idx)
if self.top_k > 1 and self.norm_topk_prob:
denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
topk_weight = topk_weight / denominator
topk_weight = topk_weight * self.routed_scaling_factor
return topk_idx, topk_weight
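# Sparse MoE block: routed experts plus optional shared experts. moe_npu
# dispatches tokens with npu_moe_init_routing, runs the grouped expert FFN,
# and recombines outputs with npu_moe_finalize_routing, using the router's
# top-k weights as combine scales.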
class PanguUltraMoE(nn.Module):
def __init__(self, config, runner_config):
super().__init__()
self.runner_config = runner_config
self.hidden_dim = config.hidden_size
self.moe_tp_size = self.runner_config.get("parallel_config").get(
"moe_tp_size", 1
)
self.batch_size_decode = self.runner_config.get("data_config").get(
"batch_size", 1
)
self.batch_size_prefill = self.batch_size_decode
self.num_experts_per_tok = config.num_experts_per_tok
self.num_experts = config.num_routed_experts
self.num_shared_experts = config.num_shared_experts
self.top_k = config.num_experts_per_tok
self.experts_per_rank = config.num_routed_experts
self.experts = MoE(
config, self.runner_config, intermediate_size=config.moe_intermediate_size
)
self.gate = MoEGate(config)
if self.num_shared_experts is not None:
intermediate_size = config.moe_intermediate_size * self.num_shared_experts
self.shared_experts = MLP(
config, self.runner_config, intermediate_size=intermediate_size
)
self.row_idx_decode_len = self.batch_size_decode * self.top_k
self.row_idx_decode = (
torch.arange(0, self.row_idx_decode_len, dtype=torch.int32)
.view(self.top_k, -1)
.permute(1, 0)
.int()
.contiguous()
.npu()
)
def forward(self, hidden_states):
identity = hidden_states
topk_idx, topk_weight = self.gate(hidden_states)
y = self.moe_npu(hidden_states, topk_idx, topk_weight)
if self.num_shared_experts is not None:
y = y + self.shared_experts(identity)
return y
def moe_npu(self, x, topk_ids, topk_weight):
batch_size, sequence_length, h = x.shape
hidden_states = x.view(-1, x.shape[-1])
routing_weights = topk_weight.to(x.dtype)
expert_idx = topk_ids.int()
if sequence_length == 1:
row_idx = self.row_idx_decode
else:
row_idx_prefill_len = self.batch_size_prefill * sequence_length * self.top_k
row_idx = (
torch.arange(
0, row_idx_prefill_len, dtype=torch.int32, device=topk_weight.device
)
.view(self.top_k, -1)
.permute(1, 0)
.int()
.contiguous()
)
active_num = batch_size * sequence_length
expanded_x, expanded_row_idx, expanded_expert_idx = (
torch_npu.npu_moe_init_routing(
hidden_states,
row_idx=row_idx,
expert_idx=expert_idx,
active_num=active_num,
)
)
expert_tokens = torch_npu.npu_moe_compute_expert_tokens(
expanded_expert_idx, self.num_experts
)
expert_tokens = expert_tokens.to(torch.int64)
hidden_states_ordered_by_experts = self.experts(
expanded_x, expert_tokens, seq_len=sequence_length
)
hidden_states = torch_npu.npu_moe_finalize_routing(
hidden_states_ordered_by_experts,
skip1=None,
skip2=None,
bias=None,
scales=routing_weights,
expanded_src_to_dst_row=expanded_row_idx,
export_for_source_row=expert_idx,
)
if self.moe_tp_size > 1:
dist.all_reduce(hidden_states)
hidden_states = hidden_states.view(batch_size, -1, self.hidden_dim)
return hidden_states
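# Compressed (low-rank) KV attention: keys/values live in a latent of size
# attention_kv_lora_dim plus a shared RoPE part of attention_qk_rope_dim.
# kv_b_proj_w_k is absorbed into the query side (bmm_5d) and kv_b_proj_w_v is
# applied after the attention matmul, so the cache stores only the compressed
# latent per token.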
class PanguUltraMoEAttention(nn.Module):
def __init__(
self,
config: PanguUltraMoEConfig,
layer_idx: Optional[int] = None,
runner_config: Optional[Dict] = None,
):
super().__init__()
if runner_config is not None:
self.attn_tp_size = runner_config.get("parallel_config").get(
"attn_tp_size", 1
)
else:
self.attn_tp_size = 1
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.num_heads_per_rank = self.num_heads // self.attn_tp_size
self.num_key_value_heads_per_rank = self.num_heads_per_rank
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.attention_q_lora_dim = config.attention_q_lora_dim
self.attention_qk_rope_dim = config.attention_qk_rope_dim
self.attention_kv_lora_dim = config.attention_kv_lora_dim
self.attention_v_dim = config.attention_v_dim
self.attention_qk_dim = config.attention_qk_dim
self.q_head_dim = config.attention_qk_dim + config.attention_qk_rope_dim
if self.attention_q_lora_dim is None:
self.q_proj = nn.Linear(
self.hidden_size, self.num_heads_per_rank * self.q_head_dim, bias=False
)
else:
self.q_a_proj = nn.Linear(
self.hidden_size, config.attention_q_lora_dim, bias=False
)
self.q_a_layernorm = PanguUltraMoERMSNorm(config.attention_q_lora_dim)
self.q_b_proj = nn.Linear(
config.attention_q_lora_dim,
self.num_heads_per_rank * self.q_head_dim,
bias=False,
)
self.kv_a_proj_with_mqa = nn.Linear(
self.hidden_size,
config.attention_kv_lora_dim + config.attention_qk_rope_dim,
bias=False,
)
self.kv_a_layernorm = PanguUltraMoERMSNorm(config.attention_kv_lora_dim)
self.kv_b_proj_w_k = nn.Parameter(
torch.zeros(
self.num_heads_per_rank,
self.attention_qk_dim,
self.attention_kv_lora_dim,
)
)
self.kv_b_proj_w_v = nn.Parameter(
torch.zeros(
self.num_heads_per_rank,
self.attention_kv_lora_dim,
self.attention_v_dim,
)
)
self.o_proj = nn.Linear(
self.num_heads_per_rank * self.attention_v_dim,
self.hidden_size,
bias=False,
)
self.softmax_scale = self.q_head_dim ** (-0.5)
def bmm_5d(self, x, y):
b, s, n, _, d = x.shape
x = x.view(b * s, n, d).transpose(0, 1)
output = torch.matmul(x, y)
output = output.transpose(1, 0).view(b, s, n, -1)
return output
def prepare_qkv(
self,
hidden_states: torch.Tensor,
cos_sin: torch.Tensor = None,
kv_len: torch.IntTensor = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
**kwargs,
):
bsz, q_len, _ = hidden_states.size()
if self.attention_q_lora_dim is None:
q = self.q_proj(hidden_states)
else:
q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
compressed_kv, k_pe = torch.split(
compressed_kv,
[self.attention_kv_lora_dim, self.attention_qk_rope_dim],
dim=-1,
)
q = q.view(bsz, q_len, self.num_heads_per_rank, self.q_head_dim)
q_nope, q_pe = torch.split(
q, [self.attention_qk_dim, self.attention_qk_rope_dim], dim=-1
)
q_pe = q_pe.transpose(1, 2)
q_nope = self.bmm_5d(
q_nope.view(bsz, q_len, self.num_heads_per_rank, 1, self.attention_qk_dim),
self.kv_b_proj_w_k,
)
q_nope = q_nope.view(
bsz, q_len, self.num_heads_per_rank, self.attention_kv_lora_dim
)
q_nope = q_nope.transpose(1, 2)
k_pe = k_pe.view(bsz, q_len, 1, self.attention_qk_rope_dim).transpose(1, 2)
k_nope = (
self.kv_a_layernorm(compressed_kv)
.view(bsz, -1, 1, self.attention_kv_lora_dim)
.transpose(1, 2)
)
cos, sin = cos_sin
q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)
query_states = torch.cat([q_nope, q_pe], dim=-1)
key_states = torch.cat([k_nope, k_pe], dim=-1)
kv_seq_len = k_nope.shape[-2]
if past_key_value is not None:
past_key_states = past_key_value[self.layer_idx][0]
torch_npu.scatter_update_(past_key_states, kv_len, key_states, -2)
if q_len == 1:
key_states = past_key_states
kv_seq_len = past_key_value[0][0].size()[-2]
value_states = key_states
return query_states, key_states, value_states, kv_seq_len
def apply_attention_npu(
self,
query_states,
key_states,
value_states,
kv_seq_len,
attention_mask: Optional[torch.Tensor] = None,
actual_seq_lengths_kv: list = None,
output_attentions: bool = False,
past_key_value: Optional[Cache] = None,
):
        # Eager attention: scaled QK^T, softmax, then a value matmul on the compressed latent.
bsz, _, q_len, _ = query_states.size()
attn_weights = (
torch.matmul(query_states, key_states.transpose(2, 3)) * self.softmax_scale
)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
else:
raise ValueError("attention mask must not be None")
attn_weights = nn.functional.softmax(
attn_weights, dim=-1, dtype=torch.float32
).to(query_states.dtype)
value_states = value_states[..., : self.attention_kv_lora_dim]
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = self.bmm_5d(attn_output.unsqueeze(3), self.kv_b_proj_w_v)
attn_output = self.o_proj(attn_output.reshape(bsz, q_len, -1))
if self.attn_tp_size > 1:
dist.all_reduce(attn_output)
return attn_output
def forward(
self,
hidden_states: torch.Tensor,
kv_len: torch.IntTensor = None,
actual_seq_lengths_kv: list = None,
cos_sin: torch.Tensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
**kwargs,
    ) -> torch.Tensor:
if "padding_mask" in kwargs:
warnings.warn(
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
)
query_states, key_states, value_states, kv_seq_len = self.prepare_qkv(
hidden_states=hidden_states,
cos_sin=cos_sin,
kv_len=kv_len,
position_ids=position_ids,
past_key_value=past_key_value,
)
output = self.apply_attention_npu(
query_states=query_states,
key_states=key_states,
value_states=value_states,
kv_seq_len=kv_seq_len,
actual_seq_lengths_kv=actual_seq_lengths_kv,
attention_mask=attention_mask,
output_attentions=output_attentions,
past_key_value=past_key_value,
)
return output
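# Decoder layer with fused residual handling: each RMSNorm call adds the
# incoming residual and normalizes in one kernel, returning the new residual
# alongside the normalized activations. sandwich_norm adds extra norms right
# after attention and after the MLP.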
class PanguUltraMoEDecoderLayer(nn.Module):
def __init__(
self, config: PanguUltraMoEConfig, runner_config: Dict, layer_idx: int
):
super().__init__()
self.runner_config = runner_config
self.hidden_size = config.hidden_size
self.self_attn = PanguUltraMoEAttention(
config=config, runner_config=self.runner_config, layer_idx=layer_idx
)
self.mlp = (
PanguUltraMoE(config, self.runner_config)
if (
config.num_routed_experts is not None
and layer_idx >= config.num_dense_layers
)
else MLP(config, self.runner_config)
)
self.input_layernorm = PanguUltraMoERMSNorm(
config.hidden_size, epsilon=config.rms_norm_eps
)
self.post_attention_layernorm = PanguUltraMoERMSNorm(
config.hidden_size, epsilon=config.rms_norm_eps
)
if getattr(config, "sandwich_norm", False):
self.sandwich_norm = True
self.pre_mlp_layernorm = PanguUltraMoERMSNorm(
config.hidden_size, epsilon=config.rms_norm_eps
)
self.post_mlp_layernorm = PanguUltraMoERMSNorm(
config.hidden_size, epsilon=config.rms_norm_eps
)
else:
self.sandwich_norm = False
def forward(
self,
hidden_states: torch.Tensor,
kv_len: torch.IntTensor,
actual_seq_lengths_kv: list,
cos_sin: torch.Tensor,
past_residual: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
**kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
hidden_states, residual = self.input_layernorm(hidden_states, past_residual)
# Self Attention
hidden_states = self.self_attn(
hidden_states=hidden_states,
kv_len=kv_len,
actual_seq_lengths_kv=actual_seq_lengths_kv,
cos_sin=cos_sin,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
)
if self.sandwich_norm:
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, residual = self.pre_mlp_layernorm(hidden_states, residual)
else:
hidden_states, residual = self.post_attention_layernorm(
hidden_states, residual
)
hidden_states = self.mlp(hidden_states)
if self.sandwich_norm:
hidden_states = self.post_mlp_layernorm(hidden_states)
outputs = (residual, hidden_states)
return outputs
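# Thin PreTrainedModel shell; weights come from a checkpoint, so _init_weights
# is a deliberate no-op.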
class PanguUltraMoEPreTrainedModel(PreTrainedModel):
config_class = PanguUltraMoEConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["PanguUltraMoEDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_cache_class = True
def _init_weights(self, module):
pass
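# Backbone: token embedding (vocab-sharded across embed_tp_size ranks and
# combined with an all_reduce when sharded), the decoder stack with a single
# shared rotary cache, and a final RMSNorm.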
class PanguUltraMoEModel(PanguUltraMoEPreTrainedModel):
def __init__(self, config: PanguUltraMoEConfig, runner_config: Dict):
super().__init__(config)
self.config = config
self.runner_config = runner_config
self.local_rank = int(os.getenv("LOCAL_RANK", "0"))
self.rank_offset = int(os.getenv("RANK_OFFSET", "0"))
self.global_rank = self.local_rank + self.rank_offset
self.embed_tp_size = self.runner_config.get("parallel_config").get(
"embed_tp_size", 1
)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.vocab_size_per_rank = self.vocab_size // self.embed_tp_size
self.embed_tokens = nn.Embedding(
self.vocab_size_per_rank, config.hidden_size, self.padding_idx
)
self.layers = nn.ModuleList(
[
PanguUltraMoEDecoderLayer(config, self.runner_config, layer_idx)
for layer_idx in range(config.num_hidden_layers)
]
)
self.norm = PanguUltraMoERMSNorm(config.hidden_size, epsilon=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
self.rotary_emb = PanguUltraMoERotaryEmbedding(
self.config.attention_qk_rope_dim,
max_position_embeddings=self.config.max_position_embeddings,
base=self.config.rope_theta,
)
def forward(
self,
input_ids: torch.LongTensor,
kv_len: torch.IntTensor = None,
actual_seq_lengths_kv: list = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
):
batch_size, seq_length = input_ids.shape
past_key_values_length = past_key_values[0][0].size()[-2]
if position_ids is None:
device = input_ids.device
position_ids = torch.arange(
past_key_values_length,
seq_length + past_key_values_length,
dtype=torch.long,
device=device,
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
if self.embed_tp_size > 1:
new_input_ids = input_ids - self.global_rank * self.vocab_size_per_rank
mask = (new_input_ids >= 0) & (
new_input_ids < self.vocab_size_per_rank
) # (bs, qlen)
new_input_ids_per_rank = new_input_ids * mask
inputs_embeds = self.embed_tokens(new_input_ids_per_rank) * mask.unsqueeze(
-1
)
dist.all_reduce(inputs_embeds)
else:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
cos_sin = self.rotary_emb(
hidden_states, kv_len, self.config.max_position_embeddings
)
residual = None
for decoder_layer in self.layers:
residual, hidden_states = decoder_layer(
hidden_states,
kv_len,
actual_seq_lengths_kv,
cos_sin=cos_sin,
past_residual=residual,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_values,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
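# Causal LM head. With embed_tp_size > 1 the head is vocab-sharded: logit
# shards are all_gathered and re-interleaved into the full vocabulary. For
# prefill (seq_len > 1) only the last valid position per sequence is projected.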
class PanguUltraMoEForCausalLM(PanguUltraMoEPreTrainedModel):
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config, runner_config):
super().__init__(config)
self.config = config
self.runner_config = runner_config
self.embed_tp_size = self.runner_config.get("parallel_config").get(
"embed_tp_size", 1
)
self.model = PanguUltraMoEModel(config, self.runner_config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(
config.hidden_size, config.vocab_size // self.embed_tp_size, bias=False
)
def forward(
self,
input_ids: torch.LongTensor = None,
kv_len: torch.IntTensor = None,
actual_seq_lengths_kv: list = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
):
outputs = self.model(
input_ids=input_ids,
kv_len=kv_len,
actual_seq_lengths_kv=actual_seq_lengths_kv,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
)
hidden_states = outputs
if hidden_states.size()[1] > 1:
gather_index, _ = torch.max(position_ids, dim=-1)
gather_index = (
gather_index.unsqueeze(1)
.unsqueeze(2)
.repeat(1, 1, hidden_states.shape[-1])
)
hidden_states = torch.gather(hidden_states, 1, gather_index)
logits = self.lm_head(hidden_states)
if self.embed_tp_size > 1:
new_logits = torch.zeros_like(logits).repeat(self.embed_tp_size, 1, 1)
dist.all_gather_into_tensor(new_logits, logits, group=_world._default_pg)
new_logits = new_logits.reshape(
self.embed_tp_size, logits.shape[0], logits.shape[1], -1
).permute(1, 2, 0, 3)
logits = new_logits.reshape(logits.shape[0], logits.shape[1], -1)
logits = logits.float()
return logits
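    # One cache tensor per layer of shape [batch, 1, max_position_embeddings,
    # attention_kv_lora_dim + attention_qk_rope_dim]; no separate value cache
    # is needed because values are reconstructed from the same latent.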
def init_cache(self, input_ids):
batch_size, seq_len = input_ids.size()
cache_seq_len = self.config.max_position_embeddings
past_key_values = ()
cache_key_shape = (
batch_size,
1,
cache_seq_len,
self.config.attention_kv_lora_dim + self.config.attention_qk_rope_dim,
)
dtype = self.config.torch_dtype
for _ in range(self.config.num_hidden_layers):
key_cache = torch.zeros(
cache_key_shape, dtype=dtype, device=input_ids.device
)
past_key_values += ((key_cache,),)
return past_key_values
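    # Prefill: positions derive from the attention mask, the cache starts at
    # kv_len = 0, and the shared lower-triangular mask is used directly.
    # Decode: the single new token sits at position kv_len, with per-sequence
    # actual KV lengths passed alongside.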
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
is_prefill=None,
kv_len=None,
share_mask_tril=None,
**kwargs,
):
batch_size, seq_len = input_ids.size()
if past_key_values is None:
past_key_values = self.init_cache(input_ids)
if is_prefill:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
attention_mask = share_mask_tril
kv_len = torch.zeros(
(position_ids.size()[0]), dtype=torch.int32, device=input_ids.device
)
actual_seq_lengths_kv = None
past_key_values_length = 0
input_mask = None
else:
attention_mask = None
position_ids = kv_len.unsqueeze(1)
actual_seq_lengths_kv = (kv_len + 1).cpu().detach().numpy().tolist()
past_key_values_length = self.config.max_position_embeddings - seq_len
input_mask = share_mask_tril
attention_mask = _prepare_4d_causal_attention_mask(
input_mask, (batch_size, seq_len), input_ids.float(), past_key_values_length
)
model_inputs = {}
model_inputs.update(
{
"input_ids": input_ids,
"position_ids": position_ids,
"past_key_values": past_key_values,
"attention_mask": attention_mask,
"kv_len": kv_len,
"actual_seq_lengths_kv": actual_seq_lengths_kv,
}
)
return model_inputs