import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel

class AdaLNZero(nn.Module):
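    """Adaptive LayerNorm with zero-initialized conditioning (adaLN-Zero, as in DiT).

    The condition vector is projected to per-channel scale, shift and gate (alpha).
    Because the projection is zero-initialized, the module starts out as a plain
    LayerNorm with a zero gate, and conditioning is learned gradually.
    """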
    def __init__(self, hidden_dim, cond_dim):
        super().__init__()
        self.norm = nn.LayerNorm(hidden_dim, elementwise_affine=False)
        self.proj = nn.Linear(cond_dim, 3 * hidden_dim)
        nn.init.zeros_(self.proj.weight)
        nn.init.zeros_(self.proj.bias)

    def forward(self, x, cond):
        params = self.proj(cond).unsqueeze(1)
        scale, shift, alpha = params.chunk(3, dim=-1)
        normalized = self.norm(x) * (1 + scale) + shift
        return normalized, alpha

class ModernBertLayerWithAdaLN(nn.Module):
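    """Wraps a pretrained decoder layer and injects adaLN-Zero conditioning.

    The condition vector is projected to six per-channel tensors: (scale, shift,
    gate) for the attention sub-block and for the MLP sub-block. The wrapper
    re-implements the residual path itself, so only the pretrained layer's
    attention, MLP and norm modules are reused.
    """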
    def __init__(self, pretrained_layer, hidden_dim, cond_dim):
        super().__init__()
        self.pretrained_layer = pretrained_layer
        self.cond_proj = nn.Linear(cond_dim, 6 * hidden_dim)
        nn.init.zeros_(self.cond_proj.weight)
        nn.init.zeros_(self.cond_proj.bias)
        
        # Automatically resolve the attention/MLP/norm module names used by Qwen3-style layers
        self.attn_module = getattr(pretrained_layer, 'self_attn', getattr(pretrained_layer, 'attn', None))
        self.mlp_module = getattr(pretrained_layer, 'mlp', getattr(pretrained_layer, 'feed_forward', None))
        self.attn_norm = getattr(pretrained_layer, 'input_layernorm', getattr(pretrained_layer, 'attn_norm', nn.Identity()))
        self.mlp_norm = getattr(pretrained_layer, 'post_attention_layernorm', getattr(pretrained_layer, 'mlp_norm', nn.Identity()))

    def forward(self, hidden_states, cond, position_ids=None, attention_mask=None, position_embeddings=None):
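        """Run one conditioned transformer block.

        Each sub-block applies the pretrained norm, then the conditioned
        scale/shift, calls the pretrained attention or MLP module, and adds the
        result back through a gate (alpha) that is zero at initialization.
        """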
        adaln_params = self.cond_proj(cond).unsqueeze(1)
        scale_attn, shift_attn, alpha_attn, scale_mlp, shift_mlp, alpha_mlp = adaln_params.chunk(6, dim=-1)
        
        # --- Attention Block ---
        normed_attn = self.attn_norm(hidden_states) * (1 + scale_attn) + shift_attn
        
        # Pack kwargs safely so only non-None values are forwarded to the attention module
        kwargs = {}
        if attention_mask is not None: kwargs['attention_mask'] = attention_mask
        if position_ids is not None: kwargs['position_ids'] = position_ids
        if position_embeddings is not None: kwargs['position_embeddings'] = position_embeddings
        
        attn_out = self.attn_module(normed_attn, **kwargs)
        attn_output = attn_out[0] if isinstance(attn_out, tuple) else attn_out
        hidden_states = hidden_states + (alpha_attn * attn_output)
        
        # --- MLP Block ---
        normed_mlp = self.mlp_norm(hidden_states) * (1 + scale_mlp) + shift_mlp
        mlp_out = self.mlp_module(normed_mlp)
        mlp_output = mlp_out[0] if isinstance(mlp_out, tuple) else mlp_out
        hidden_states = hidden_states + (alpha_mlp * mlp_output)
        
        return hidden_states

class ConditionalMDLM(nn.Module):
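    """Conditional masked diffusion LM built on a pretrained transformer backbone.

    Reuses the backbone's token embeddings and transformer layers, wraps every
    layer with adaLN-Zero conditioning, and adds a final adaLN plus a linear
    output head (optionally weight-tied to the token embeddings). Attention is
    forced to be bidirectional in _forward_impl.
    """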
    def __init__(self, config):
        super().__init__()
        mc = config["model"]
        self.vocab_size = mc["vocab_size"]

        print(f"Loading pretrained backbone from {mc['pretrained_token_embeddings']}...")
        self.backbone = AutoModel.from_pretrained(mc["pretrained_token_embeddings"], trust_remote_code=True)
        
        # Core fix: read the true hidden size (e.g. 1024) from the backbone config, ignoring any incorrect value declared in the yaml
        self.hidden_dim = self.backbone.config.hidden_size
        self.cond_dim = mc["embedding_cond_dim"]
        print(f"Dynamically mapped: hidden_dim={self.hidden_dim}, cond_dim={self.cond_dim}")

        self.token_embed = self.backbone.get_input_embeddings()
        self.embed_norm = getattr(self.backbone, 'norm', nn.Identity())
        
        self.layers = nn.ModuleList([
            ModernBertLayerWithAdaLN(layer, self.hidden_dim, self.cond_dim)
            for layer in self.backbone.layers
        ])
        
        self.final_adaln = AdaLNZero(self.hidden_dim, self.cond_dim)
        self.output_proj = nn.Linear(self.hidden_dim, self.vocab_size, bias=False)
        
        if mc.get("tie_weights", True):
            self.output_proj.weight = self.token_embed.weight

    def _forward_impl(self, input_ids, cond_embedding, padding_mask, return_logits=False):
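        """Embed tokens, build the bidirectional padding mask and RoPE embeddings,
        run all adaLN-conditioned layers, then apply the final adaLN. Returns
        logits when return_logits=True, otherwise the last hidden states."""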
        device = input_ids.device
        batch_size, seq_len = input_ids.shape

        # 1. Prepare the attention mask (fixes the SDPA dtype issue and forces bidirectional attention)
        if padding_mask is not None:
            # Convert to bool: True = attend (keep), False = PAD (ignore)
            attn_mask = ~padding_mask.bool()
        else:
            attn_mask = torch.ones(batch_size, seq_len, dtype=torch.bool, device=device)

        # Expand to [Batch, 1, 1, SeqLen] so it broadcasts across attention heads.
        # This mask must be passed explicitly to disable Qwen3's default causal mask.
        attn_mask = attn_mask.unsqueeze(1).unsqueeze(2)

        # 2. Prepare position IDs
        pos_ids = torch.arange(seq_len, dtype=torch.long, device=device).unsqueeze(0).expand(batch_size, -1)

        # 3. Extract rotary position embeddings (handles both rotary_emb call signatures)
        pos_emb = None
        if hasattr(self.backbone, 'rotary_emb'):
            # The dummy tensor only supplies the device/dtype that cos/sin should be produced in
            dummy_x = torch.empty(batch_size, seq_len, self.hidden_dim, device=device, dtype=self.token_embed.weight.dtype)
            try:
                pos_emb = self.backbone.rotary_emb(dummy_x, pos_ids)
            except Exception:
                try:
                    pos_emb = self.backbone.rotary_emb(pos_ids)
                except Exception:
                    pass

        # 4. Forward Pass
        x = self.token_embed(input_ids)
        x = self.embed_norm(x)
        
        for layer in self.layers:
            # Pass the prepared bool mask so every wrapped layer attends bidirectionally
            x = layer(x, cond_embedding, position_ids=pos_ids, attention_mask=attn_mask, position_embeddings=pos_emb)
            
        x, _ = self.final_adaln(x, cond_embedding)
        
        if return_logits:
            return self.output_proj(x)
        return x

    def forward(self, input_ids, cond_embedding, padding_mask=None):
        return self._forward_impl(input_ids, cond_embedding, padding_mask, return_logits=True)

    def forward_hidden(self, input_ids, cond_embedding, padding_mask=None):
        return self._forward_impl(input_ids, cond_embedding, padding_mask, return_logits=False)

    def count_params(self):
        total = sum(p.numel() for p in self.parameters())
        trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
        return total, trainable

def apply_mask(token_ids, mask_token_id, padding_mask=None):
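    """Randomly mask tokens for masked-diffusion training.

    Samples a per-sequence masking ratio, replaces the selected non-PAD tokens
    with mask_token_id, and returns (masked_ids, target_mask, mask_ratio).
    """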
    B, L = token_ids.shape
    device = token_ids.device
    u = torch.rand(B, 1, device=device)
    # Per-sequence masking ratio drawn from the noise schedule, clamped to [0.1, 1.0]
    mask_ratio = (1 - (1 - 1e-3)**u).clamp(min=0.1, max=1.0)
    rand_scores = torch.rand(B, L, device=device)
    if padding_mask is not None:
        rand_scores[padding_mask.bool()] = 2.0  # scores > 1 can never fall below mask_ratio, so PAD tokens are never masked
    target_mask = rand_scores < mask_ratio
    masked_ids = token_ids.clone()
    masked_ids[target_mask] = mask_token_id
    return masked_ids, target_mask, mask_ratio
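

# Minimal smoke-test sketch. The checkpoint name, vocab size, condition dimension
# and mask token id below are placeholders/assumptions; substitute the values
# from the project's yaml config before running.
if __name__ == "__main__":
    config = {
        "model": {
            "pretrained_token_embeddings": "Qwen/Qwen3-0.6B",  # placeholder checkpoint
            "vocab_size": 151936,         # assumed; must match the backbone's embedding table
            "embedding_cond_dim": 1024,   # assumed condition-embedding size
            "tie_weights": True,
        }
    }
    model = ConditionalMDLM(config)
    total, trainable = model.count_params()
    print(f"params: total={total:,} | trainable={trainable:,}")

    B, L = 2, 16
    input_ids = torch.randint(0, config["model"]["vocab_size"], (B, L))
    cond = torch.randn(B, model.cond_dim)
    padding_mask = torch.zeros(B, L, dtype=torch.bool)  # True marks PAD positions
    padding_mask[:, -4:] = True                         # pretend the last 4 tokens are padding

    masked_ids, target_mask, mask_ratio = apply_mask(input_ids, mask_token_id=0, padding_mask=padding_mask)
    logits = model(masked_ids, cond, padding_mask=padding_mask)
    print("logits:", tuple(logits.shape))  # expected (B, L, vocab_size)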