wi-lab committed
Commit 8df1cab · verified · 1 Parent(s): ccbad27

Upload pretraining/pretrained_model.py with huggingface_hub

Files changed (1)
  1. pretraining/pretrained_model.py +187 -0
pretraining/pretrained_model.py ADDED
@@ -0,0 +1,187 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+
+
+ class LayerNormalization(nn.Module):
+     """Layer norm with learnable scale and bias."""
+
+     def __init__(self, d_model: int, eps: float = 1e-6) -> None:
+         super().__init__()
+         self.eps = eps
+         self.alpha = nn.Parameter(torch.ones(d_model))
+         self.bias = nn.Parameter(torch.zeros(d_model))
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         mean = x.mean(dim=-1, keepdim=True)
+         std = x.std(dim=-1, keepdim=True)
+         return self.alpha * (x - mean) / (std + self.eps) + self.bias
+
+
+ class Embedding(nn.Module):
+     """Linear projection + positional embedding with optional max_len override."""
+
+     def __init__(self, element_length: int, d_model: int, max_len: int | None = None) -> None:
+         super().__init__()
+         self.element_length = element_length
+         self.d_model = d_model
+         self.max_len = max_len if max_len is not None else 1025
+
+         self.proj = nn.Linear(element_length, d_model)
+         self.pos_embed = nn.Embedding(self.max_len, d_model)
+         self.norm = LayerNormalization(d_model)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         seq_len = x.size(1)
+         if seq_len > self.max_len:
+             raise ValueError(f"Sequence length {seq_len} exceeds max_len {self.max_len}.")
+
+         pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
+         pos_encodings = self.pos_embed(pos)
+         tok_emb = self.proj(x.float())
+         return self.norm(tok_emb + pos_encodings)
+
+
+ class ScaledDotProductAttention(nn.Module):
+     """Scaled dot-product attention."""
+
+     def __init__(self, d_k: int) -> None:
+         super().__init__()
+         self.d_k = d_k
+
+     def forward(self, Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+         scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
+         attn = F.softmax(scores, dim=-1)
+         context = torch.matmul(attn, V)
+         return context, attn
+
+
+ class MultiHeadAttention(nn.Module):
+     """Multi-head self-attention module."""
+
+     def __init__(self, d_model: int, n_heads: int, dropout: float) -> None:
+         super().__init__()
+         if d_model % n_heads != 0:
+             raise ValueError(f"d_model ({d_model}) must be divisible by n_heads ({n_heads}).")
+
+         self.d_k = d_model // n_heads
+         self.d_v = d_model // n_heads
+         self.n_heads = n_heads
+
+         self.W_Q = nn.Linear(d_model, self.d_k * n_heads)
+         self.W_K = nn.Linear(d_model, self.d_k * n_heads)
+         self.W_V = nn.Linear(d_model, self.d_v * n_heads)
+         self.linear = nn.Linear(n_heads * self.d_v, d_model)
+         self.dropout = nn.Dropout(dropout)
+         self.scaled_dot_attn = ScaledDotProductAttention(self.d_k)
+
+     def forward(self, Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+         residual = Q
+         batch_size = Q.size(0)
+
+         q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
+         k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
+         v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)
+
+         context, attn = self.scaled_dot_attn(q_s, k_s, v_s)
+         output = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
+         output = self.linear(output)
+         return residual + self.dropout(output), attn
+
+
+ class PoswiseFeedForwardNet(nn.Module):
+     """Position-wise feed-forward network."""
+
+     def __init__(self, d_model: int, d_ff: int, dropout: float) -> None:
+         super().__init__()
+         self.fc1 = nn.Linear(d_model, d_ff)
+         self.fc2 = nn.Linear(d_ff, d_model)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.fc2(self.dropout(F.relu(self.fc1(x))))
+
+
+ class EncoderLayer(nn.Module):
+     """Transformer encoder block."""
+
+     def __init__(self, d_model: int, n_heads: int, d_ff: int, dropout: float) -> None:
+         super().__init__()
+         self.enc_self_attn = MultiHeadAttention(d_model, n_heads, dropout)
+         self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, dropout)
+         self.norm1 = LayerNormalization(d_model)
+         self.norm2 = LayerNormalization(d_model)
+
+     def forward(self, enc_inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+         attn_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs)
+         attn_outputs = self.norm1(attn_outputs)
+         ff_outputs = self.pos_ffn(attn_outputs)
+         enc_outputs = self.norm2(attn_outputs + ff_outputs)
+         return enc_outputs, attn
+
+
+ class LWM(nn.Module):
+     """Large Wireless Model (Transformer encoder)."""
+
+     def __init__(
+         self,
+         element_length: int = 32,
+         d_model: int = 128,
+         n_layers: int = 12,
+         max_len: int | None = None,
+         n_heads: int = 8,
+         dropout: float = 0.1,
+     ) -> None:
+         super().__init__()
+
+         self.element_length = element_length
+         self.d_model = d_model
+         self.n_layers = n_layers
+         self.max_len = max_len if max_len is not None else 1025
+         self.n_heads = n_heads
+         self.dropout = dropout
+
+         self.embedding = Embedding(element_length, d_model, self.max_len)
+         self.layers = nn.ModuleList(
+             [EncoderLayer(d_model, n_heads, d_model * 4, dropout) for _ in range(n_layers)]
+         )
+         self.linear = nn.Linear(d_model, d_model)
+         self.norm = LayerNormalization(d_model)
+
+         embed_weight = self.embedding.proj.weight
+         _, n_dim = embed_weight.size()
+         self.decoder = nn.Linear(d_model, n_dim, bias=False)
+         self.decoder_bias = nn.Parameter(torch.zeros(n_dim))
+
+     def forward(
+         self,
+         input_ids: torch.Tensor,
+         masked_pos: torch.Tensor | None = None,
+     ) -> tuple[torch.Tensor, torch.Tensor] | torch.Tensor:
+         output = self.embedding(input_ids)
+
+         for layer in self.layers:
+             output, attn = layer(output)
+
+         if masked_pos is not None:
+             masked_pos = masked_pos.long()[:, :, None].expand(-1, -1, output.size(-1))
+             h_masked = torch.gather(output, 1, masked_pos)
+             h_masked = self.norm(F.relu(self.linear(h_masked)))
+             logits_lm = self.decoder(h_masked) + self.decoder_bias
+             return logits_lm, output
+
+         return output
+
+
+ def lwm(*args, **kwargs) -> LWM:
+     """Factory to preserve backward compatibility with older imports."""
+
+     return LWM(*args, **kwargs)
+
+
+ class PretrainedLWM(LWM):
+     """Alias retained for compatibility with existing inference scripts."""
+
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
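
For reference, a minimal usage sketch (not part of the uploaded file) under the defaults defined in LWM above; the batch size, sequence length, and masked positions below are made up for illustration.

import torch
from pretraining.pretrained_model import LWM  # module path as uploaded in this commit

model = LWM()  # element_length=32, d_model=128, n_layers=12, n_heads=8, max_len=1025
model.eval()

batch, seq_len = 2, 128                      # seq_len must not exceed max_len (1025)
patches = torch.randn(batch, seq_len, 32)    # (batch, sequence, element_length)

with torch.no_grad():
    enc_out = model(patches)                             # encoder output only: (2, 128, 128)
    masked_pos = torch.randint(0, seq_len, (batch, 16))  # 16 dummy masked positions per sample
    logits_lm, _ = model(patches, masked_pos)            # masked-patch logits: (2, 16, 32)

Note that this runs with randomly initialized weights; actual use would first load the pretrained state_dict into LWM (or PretrainedLWM, the alias kept for existing inference scripts).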