Upload tranformer-1.py
tranformer-1.py (+295 -0)

tranformer-1.py
ADDED
@@ -0,0 +1,295 @@
import torch
import torch.nn as nn
import math

# --- 1. Positional Encoding ---
# The Transformer has no inherent notion of token order (unlike an RNN).
# Positional Encoding adds position information to the input embeddings,
# using sine and cosine functions of different frequencies:
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        """
        Args:
            d_model (int): Embedding dimension; must match the model dimension.
            max_len (int): Maximum possible sequence length.
        """
        super(PositionalEncoding, self).__init__()

        # Create the positional encoding matrix 'pe' of shape (max_len, d_model)
        pe = torch.zeros(max_len, d_model)

        # Tensor of positions [0, 1, 2, ..., max_len-1], shape (max_len, 1)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)

        # Divisor terms for the sin/cos frequencies.
        # This implements the formula from the paper "Attention Is All You Need".
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        # Apply sine to the even columns
        pe[:, 0::2] = torch.sin(position * div_term)
        # Apply cosine to the odd columns
        pe[:, 1::2] = torch.cos(position * div_term)

        # Add a batch dimension in front, giving shape (1, max_len, d_model)
        pe = pe.unsqueeze(0)

        # Register 'pe' as a buffer: module state that is not a trainable
        # parameter, but must still be saved and moved with the module.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): Input embedding tensor of shape (batch_size, seq_len, d_model)
        Returns:
            torch.Tensor: Tensor with position information added, same shape.
        """
        # Add the positional encoding to the input tensor x.
        # x.size(1) is the actual sequence length of the input.
        x = x + self.pe[:, :x.size(1), :]
        return x

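# A quick shape check (an added sketch, not part of the original file): the encoding
# is broadcast-added to the embeddings, so input and output shapes are identical.
#   pos_enc = PositionalEncoding(d_model=128)
#   dummy = torch.zeros(2, 10, 128)               # (batch_size, seq_len, d_model)
#   assert pos_enc(dummy).shape == (2, 10, 128)
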
# --- 2. Multi-Head Attention ---
# This mechanism lets the model jointly attend to information from different
# positions in the sequence. "Multi-head" means we do this several times in
# parallel, with each head focusing on a different aspect of the input.
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        """
        Args:
            d_model (int): Model dimension.
            num_heads (int): Number of attention heads.
        """
        super(MultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"

        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads  # Dimension per head

        # Linear layers for Query, Key, Value, and the output projection
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        self.W_o = nn.Linear(d_model, d_model)

    def scaled_dot_product_attention(self, Q, K, V, mask=None):
        """
        The core of the attention mechanism.
        Formula: Attention(Q, K, V) = softmax( (Q * K^T) / sqrt(d_k) ) * V
        """
        # 1. Compute attention scores (dot product between Q and K)
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)

        # 2. Apply the mask (if any).
        # In the decoder, the mask prevents the model from "seeing" future tokens.
        if mask is not None:
            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)

        # 3. Apply softmax to obtain the attention weights
        attn_probs = torch.softmax(attn_scores, dim=-1)

        # 4. Multiply the weights by V to get the output
        output = torch.matmul(attn_probs, V)
        return output

    def split_heads(self, x):
        """
        Split the input tensor into multiple heads.
        Input:  (batch_size, seq_len, d_model)
        Output: (batch_size, num_heads, seq_len, d_k)
        """
        batch_size, seq_len, _ = x.size()
        return x.view(batch_size, seq_len, self.num_heads, self.d_k).transpose(1, 2)

    def combine_heads(self, x):
        """
        Recombine the results from all heads.
        Input:  (batch_size, num_heads, seq_len, d_k)
        Output: (batch_size, seq_len, d_model)
        """
        batch_size, _, seq_len, _ = x.size()
        return x.transpose(1, 2).contiguous().view(batch_size, seq_len, self.d_model)

    def forward(self, Q, K, V, mask=None):
        # 1. Project Q, K, V through the linear layers
        Q = self.W_q(Q)
        K = self.W_k(K)
        V = self.W_v(V)

        # 2. Split into multiple heads
        Q = self.split_heads(Q)
        K = self.split_heads(K)
        V = self.split_heads(V)

        # 3. Run scaled dot-product attention
        attn_output = self.scaled_dot_product_attention(Q, K, V, mask)

        # 4. Recombine the heads
        output = self.combine_heads(attn_output)

        # 5. Pass through the final output linear layer
        output = self.W_o(output)
        return output

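# Shape walkthrough (an added illustration using the example settings further below,
# d_model=128 and num_heads=4; not part of the original file):
#   x:                           (batch, seq_len, 128)
#   after split_heads:           (batch, 4, seq_len, 32)   # d_k = 128 // 4
#   attention output per head:   (batch, 4, seq_len, 32)
#   after combine_heads and W_o: (batch, seq_len, 128)
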
# --- 3. Position-wise Feed-Forward Network ---
# A simple feed-forward network applied to every position separately and
# identically after the attention sub-layer.
class PositionwiseFeedForward(nn.Module):
    def __init__(self, d_model, d_ff):
        """
        Args:
            d_model (int): Model dimension.
            d_ff (int): Hidden (feed-forward) layer dimension.
        """
        super(PositionwiseFeedForward, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))

# --- 4. Encoder Layer ---
# One Encoder layer consists of Multi-Head Attention and a Feed-Forward Network,
# each wrapped with a residual connection (Add) and layer normalization (Norm).
class EncoderLayer(nn.Module):
    def __init__(self, d_model, num_heads, d_ff, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        # Sub-layer 1: Multi-Head Attention
        attn_output = self.self_attn(x, x, x, mask)
        # Add & Norm
        x = self.norm1(x + self.dropout(attn_output))

        # Sub-layer 2: Feed-Forward
        ff_output = self.feed_forward(x)
        # Add & Norm
        x = self.norm2(x + self.dropout(ff_output))
        return x

# --- 5. Decoder Layer ---
# Similar to the Encoder, but with two attention sub-layers:
# 1. Masked Multi-Head Attention: attends to the previous tokens of the output.
# 2. Encoder-Decoder Attention: attends to the Encoder's output.
class DecoderLayer(nn.Module):
    def __init__(self, d_model, num_heads, d_ff, dropout):
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, num_heads)
        self.cross_attn = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_output, src_mask, tgt_mask):
        # Sub-layer 1: Masked Multi-Head Attention (Self-Attention)
        attn_output = self.self_attn(x, x, x, tgt_mask)
        # Add & Norm
        x = self.norm1(x + self.dropout(attn_output))

        # Sub-layer 2: Encoder-Decoder Attention (Cross-Attention)
        # Q comes from the decoder; K and V come from the encoder output
        attn_output = self.cross_attn(x, enc_output, enc_output, src_mask)
        # Add & Norm
        x = self.norm2(x + self.dropout(attn_output))

        # Sub-layer 3: Feed-Forward
        ff_output = self.feed_forward(x)
        # Add & Norm
        x = self.norm3(x + self.dropout(ff_output))
        return x

# --- 6. Transformer Model ---
# Combines all components into the full Encoder-Decoder architecture.
class Transformer(nn.Module):
    def __init__(self, src_vocab_size, tgt_vocab_size, d_model, num_heads, num_layers, d_ff, max_len, dropout):
        super(Transformer, self).__init__()

        # Embedding layers and Positional Encoding
        self.encoder_embedding = nn.Embedding(src_vocab_size, d_model)
        self.decoder_embedding = nn.Embedding(tgt_vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, max_len)

        # Stacks of Encoder and Decoder layers
        self.encoder_layers = nn.ModuleList([EncoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])
        self.decoder_layers = nn.ModuleList([DecoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])

        # Final linear layer producing logits over the target vocabulary
        # (softmax is applied later, e.g. inside the loss function).
        self.fc_out = nn.Linear(d_model, tgt_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def generate_mask(self, src, tgt):
        # src_mask: masks padding in the encoder input. Shape: (batch, 1, 1, src_len)
        src_mask = (src != 0).unsqueeze(1).unsqueeze(2)

        # tgt_mask: masks padding and prevents attention to future tokens in the decoder.
        tgt_pad_mask = (tgt != 0).unsqueeze(1).unsqueeze(3)
        seq_len = tgt.size(1)
        # Build a lower-triangular (causal) matrix
        tgt_sub_mask = torch.tril(torch.ones((seq_len, seq_len), device=src.device)).bool()
        tgt_mask = tgt_pad_mask & tgt_sub_mask  # Combine the two masks
        return src_mask, tgt_mask

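    # Mask shapes (an added note, not part of the original file):
    #   src_mask:     (batch, 1, 1, src_len)        -> broadcasts over heads and query positions
    #   tgt_pad_mask: (batch, 1, tgt_len, 1)
    #   tgt_sub_mask: (tgt_len, tgt_len)            -> causal lower-triangular matrix
    #   tgt_mask:     (batch, 1, tgt_len, tgt_len)  -> their logical AND after broadcasting
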
    def forward(self, src, tgt):
        src_mask, tgt_mask = self.generate_mask(src, tgt)

        # Encoder pass
        # 1. Embedding + Positional Encoding
        src_embedded = self.dropout(self.positional_encoding(self.encoder_embedding(src)))
        # 2. Pass through every encoder layer
        enc_output = src_embedded
        for layer in self.encoder_layers:
            enc_output = layer(enc_output, src_mask)

        # Decoder pass
        # 1. Embedding + Positional Encoding
        tgt_embedded = self.dropout(self.positional_encoding(self.decoder_embedding(tgt)))
        # 2. Pass through every decoder layer
        dec_output = tgt_embedded
        for layer in self.decoder_layers:
            dec_output = layer(dec_output, enc_output, src_mask, tgt_mask)

        # 3. Output layer
        output = self.fc_out(dec_output)
        return output

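# A hypothetical greedy-decoding helper (an added sketch, not part of the original
# upload): at inference time the decoder consumes its own previous predictions one
# token at a time. It assumes token id 1 serves as a start-of-sequence symbol and
# 0 as padding, neither of which this file actually defines.
@torch.no_grad()
def greedy_decode(model, src, max_new_tokens=20, sos_id=1):
    model.eval()
    batch_size = src.size(0)
    # Start every sequence with the assumed start-of-sequence token
    tgt = torch.full((batch_size, 1), sos_id, dtype=torch.long, device=src.device)
    for _ in range(max_new_tokens):
        logits = model(src, tgt)                                     # (batch, cur_len, tgt_vocab_size)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)   # most likely next token
        tgt = torch.cat([tgt, next_token], dim=1)                    # append and feed back in
    return tgt
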
# --- Usage Example ---
if __name__ == '__main__':
    # Model hyperparameters (kept small for learning purposes)
    src_vocab_size = 5000  # Source-language vocabulary size
    tgt_vocab_size = 5000  # Target-language vocabulary size
    d_model = 128          # Model dimension (must be even)
    num_heads = 4          # Number of attention heads
    num_layers = 3         # Number of stacked Encoder/Decoder layers
    d_ff = 512             # Hidden dimension of the Feed-Forward network
    max_len = 100          # Maximum sequence length
    dropout = 0.1

    # Initialize the model
    model = Transformer(src_vocab_size, tgt_vocab_size, d_model, num_heads, num_layers, d_ff, max_len, dropout)
    print(f"Transformer model created with {sum(p.numel() for p in model.parameters() if p.requires_grad):,} parameters.")

    # Create dummy input data.
    # Token '0' is assumed to be the padding token.
    src_data = torch.randint(1, src_vocab_size, (64, max_len))  # (batch_size, seq_len)
    tgt_data = torch.randint(1, tgt_vocab_size, (64, max_len))

    # Run the model (forward pass)
    try:
        output = model(src_data, tgt_data)
        print("\nForward pass succeeded!")
        print(f"Source input shape (src): {src_data.shape}")
        print(f"Target input shape (tgt): {tgt_data.shape}")
        print(f"Model output shape: {output.shape}")
        # Output shape: (batch_size, seq_len, tgt_vocab_size)
        # These are the logits for every word in the target vocabulary
    except Exception as e:
        print(f"\nError during forward pass: {e}")
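    # A minimal training-step sketch (an added illustration, not part of the original
    # upload): teacher-forced next-token prediction, ignoring the assumed padding id 0.
    criterion = nn.CrossEntropyLoss(ignore_index=0)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    model.train()
    optimizer.zero_grad()
    # Decoder input is the target shifted right; the loss compares against the next token.
    logits = model(src_data, tgt_data[:, :-1])                  # (batch, seq_len-1, tgt_vocab_size)
    loss = criterion(logits.reshape(-1, tgt_vocab_size), tgt_data[:, 1:].reshape(-1))
    loss.backward()
    optimizer.step()
    print(f"One dummy training step done, loss = {loss.item():.4f}")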