import os, requests
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm
# =========================
# Configuration
# =========================
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "shuffled_corpus.txt"
MAX_LEN = 128
EMBED_DIM = 384
LATENT_DIM = 384
BATCH_SIZE = 512
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05
DROPOUT_AUG = 0.1
EMBED_DROPOUT = 0.1
def download_file(url, save_path):
if os.path.exists(save_path):
print(f"exists: {save_path}")
return
print(f"Downloading {save_path} ...")
r = requests.get(url, stream=True)
r.raise_for_status()
with open(save_path, "wb") as f:
for chunk in r.iter_content(8192*2):
if not chunk:
break
f.write(chunk)
print(f"✅ {save_path} saved")
download_file(
"https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/bpe.model?download=true",
TOKENIZER_PATH
)
download_file(
"https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/shuffled_corpus%20(1).txt?download=true",
DATA_PATH
)
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
# piece_to_id returns the unk id (not -1) for out-of-vocab pieces, so compare
# against unk_id when checking whether "<pad>" exists; fall back to id 0.
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != sp.unk_id() else 0
vocab_size = sp.get_piece_size()
# Python-side encoder, used inside tf.py_function below.
def encode_sentence_py(s: str):
    # Truncate to MAX_LEN, then right-pad with pad_id.
    ids = sp.encode(s, out_type=int)[:MAX_LEN]
    ids = ids + [pad_id] * (MAX_LEN - len(ids))
    return np.array(ids, dtype=np.int32)
def tf_encode(line):
# line: tf.Tensor (tf.string)
def _encode_py(s_tensor):
        # s_tensor is a scalar tf.string tensor -> decode numpy bytes to str
s = s_tensor.numpy().decode("utf-8")
return encode_sentence_py(s)
    # tf.py_function wraps the eager Python encoder; output dtype is tf.int32
ids = tf.py_function(func=_encode_py, inp=[line], Tout=tf.int32)
ids.set_shape([MAX_LEN])
return ids
def token_dropout(tokens, drop_prob=DROPOUT_AUG):
# tokens: (MAX_LEN,) int32
rnd = tf.random.uniform(tf.shape(tokens), 0, 1)
keep_mask = rnd > drop_prob
return tf.where(keep_mask, tokens, tf.cast(pad_id, tf.int32))
def make_views(tokens):
v1 = token_dropout(tokens)
v2 = token_dropout(tokens)
return v1, v2
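# Two independently-corrupted views of one sentence form a positive pair
# (SimCSE-style augmentation). Dropped positions become pad_id, so the
# encoder's padding mask also excludes them from pooling. A quick
# illustrative sketch (not executed during training):
#   toks = tf_encode(tf.constant("오늘 날씨가 좋다"))
#   v1, v2 = make_views(toks)  # two stochastically different corruptions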
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))
# encode
ds = ds.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
# shuffle, repeat, create views, batch
ds = ds.shuffle(SHUFFLE_BUFFER)
ds = ds.repeat()
ds = ds.map(lambda t: make_views(t), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(BATCH_SIZE, drop_remainder=True) # (BATCH, MAX_LEN) for v1 and v2
# model.fit expects (inputs, labels)
ds = ds.map(lambda v1, v2: ((v1, v2), tf.zeros([BATCH_SIZE], dtype=tf.float32)), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.prefetch(tf.data.AUTOTUNE)
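# Optional pipeline sanity check (a sketch; it consumes one batch from the
# infinite dataset, so keep it commented for real runs):
#   (b1, b2), y = next(iter(ds))
#   print(b1.shape, b2.shape, y.shape)  # (512, 128) (512, 128) (512,)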
class DynamicConv(layers.Layer):
def __init__(self, k=7):
super().__init__()
assert k % 2 == 1
self.k = k
self.generator = layers.Dense(k)
    def call(self, x):
        # x: (B, L, D). Predict a softmax-normalized k-tap kernel per position
        # from the content itself, then apply it over a k-wide window
        # (content-dependent "dynamic" convolution).
        B = tf.shape(x)[0]
        L = tf.shape(x)[1]
        D = tf.shape(x)[2]
        kernels = self.generator(x)  # (B, L, k)
        kernels = tf.nn.softmax(kernels, axis=-1)
pad = (self.k - 1) // 2
x_pad = tf.pad(x, [[0,0],[pad,pad],[0,0]])
x_pad_4d = tf.expand_dims(x_pad, axis=1)
patches = tf.image.extract_patches(
images=x_pad_4d,
sizes=[1,1,self.k,1],
strides=[1,1,1,1],
rates=[1,1,1,1],
padding='VALID'
) # (B,1,L,k*D)
        patches = tf.reshape(patches, [B, L, self.k, D])
        kernels_exp = tf.expand_dims(kernels, axis=-1)  # (B, L, k, 1)
        out = tf.reduce_sum(patches * kernels_exp, axis=2)  # weighted window sum -> (B, L, D)
        return out
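# Shape sketch for DynamicConv (illustrative, assuming eager execution):
#   conv = DynamicConv(k=7)
#   y = conv(tf.random.normal([2, 16, EMBED_DIM]))  # -> (2, 16, EMBED_DIM)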
class EncoderBlock(layers.Layer):
def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, num_conv_layers=2, dropout_rate=EMBED_DROPOUT):
super().__init__()
self.fc1 = layers.Dense(ff_dim)
self.fc2 = layers.Dense(embed_dim)
self.blocks = [DynamicConv(k=7) for _ in range(num_conv_layers)]
self.ln = layers.LayerNormalization(epsilon=1e-5)
self.ln1 = layers.LayerNormalization(epsilon=1e-5)
self.ln2 = layers.LayerNormalization(epsilon=1e-5)
self.dropout = layers.Dropout(dropout_rate)
    def call(self, x, training=None):
        # Pre-norm, then the dynamic-conv stack.
        x_norm = self.ln(x)
        out = x_norm
        for block in self.blocks:
            out = block(out)
        out = self.dropout(out, training=training)
        x = x_norm + self.ln1(out)
        # Gated feed-forward (SwiGLU-style): fc1's output is split into a
        # gate half and a value half. Note it reads the conv output `out`,
        # not the residual sum computed above.
        v = out
        h = self.fc1(v)
        g, v_split = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v_split
        h = self.fc2(h)
        h = self.dropout(h, training=training)
        x = x + self.ln2(h)
        return x
class L2NormLayer(layers.Layer):
def __init__(self, axis=1, epsilon=1e-10, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.epsilon = epsilon
def call(self, inputs):
return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)
class SentenceEncoder(Model):
def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM, max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
super().__init__()
self.pad_id = pad_id
self.embed = layers.Embedding(vocab_size, embed_dim)
self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
self.dropout = layers.Dropout(dropout_rate)
self.blocks = [EncoderBlock() for _ in range(2)]
self.attn_pool = layers.Dense(1)
self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
self.latent = layers.Dense(latent_dim, activation=None)
self.l2norm = L2NormLayer(axis=1)
def call(self, x, training=None):
positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
x_embed = self.embed(x) + self.pos_embed(positions)
x_embed = self.dropout(x_embed, training=training)
mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)
h = x_embed
for block in self.blocks:
h = block(h, training=training)
h = self.ln_f(h)
        # Masked attention pooling: pad positions are pushed to -1e9 so the
        # softmax assigns them ~zero weight.
        scores = self.attn_pool(h)
        scores = tf.where(tf.equal(mask[..., tf.newaxis], 0), -1e9, scores)
        scores = tf.nn.softmax(scores, axis=1)
        pooled = tf.reduce_sum(h * scores, axis=1)
latent = self.latent(pooled)
return self.l2norm(latent) # (B, D)
encoder = SentenceEncoder(vocab_size=vocab_size)
# =========================
# Wrapper model for model.fit
# takes (v1, v2) and returns concat([z1, z2]) shape (2B, D)
# =========================
input1 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view1")
input2 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view2")
z1 = encoder(input1)
z2 = encoder(input2)
out = layers.Concatenate(axis=0)([z1, z2]) # (2B, D)
model = Model(inputs=[input1, input2], outputs=out)
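# Both views pass through the same `encoder` instance, so the two towers
# share all weights (a Siamese setup). Concatenating along axis 0 gives
# (2B, D), letting the loss pair row i with row i + B.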
# =========================
# NT-Xent loss as Keras loss (ignores y_true)
# =========================
def nt_xent_loss(y_true, y_pred):
# y_pred: (2N, D) normalized
z = y_pred
z = tf.cast(z, tf.float32)
sim = tf.matmul(z, z, transpose_b=True) # (2N, 2N)
sim = sim / TEMPERATURE
    # Mask self-similarity on the diagonal so each row's softmax only
    # ranges over the other 2N - 1 embeddings.
    diag = tf.eye(tf.shape(sim)[0])
    sim = sim - diag * 1e9
N2 = tf.shape(sim)[0]
N = N2 // 2
# positive index for i: if i < N => i+N, else i-N
labels_pos = tf.concat([tf.range(N, N2), tf.range(0, N)], axis=0)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pos, logits=sim)
return tf.reduce_mean(loss)
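# In formula form, for embedding z_i with positive partner z_j:
#   loss_i = -log( exp(z_i . z_j / T) / sum_{k != i} exp(z_i . z_k / T) )
# The dot product equals cosine similarity here because z is L2-normalized,
# and T = TEMPERATURE. The -1e9 diagonal removes the k = i term.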
optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer, loss=nt_xent_loss)
model.summary()
# One pass over the corpus (36,757,266 is presumably its line count); use
# BATCH_SIZE rather than a hard-coded 512 so the two stay in sync.
steps_per_epoch = 36757266 // BATCH_SIZE
#steps_per_epoch = 1000000 // BATCH_SIZE
model.fit(ds, epochs=EPOCHS, steps_per_epoch=steps_per_epoch)
# Save encoder weights
encoder.save_weights("encoder_fit.weights.h5")
print("Training finished and weights saved.")