import os
import requests

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm

# Hyperparameters
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "shuffled_corpus.txt"
MAX_LEN = 384            # tokens per sentence (pad/truncate)
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05       # NT-Xent softmax temperature
DROPOUT_AUG = 0.1        # token-dropout rate for view augmentation
EMBED_DROPOUT = 0.1      # dropout on the embedding layer
SEED = 42

tf.get_logger().setLevel("ERROR")
tf.random.set_seed(SEED)
np.random.seed(SEED)

# Device setup: use a TPU when one is available, otherwise the default strategy.
on_tpu = False
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    print("TPU initialized:", resolver.cluster_spec().as_dict())
    on_tpu = True
except Exception as e:
    print("TPU unavailable, falling back to GPU/CPU:", e)
    strategy = tf.distribute.get_strategy()

# bfloat16 on TPU; plain float32 elsewhere.
from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if on_tpu else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision:", policy)

def download_file(url, save_path):
    if os.path.exists(save_path):
        print(f"exists: {save_path}")
        return
    print(f"Downloading {save_path} ...")
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(8192 * 2):
            # Skip keep-alive chunks rather than breaking: an empty chunk
            # does not mean the stream has ended, so breaking could truncate
            # the download.
            if chunk:
                f.write(chunk)
    print(f"{save_path} saved")

download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/bpe.model?download=true",
    TOKENIZER_PATH,
)
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/shuffled_corpus%20(1).txt?download=true",
    DATA_PATH,
)

# Tokenizer
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:
    # Fall back to id 0 if the model defines no explicit <pad> piece.
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)

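# Illustrative round-trip (uncomment to try; the sentence is a placeholder and
# the exact ids depend on the BPE model):
# ids = sp.encode("오늘 날씨가 좋다", out_type=int)
# print(ids, "->", sp.decode(ids))
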
def encode_sentence_py(s: str):
    # Encode, truncate to MAX_LEN, and right-pad with pad_id.
    ids = sp.encode(s, out_type=int)[:MAX_LEN]
    if len(ids) < MAX_LEN:
        ids = ids + [pad_id] * (MAX_LEN - len(ids))
    return np.array(ids, dtype=np.int32)

def tf_encode(line):
    # SentencePiece runs in Python, so wrap it with tf.py_function.
    def _encode_py(s_tensor):
        s = s_tensor.numpy().decode("utf-8")
        return encode_sentence_py(s)
    ids = tf.py_function(func=_encode_py, inp=[line], Tout=tf.int32)
    ids.set_shape([MAX_LEN])
    return ids

def token_dropout(tokens, drop_prob=DROPOUT_AUG):
    # Randomly replace tokens with pad_id to produce a corrupted view.
    rnd = tf.random.uniform(tf.shape(tokens), 0, 1)
    keep_mask = rnd > drop_prob
    return tf.where(keep_mask, tokens, tf.cast(pad_id, tf.int32))

def make_views(tokens):
    # Two independent corruptions of the same sentence form a positive pair.
    v1 = token_dropout(tokens)
    v2 = token_dropout(tokens)
    return v1, v2

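# This is SimCSE-style self-supervised augmentation, with the noise applied to
# the token ids instead of only to hidden states: both corrupted views of a
# sentence should map to nearby embeddings, while the other sentences in the
# batch act as negatives. For example (hypothetical ids, pad_id = 0), the
# sequence [12, 7, 99, 4] might become [12, 0, 99, 4] in view 1 and
# [12, 7, 99, 0] in view 2.
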
# Input pipeline
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))

ds = ds.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.repeat()
ds = ds.map(make_views, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(BATCH_SIZE, drop_remainder=True)

# Keras expects (inputs, targets); the loss ignores the dummy zero targets.
ds = ds.map(
    lambda v1, v2: ((v1, v2), tf.zeros([BATCH_SIZE], dtype=tf.float32)),
    num_parallel_calls=tf.data.AUTOTUNE,
)
ds = ds.prefetch(tf.data.AUTOTUNE)

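# Optional sanity check (uncomment to run): each view should come out as
# (BATCH_SIZE, MAX_LEN) int32 and the dummy target as (BATCH_SIZE,) float32.
# for (v1, v2), y in ds.take(1):
#     print(v1.shape, v2.shape, y.shape)
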
class DynamicConv(layers.Layer):
    """Position-wise dynamic convolution: each position predicts its own
    softmax-normalized kernel of width k, which is then applied to a local
    window of the sequence."""

    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1, "k must be odd so the window is centered"
        self.k = k
        self.dense = layers.Dense(d_model, activation="silu")
        self.proj = layers.Dense(d_model)
        # Kernel generator runs in float32 for a numerically stable softmax.
        self.generator = layers.Dense(k, dtype="float32")

    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)

        B = tf.shape(x)[0]
        L = tf.shape(x)[1]
        D = tf.shape(x)[2]

        # Per-position kernels: (B, L, k), normalized over the window axis.
        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)

        # Same-length output: pad (k-1)/2 positions on both sides.
        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])

        # Gather each position's k-wide window via extract_patches:
        # (B, 1, L+2*pad, D) -> (B, 1, L, k*D) -> (B, L, k, D).
        x_pad_4d = tf.expand_dims(x_pad, axis=1)
        patches = tf.image.extract_patches(
            images=x_pad_4d,
            sizes=[1, 1, self.k, 1],
            strides=[1, 1, 1, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patches = tf.reshape(patches, [B, L, self.k, D])

        # Weighted sum over the window, then project back to d_model.
        kernels_exp = tf.expand_dims(kernels, axis=-1)
        out = tf.reduce_sum(patches * kernels_exp, axis=2)
        out = self.proj(out)

        return tf.cast(out, x_in.dtype)

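# In symbols: each position t predicts kernel weights
#     w_t = softmax(generator(dense(x_t))) in R^k,
# and the output is a per-position weighted average of its window,
#     out_t = proj( sum_{j=0..k-1} w_t[j] * x_{t+j-(k-1)/2} ),
# i.e. a convolution whose kernel is shared across channels but varies with
# position, in the spirit of dynamic convolutions (Wu et al., 2019).
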
class EncoderBlock(tf.keras.layers.Layer):
    """Dynamic-convolution block followed by a SwiGLU-style gated feed-forward."""

    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, seq_len=MAX_LEN, num_conv_layers=2):
        super().__init__()
        self.embed_dim = embed_dim
        self.seq_len = seq_len

        # ff_dim is split in half for the gate and value branches below.
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(d_model=embed_dim, k=7) for _ in range(num_conv_layers)]

        self.ln = layers.LayerNormalization(epsilon=1e-5)
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)

    def call(self, x, mask=None):
        x_norm = self.ln(x)

        # Stacked dynamic convolutions over the normalized input.
        out = x_norm
        for block in self.blocks:
            out = block(out)

        x = x_norm + self.ln1(out)

        # SwiGLU-style gated feed-forward over the convolution output.
        h = self.fc1(out)
        g, v = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v
        h = self.fc2(h)

        x = x + self.ln2(h)
        return x

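# The gated feed-forward is SwiGLU-style: fc1 expands to ff_dim = 1152, the
# result is split into a gate g and a value v of 576 channels each, and
#     h = SiLU(g) * v
# is projected back to embed_dim by fc2.
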
class L2NormLayer(layers.Layer):
    """L2-normalizes embeddings so cosine similarity reduces to a dot product."""

    def __init__(self, axis=1, epsilon=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

class SentenceEncoder(Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)  # per-token scores for attention pooling
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim, activation=None)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        # Token + learned positional embeddings.
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)

        # 1 for real tokens, 0 for padding.
        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)

        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)

        h = self.ln_f(h)

        # Attention pooling: mask out padding before the softmax.
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)

        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)

        return tf.cast(latent, tf.float32)

def build_contrastive_model(vocab_size):
    # Siamese setup: one shared encoder applied to both views.
    encoder = SentenceEncoder(vocab_size=vocab_size)
    input1 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view1")
    input2 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view2")
    z1 = encoder(input1)
    z2 = encoder(input2)
    # Stack along the batch axis so the loss sees (2*BATCH_SIZE, LATENT_DIM).
    out = layers.Concatenate(axis=0)([z1, z2])
    return Model(inputs=[input1, input2], outputs=out), encoder

def nt_xent_loss(y_true, y_pred):
    # y_pred = [z1; z2], already L2-normalized, shape (2N, LATENT_DIM).
    z = tf.cast(y_pred, tf.float32)

    # Cosine-similarity matrix, scaled by the temperature.
    sim = tf.matmul(z, z, transpose_b=True)
    sim = sim / TEMPERATURE

    # Mask self-similarity on the diagonal.
    diag = tf.eye(tf.shape(sim)[0])
    sim = sim - diag * 1e9

    # Row i's positive is its other view: i+N for i < N, i-N otherwise.
    N2 = tf.shape(sim)[0]
    N = N2 // 2
    labels_pos = tf.concat([tf.range(N, N2), tf.range(0, N)], axis=0)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pos, logits=sim)
    return tf.reduce_mean(loss)

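# NT-Xent in symbols, for anchor i with positive p(i) (its other view):
#     L_i = -log( exp(sim(z_i, z_p(i)) / T) / sum_{j != i} exp(sim(z_i, z_j) / T) )
# Since the embeddings are unit-normalized, the dot products are cosine
# similarities; the diagonal mask above removes the j = i term, and
# TEMPERATURE = 0.05 sharpens the softmax over the in-batch negatives.
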
# Build and compile under the distribution strategy.
with strategy.scope():
    model, encoder = build_contrastive_model(vocab_size)
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    model.compile(optimizer=optimizer, loss=nt_xent_loss)
    model.summary()

# Estimate steps_per_epoch from the corpus line count.
try:
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        num_lines = sum(1 for _ in f)
except Exception as e:
    print("Warning: failed to count lines in the data file:", e)
    num_lines = None

if num_lines:
    steps_per_epoch = max(1, num_lines // BATCH_SIZE)
else:
    # Fallback when the line count is unavailable.
    steps_per_epoch = 1000

print("steps_per_epoch:", steps_per_epoch)

history = model.fit(ds, epochs=EPOCHS, steps_per_epoch=steps_per_epoch, verbose=1)

encoder.save_weights("encoder_fit.weights.h5")
print("Training finished and weights saved.")
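
# Minimal usage sketch (illustrative; the sentences below are placeholders):
# embed a pair of sentences with the trained encoder and compare them by
# cosine similarity, which is a plain dot product on these unit-normalized
# embeddings.
def embed_sentences(sentences):
    ids = np.stack([encode_sentence_py(s) for s in sentences])
    return encoder(tf.constant(ids), training=False).numpy()

emb = embed_sentences(["오늘 날씨가 참 좋네요.", "오늘은 날씨가 정말 좋다."])
print("cosine similarity:", float(emb[0] @ emb[1]))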