OpenLab-NLP committed on
Commit 0a95383 · verified · 1 Parent(s): 110f24b

Upload ulm.py

Files changed (1)
  1. ulm.py +234 -0
ulm.py ADDED
@@ -0,0 +1,234 @@
import os, requests
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm

# =========================
# Configuration
# =========================
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "shuffled_corpus.txt"
MAX_LEN = 128
EMBED_DIM = 384
LATENT_DIM = 384
BATCH_SIZE = 512
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05
DROPOUT_AUG = 0.1
EMBED_DROPOUT = 0.1

def download_file(url, save_path):
    if os.path.exists(save_path):
        print(f"exists: {save_path}")
        return
    print(f"Downloading {save_path} ...")
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(8192 * 2):
            if chunk:  # skip keep-alive chunks rather than stopping at the first empty one
                f.write(chunk)
    print(f"✅ {save_path} saved")

download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/bpe.model?download=true",
    TOKENIZER_PATH
)
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/shuffled_corpus%20(1).txt?download=true",
    DATA_PATH
)

sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
# Fall back to id 0 if the tokenizer does not define an explicit <pad> piece.
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != -1 else 0
vocab_size = sp.get_piece_size()

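# Illustrative sanity print (assumption: SentencePiece's piece_to_id typically
# maps unknown pieces to the <unk> id rather than -1, so the fallback above is
# worth confirming before training).
print(f"vocab_size={vocab_size}, pad_id={pad_id}")
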
# Python-side encoder utility: tokenize, then truncate/pad to MAX_LEN.
def encode_sentence_py(s: str):
    ids = sp.encode(s, out_type=int)[:MAX_LEN]
    if len(ids) < MAX_LEN:
        ids = ids + [pad_id] * (MAX_LEN - len(ids))
    return np.array(ids, dtype=np.int32)

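# Illustrative check of the fixed-length contract: every sentence encodes to
# exactly MAX_LEN int32 ids, right-padded or truncated as needed (the sample
# sentence is hypothetical).
assert encode_sentence_py("짧은 문장").shape == (MAX_LEN,)
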
def tf_encode(line):
    # line: tf.Tensor (tf.string)
    def _encode_py(s_tensor):
        # s_tensor is a tf.Tensor of UTF-8 bytes -> decode to a Python str
        s = s_tensor.numpy().decode("utf-8")
        return encode_sentence_py(s)

    # tf.py_function wraps the Python encoder: tf.string in, tf.int32 out
    ids = tf.py_function(func=_encode_py, inp=[line], Tout=tf.int32)
    ids.set_shape([MAX_LEN])
    return ids

def token_dropout(tokens, drop_prob=DROPOUT_AUG):
    # tokens: (MAX_LEN,) int32
    rnd = tf.random.uniform(tf.shape(tokens), 0, 1)
    keep_mask = rnd > drop_prob
    return tf.where(keep_mask, tokens, tf.cast(pad_id, tf.int32))

def make_views(tokens):
    v1 = token_dropout(tokens)
    v2 = token_dropout(tokens)
    return v1, v2

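# Illustrative example of the augmentation: two stochastic views of the same
# (hypothetical) ids that differ only where tokens were dropped to pad_id.
_demo = tf.constant([5, 6, 7, 8], dtype=tf.int32)
_v1, _v2 = make_views(_demo)  # e.g. [5, 0, 7, 8] vs. [5, 6, 7, 0]
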
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))

# encode
ds = ds.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)

# shuffle, repeat, create views, batch
ds = ds.shuffle(SHUFFLE_BUFFER)
ds = ds.repeat()
ds = ds.map(make_views, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(BATCH_SIZE, drop_remainder=True)  # (BATCH, MAX_LEN) for v1 and v2
# model.fit expects (inputs, labels); the zero labels are dummies ignored by the loss
ds = ds.map(
    lambda v1, v2: ((v1, v2), tf.zeros([BATCH_SIZE], dtype=tf.float32)),
    num_parallel_calls=tf.data.AUTOTUNE,
)
ds = ds.prefetch(tf.data.AUTOTUNE)

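# Illustrative: the pipeline yields ((view1, view2), dummy_labels) with shape
# (BATCH_SIZE, MAX_LEN) for each view and (BATCH_SIZE,) for the labels.
print(ds.element_spec)
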
class DynamicConv(layers.Layer):
    # Position-wise dynamic convolution: a Dense layer predicts a softmax-normalized
    # kernel of width k for every position, which is applied to its local window.
    def __init__(self, k=7):
        super().__init__()
        assert k % 2 == 1
        self.k = k
        self.generator = layers.Dense(k)

    def call(self, x):
        B = tf.shape(x)[0]
        L = tf.shape(x)[1]
        D = tf.shape(x)[2]
        kernels = self.generator(x)  # (B, L, k)
        kernels = tf.nn.softmax(kernels, axis=-1)
        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])
        x_pad_4d = tf.expand_dims(x_pad, axis=1)  # treat the sequence as a 1-row image
        patches = tf.image.extract_patches(
            images=x_pad_4d,
            sizes=[1, 1, self.k, 1],
            strides=[1, 1, 1, 1],
            rates=[1, 1, 1, 1],
            padding='VALID'
        )  # (B, 1, L, k*D)
        patches = tf.reshape(patches, [B, L, self.k, D])
        kernels_exp = tf.expand_dims(kernels, axis=-1)
        out = tf.reduce_sum(patches * kernels_exp, axis=2)
        return out

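# Illustrative shape check: DynamicConv mixes each position's k-token
# neighborhood but preserves the (batch, length, dim) layout.
_probe = tf.zeros([2, 10, EMBED_DIM])
assert DynamicConv(k=7)(_probe).shape == (2, 10, EMBED_DIM)
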
class EncoderBlock(layers.Layer):
    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, num_conv_layers=2, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(k=7) for _ in range(num_conv_layers)]
        self.ln = layers.LayerNormalization(epsilon=1e-5)
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)
        self.dropout = layers.Dropout(dropout_rate)

    def call(self, x, training=None):
        # Convolution sub-block with a residual connection
        x_norm = self.ln(x)
        out = x_norm
        for block in self.blocks:
            out = block(out)
        out = self.dropout(out, training=training)
        x = x_norm + self.ln1(out)
        # SwiGLU-style feed-forward: fc1 output is split into a gate and a value half
        v = out
        h = self.fc1(v)
        g, v_split = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v_split
        h = self.fc2(h)
        h = self.dropout(h, training=training)
        x = x + self.ln2(h)
        return x

class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

class SentenceEncoder(Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim, activation=None)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)
        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)
        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)
        h = self.ln_f(h)
        # Attention pooling: mask pad positions with a large negative before the softmax
        scores = self.attn_pool(h)
        scores = tf.where(tf.equal(mask[..., tf.newaxis], 0), -1e9, scores)
        scores = tf.nn.softmax(scores, axis=1)
        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        return self.l2norm(latent)  # (B, D)

encoder = SentenceEncoder(vocab_size=vocab_size)

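# Illustrative forward-pass check: the encoder maps any (B, MAX_LEN) id batch
# to unit-length latent vectors (here an all-pad dummy batch).
_z = encoder(tf.zeros([2, MAX_LEN], dtype=tf.int32), training=False)
print(_z.shape, float(tf.norm(_z[0])))  # (2, LATENT_DIM), ~1.0
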
# =========================
# Wrapper model for model.fit:
# takes (v1, v2) and returns concat([z1, z2]) of shape (2B, D)
# =========================
input1 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view1")
input2 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view2")
z1 = encoder(input1)
z2 = encoder(input2)
out = layers.Concatenate(axis=0)([z1, z2])  # (2B, D)
model = Model(inputs=[input1, input2], outputs=out)

# =========================
# NT-Xent loss as a Keras loss (ignores y_true)
# =========================
def nt_xent_loss(y_true, y_pred):
    # y_pred: (2N, D), already L2-normalized
    z = tf.cast(y_pred, tf.float32)
    sim = tf.matmul(z, z, transpose_b=True)  # (2N, 2N) cosine similarities
    sim = sim / TEMPERATURE
    # large negative on the diagonal so a sample cannot match itself
    diag = tf.eye(tf.shape(sim)[0])
    sim = sim - diag * 1e9
    N2 = tf.shape(sim)[0]
    N = N2 // 2
    # positive index for i: i + N if i < N, else i - N
    labels_pos = tf.concat([tf.range(N, N2), tf.range(0, N)], axis=0)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pos, logits=sim)
    return tf.reduce_mean(loss)

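# Illustrative sanity check: with a single positive pair (N=1), perfectly
# aligned unit vectors drive the NT-Xent loss to (near) zero.
_pair = tf.math.l2_normalize(tf.constant([[1.0, 0.0], [1.0, 0.0]]), axis=1)
print(float(nt_xent_loss(None, _pair)))  # ~0.0: the only candidate is the positive
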
optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer, loss=nt_xent_loss)

model.summary()

# One epoch over the full corpus (36,757,266 lines)
steps_per_epoch = 36757266 // BATCH_SIZE
# steps_per_epoch = 1000000 // BATCH_SIZE

model.fit(ds, epochs=EPOCHS, steps_per_epoch=steps_per_epoch)

# Save
encoder.save_weights("encoder_fit.weights.h5")
print("Training finished and weights saved.")