OpenLab-NLP committed on
Commit 00b0f63 · verified · 1 Parent(s): dc42d11

Upload 2 files

Files changed (2)
  1. encoder_fit.weights.h5 +2 -2
  2. openlem-tpu.py +320 -0
encoder_fit.weights.h5 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e96f51aab52613405dba99b5ee59cdcde90000a238d61c45c8dde75fed8e7f04
- size 56592008
+ oid sha256:3c9369d711855613888ac432acd74d774f9ad0db5c0fd71ea70dea847ad37c42
+ size 84631384
openlem-tpu.py ADDED
@@ -0,0 +1,320 @@
# TPU-compatible training script (revised)
import os, requests, math
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm

# =========================
# Configuration (change as needed)
# =========================
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "shuffled_corpus.txt"
MAX_LEN = 384
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768  # global batch size (Keras/TPU splits it across replicas)
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05
DROPOUT_AUG = 0.1
EMBED_DROPOUT = 0.1
SEED = 42
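
# Note (illustrative): on a v3-8 TPU (8 replicas) the global batch of 768 is
# split into 768 / 8 = 96 sequences per replica, and the NT-Xent loss below is
# computed per replica, i.e. over 2 * 96 views at a time.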

tf.get_logger().setLevel("ERROR")
tf.random.set_seed(SEED)
np.random.seed(SEED)

# =========================
# TPU initialization / distribution strategy selection
# =========================
on_tpu = False
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    print("✅ TPU initialized:", resolver.cluster_spec().as_dict())
    on_tpu = True
except Exception as e:
    print("⚠️ No TPU available, falling back to GPU/CPU:", e)
    strategy = tf.distribute.get_strategy()

# Mixed precision: bfloat16 on TPU, float32 elsewhere
from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if on_tpu else "float32")
mixed_precision.set_global_policy(policy)
print("✅ Mixed precision:", policy)

# =========================
# File download (if needed)
# =========================
def download_file(url, save_path):
    if os.path.exists(save_path):
        print(f"exists: {save_path}")
        return
    print(f"Downloading {save_path} ...")
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(8192 * 2):
            if not chunk:
                break
            f.write(chunk)
    print(f"✅ {save_path} saved")

# (uncomment / comment out as needed)
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/bpe.model?download=true",
    TOKENIZER_PATH
)
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/shuffled_corpus%20(1).txt?download=true",
    DATA_PATH
)

# =========================
# Load the tokenizer
# =========================
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)

# =========================
# Encoding / data pipeline
# =========================
def encode_sentence_py(s: str):
    # Tokenize and truncate to MAX_LEN, then pad with <pad> up to MAX_LEN
    ids = sp.encode(s, out_type=int)[:MAX_LEN]
    if len(ids) < MAX_LEN:
        ids = ids + [pad_id] * (MAX_LEN - len(ids))
    return np.array(ids, dtype=np.int32)

def tf_encode(line):
    def _encode_py(s_tensor):
        s = s_tensor.numpy().decode("utf-8")
        return encode_sentence_py(s)
    ids = tf.py_function(func=_encode_py, inp=[line], Tout=tf.int32)
    ids.set_shape([MAX_LEN])
    return ids
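
# Note: tf.py_function keeps the SentencePiece call in Python, so tokenization
# runs in the host-side tf.data pipeline rather than inside the TPU-compiled
# graph; set_shape restores the static (MAX_LEN,) shape the graph needs.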

def token_dropout(tokens, drop_prob=DROPOUT_AUG):
    rnd = tf.random.uniform(tf.shape(tokens), 0, 1)
    keep_mask = rnd > drop_prob
    return tf.where(keep_mask, tokens, tf.cast(pad_id, tf.int32))

def make_views(tokens):
    v1 = token_dropout(tokens)
    v2 = token_dropout(tokens)
    return v1, v2
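
# Each view independently replaces ~DROPOUT_AUG (10%) of the positions with
# <pad>, so the two views of a sentence differ in which tokens survive; these
# pairs are the positives for the contrastive loss below.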

# Dataset backed by the plain-text corpus
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))

ds = ds.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.repeat()
ds = ds.map(lambda t: make_views(t), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
# model.fit expects (inputs, labels); the labels are unused dummies
ds = ds.map(lambda v1, v2: ((v1, v2), tf.zeros([BATCH_SIZE], dtype=tf.float32)), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.prefetch(tf.data.AUTOTUNE)
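
# Sanity check (optional; not executed during training): one element should be
# ((view1, view2), labels) with shapes (768, 384), (768, 384), (768,):
# (v1, v2), y = next(iter(ds))
# print(v1.shape, v2.shape, y.shape)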

# =========================
# Model definition (the SentenceEncoder as originally built)
# =========================
class DynamicConv(layers.Layer):
    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1
        self.k = k
        self.dense = layers.Dense(d_model, activation='silu')
        self.proj = layers.Dense(d_model)
        self.generator = layers.Dense(k, dtype='float32')

    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)

        B = tf.shape(x)[0]
        L = tf.shape(x)[1]
        D = tf.shape(x)[2]

        # Predict a softmax-normalized k-tap kernel for every position
        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)

        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])

        # Gather the k-wide window around each position
        x_pad_4d = tf.expand_dims(x_pad, axis=1)
        patches = tf.image.extract_patches(
            images=x_pad_4d,
            sizes=[1, 1, self.k, 1],
            strides=[1, 1, 1, 1],
            rates=[1, 1, 1, 1],
            padding='VALID'
        )
        patches = tf.reshape(patches, [B, L, self.k, D])

        kernels_exp = tf.expand_dims(kernels, axis=-1)
        out = tf.reduce_sum(patches * kernels_exp, axis=2)
        out = self.proj(out)

        # 🔥 cast back to the caller's dtype (bfloat16 under mixed precision)
        return tf.cast(out, x_in.dtype)
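
# Shape walk-through (illustrative, B=1, L=384, D=512, k=7): padding gives
# (1, 390, 512); extract_patches returns (1, 1, 384, 7*512), reshaped to
# (1, 384, 7, 512); the (1, 384, 7, 1) kernels mix the 7 taps per position,
# yielding (1, 384, 512) again.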

class EncoderBlock(layers.Layer):
    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, seq_len=MAX_LEN, num_conv_layers=2):
        super().__init__()
        self.embed_dim = embed_dim
        self.seq_len = seq_len

        # MLP / FFN
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(d_model=embed_dim, k=7) for _ in range(num_conv_layers)]
        # LayerNorm
        self.ln = layers.LayerNormalization(epsilon=1e-5)   # input normalization
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)  # conv residual
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)  # FFN residual

    def call(self, x, mask=None):
        # Normalize the block input
        x_norm = self.ln(x)

        # Pass through the stacked DynamicConv layers
        out = x_norm
        for block in self.blocks:
            out = block(out)
        # Conv residual connection
        x = x_norm + self.ln1(out)

        # FFN / GLU over the conv output
        h = self.fc1(out)
        g, v_split = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v_split
        h = self.fc2(h)

        # FFN residual connection
        x = x + self.ln2(h)

        return x
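
# Note: with ff_dim=1152, tf.split yields two 576-dim halves; silu(g) gates
# v_split, and fc2 projects the gated 576 dims back to embed_dim=512.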


class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

class SentenceEncoder(Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM, max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim, activation=None)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)

        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)

        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)

        h = self.ln_f(h)

        # 🔥 force the attention scores to float32 for a stable masked softmax
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)

        # <pad> positions get -1e9 so they receive ~zero attention weight
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)

        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)

        # 🔥 only the output is cast to float32
        return tf.cast(latent, tf.float32)
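
# Each sentence thus maps to one L2-normalized LATENT_DIM (512)-dim vector, so
# the dot products in the loss below are cosine similarities.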

# Contrastive wrapper: encode both views and return their (2B, D) concatenation
def build_contrastive_model(vocab_size):
    encoder = SentenceEncoder(vocab_size=vocab_size)
    input1 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view1")
    input2 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view2")
    z1 = encoder(input1)
    z2 = encoder(input2)
    out = layers.Concatenate(axis=0)([z1, z2])  # (2B, D)
    return Model(inputs=[input1, input2], outputs=out), encoder

def nt_xent_loss(y_true, y_pred):
    # y_pred: (2N, D), rows already L2-normalized
    z = tf.cast(y_pred, tf.float32)
    sim = tf.matmul(z, z, transpose_b=True)  # (2N, 2N)
    sim = sim / TEMPERATURE
    # Large negative on the diagonal so a row cannot pick itself
    diag = tf.eye(tf.shape(sim)[0])
    sim = sim - diag * 1e9
    N2 = tf.shape(sim)[0]
    N = N2 // 2
    # Positive index for row i: i + N if i < N, else i - N
    labels_pos = tf.concat([tf.range(N, N2), tf.range(0, N)], axis=0)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pos, logits=sim)
    return tf.reduce_mean(loss)
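
# Worked example (illustrative): with N = 2 sentences the rows of y_pred are
# [s0_v1, s1_v1, s0_v2, s1_v2] and labels_pos = [2, 3, 0, 1]; row 0 must pick
# row 2 (the other view of sentence 0) out of the 2N - 1 remaining candidates.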

# =========================
# Build / compile the model (inside strategy.scope)
# =========================
with strategy.scope():
    model, encoder = build_contrastive_model(vocab_size)
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    model.compile(optimizer=optimizer, loss=nt_xent_loss)
    model.summary()

# =========================
# Compute steps_per_epoch (from the corpus line count)
# =========================
try:
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        num_lines = sum(1 for _ in f)
except Exception as e:
    print("Warning: failed to count the data file's lines:", e)
    num_lines = None

if num_lines:
    steps_per_epoch = max(1, num_lines // BATCH_SIZE)
else:
    # Fallback (kept deliberately small)
    steps_per_epoch = 1000

print("steps_per_epoch:", steps_per_epoch)


# =========================
# Run training
# =========================
history = model.fit(ds, epochs=EPOCHS, steps_per_epoch=steps_per_epoch, verbose=1)

# Save the encoder weights
encoder.save_weights("encoder_fit.weights.h5")
print("Training finished and weights saved.")