import json
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
import sentencepiece as spm
import requests

# Load the SentencePiece tokenizer
sp = spm.SentencePieceProcessor()
sp.load("ko_unigram.model")

# Extract special token IDs
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != -1 else 0
start_id = sp.piece_to_id("<start>")
sep_id = sp.piece_to_id("<sep>")
end_id = sp.piece_to_id("<end>")
unk_id = sp.piece_to_id("<unk>")

vocab_size = sp.get_piece_size()
print(f"Vocabulary size: {vocab_size}")

# Text <-> ID conversion helpers
def text_to_ids(text):
    return sp.encode(text, out_type=int)

def ids_to_text(ids):
    return sp.decode(ids)
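
# Quick encode/decode sanity check (an illustrative sketch; assumes ko_unigram.model
# above is present, and the sample string is arbitrary).
_sample = "안녕하세요"
print(text_to_ids(_sample))               # token IDs for the sample string
print(ids_to_text(text_to_ids(_sample)))  # should reproduce the original text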

max_len = 230
batch_size = 128

class Lo(layers.Layer):
    """Small GELU MLP head (defined here but unused by the model below)."""
    def __init__(self, d_model):
        super().__init__()
        # Keep internal computation in float32
        self.proj = layers.Dense(d_model, use_bias=True, dtype='float32')
        self.p = layers.Dense(96, use_bias=True, dtype='float32')
        self._out_dtype = 'float32'

    def call(self, x):
        # x may be bfloat16; cast to float32 for stable intermediate computation
        x_f32 = tf.cast(x, tf.float32)
        x = self.proj(x_f32)
        x = tf.nn.gelu(x)
        x = self.p(x)
        # cast back to model dtype for consistency
        return tf.cast(x, self._out_dtype)

class LoSoU(layers.Layer):
    """
    Stabilized LoSoU layer (dynamic alpha).
    - alpha is computed dynamically from the input: alpha = sigmoid(Linear(x))
    - Uses an exponential moving average (EMA) over time instead of a cumulative
      sum (alpha is the smoothing factor)
    - Internal computation runs in float32 (improves stability under TPU bfloat16)
    - The EMA result is mean-normalized and clipped, with a small epsilon guard
    """
    def __init__(self, d_model, clip_value=5.0, eps=1e-6):
        super().__init__()
        # Run most of the computation in float32
        self.d_model = d_model
        self.clip_value = float(clip_value)
        self.eps = float(eps)

        # Projection / gating layers in float32
        self.Q = layers.Dense(96, dtype='float32')
        self.K = layers.Dense(96, dtype='float32')
        self.V = layers.Dense(96, activation='gelu', dtype='float32')
        self.proj = layers.Dense(d_model, use_bias=True, dtype='float32')
        self.norm = layers.LayerNormalization(epsilon=1e-5, dtype='float32')

        # Layer that computes the dynamic alpha. alpha must lie in [0, 1], so a
        # sigmoid activation is used, applied per position:
        # (B, L, d_model) -> (B, L, 1)
        self.alpha_linear = layers.Dense(1, activation='sigmoid', dtype='float32')

    def _ema_over_time(self, score, alpha_dynamic):
        # score: (B, L, D) float32, roughly in (-1, 1)
        # alpha_dynamic: (B, L, 1) float32 in [0, 1]

        # transpose to (L, B, D) to scan over time steps
        seq = tf.transpose(score, perm=[1, 0, 2])  # (L, B, D)
        alpha_seq = tf.transpose(alpha_dynamic, perm=[1, 0, 2])  # (L, B, 1)

        def step(prev_ema, inputs):
            x_t, alpha_t = inputs
            # prev_ema: (B, D), x_t: (B, D), alpha_t: (B, 1)
            new = alpha_t * x_t + (1.0 - alpha_t) * prev_ema
            return new

        # Initialize the EMA with the value at the first time step
        init = seq[0]  # (B, D)

        # The scan elems must have shapes (L-1, B, D) and (L-1, B, 1)
        remaining_seq = seq[1:]  # (L-1, B, D)
        remaining_alpha = alpha_seq[1:]  # (L-1, B, 1)

        # elems is a tuple of two tensors: (x_t, alpha_t)
        elems = (remaining_seq, remaining_alpha)

        ema_seq = tf.scan(fn=step, elems=elems, initializer=init)
        # Prepend the initial value
        ema_seq = tf.concat([tf.expand_dims(init, 0), ema_seq], axis=0)  # (L, B, D)

        # transpose back to (B, L, D)
        ema = tf.transpose(ema_seq, perm=[1, 0, 2])
        return ema

    def call(self, x):
        # x: (B, L, d_model), possibly bfloat16 or float32;
        # cast to float32 for all internal computation
        x_f32 = tf.cast(x, tf.float32)

        # Positionwise Q, K, V projections, each of width 96
        q = self.Q(x_f32)   # (B, L, 96)
        k = self.K(x_f32)   # (B, L, 96)
        v = self.V(x_f32)   # (B, L, 96), float32

        # gating signals: sigmoid in (0, 1), tanh in (-1, 1)
        g_q = tf.nn.sigmoid(q)
        g_k = tf.nn.tanh(k)

        # elementwise product -> roughly bounded in (-1, 1)
        score = g_q * g_k

        # Dynamic alpha: (B, L, d_model) -> (B, L, 1), rescaled from (0, 1) into
        # (0.1, 0.9) so the EMA never fully ignores the input or the history
        alpha_dynamic = self.alpha_linear(x_f32) * 0.8 + 0.1  # (B, L, 1)
        # Further post-processing is possible if needed,
        # e.g. alpha_dynamic = tf.clip_by_value(alpha_dynamic, 0.01, 0.99)

        # EMA across time (a stable alternative to cumsum)
        score_ema = self._ema_over_time(score, alpha_dynamic)

        # Normalize by the mean (plus eps) across the last dim to reduce scale variation
        mean_last = tf.reduce_mean(score_ema, axis=-1, keepdims=True)  # (B, L, 1)
        denom = tf.maximum(mean_last, self.eps)
        score_norm = score_ema / denom

        # Clip to avoid extreme values
        score_clipped = tf.clip_by_value(score_norm, -self.clip_value, self.clip_value)

        # Combine with V, then project back to d_model
        x_comb = score_clipped * v  # (B, L, 96)

        out = self.proj(x_comb)  # (B, L, d_model)
        out = self.norm(out)

        # Cast back to the original dtype for downstream layers
        return tf.cast(out, x.dtype)
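
# Minimal smoke test for LoSoU (an illustrative sketch, not part of the model below):
# the layer maps (B, L, d) -> (B, L, d_model) and preserves the input dtype.
_losou_demo = LoSoU(d_model=32)
_demo_out = _losou_demo(tf.random.normal((2, 5, 32)))
print("LoSoU demo output:", _demo_out.shape, _demo_out.dtype)  # (2, 5, 32), float32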

class Block(layers.Layer):
    def __init__(self, d_model, hyper_n):
        super().__init__()
        self.losou = [LoSoU(d_model) for _ in range(hyper_n)]

    def call(self, x):
        for losou in self.losou:
            x = losou(x)
        return x

class ReLaM(tf.keras.Model):
    def __init__(self, vocab_size, max_seq_len, d_model, n_layers, dropout_rate=0.1):
        super().__init__()
        # Embedding width is fixed at 128 (independent of d_model, which sets the
        # LoSoU projection width); dropout_rate is currently unused.
        self.token_embedding = layers.Embedding(vocab_size, 128)
        self.pos_embedding = layers.Embedding(max_seq_len, 128)
        self.blocks = [Block(d_model, hyper_n=1) for _ in range(n_layers)]
        self.proj = layers.Dense(128)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype="float32")

    def call(self, x, training=False):
        seq_len = tf.shape(x)[1]
        positions = tf.range(seq_len)[tf.newaxis, :]
        x = self.token_embedding(x) + self.pos_embedding(positions)
        for block in self.blocks:
            x = block(x)
        x = self.proj(x)
        x = self.ln_f(x)
        # Weight-tied output head: score against the token embedding matrix
        embedding_matrix = tf.cast(self.token_embedding.embeddings, x.dtype)
        logits = tf.matmul(x, embedding_matrix, transpose_b=True)
        return tf.cast(logits, tf.float32)
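
# Shape check for ReLaM (an illustrative sketch with a tiny throwaway config,
# separate from the real model built below):
_tiny = ReLaM(vocab_size=100, max_seq_len=16, d_model=32, n_layers=1)
_tiny_logits = _tiny(tf.zeros((1, 16), dtype=tf.int32))
print("ReLaM demo logits:", _tiny_logits.shape)  # (1, 16, 100)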

# λͺ¨λΈ 생성
model = ReLaM(
    vocab_size=vocab_size,
    max_seq_len=max_len,
    d_model=256,
    n_layers=1
)

dummy_input = tf.zeros((1, max_len), dtype=tf.int32)
_ = model(dummy_input)
model.load_weights('/content/Cobra.weights.h5')
print("λͺ¨λΈ κ°€μ€‘μΉ˜ λ‘œλ“œ μ™„λ£Œ!")
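
# Optional: inspect the restored model (parameter counts depend on vocab_size
# and the checkpoint above).
model.summary()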

def generate_text_topp(model, prompt, max_len=100, max_gen=98, p=0.9, temperature=0.8, min_len=30):
    # Encode the prompt with the conversation markers used during training
    model_input = text_to_ids(f"<start> {prompt} <sep>")
    model_input = model_input[:max_len]
    generated = list(model_input)
    for step in range(max_gen):
        # Keep only the most recent max_len tokens as the model's input window
        if len(generated) > max_len:
            input_seq = generated[-max_len:]
        else:
            input_seq = generated
        input_padded = np.pad(input_seq, (0, max_len - len(input_seq)), constant_values=pad_id)
        input_tensor = tf.convert_to_tensor([input_padded])
        logits = model(input_tensor, training=False)
        # Read the logits at the last real (non-pad) position
        next_token_logits = logits[0, len(input_seq) - 1].numpy()
        # Discourage <end>, strongly discourage <pad>
        next_token_logits[end_id] -= 5.0
        next_token_logits[pad_id] -= 10.0
        probs = tf.nn.softmax(next_token_logits / temperature).numpy()
        # Nucleus (top-p) sampling: keep the smallest prefix of tokens whose
        # cumulative probability reaches p, renormalize, and sample from it
        sorted_indices = np.argsort(probs)[::-1]
        sorted_probs = probs[sorted_indices]
        cumulative_probs = np.cumsum(sorted_probs)
        cutoff = np.searchsorted(cumulative_probs, p)
        top_indices = sorted_indices[:cutoff + 1]
        top_probs = sorted_probs[:cutoff + 1]
        top_probs /= np.sum(top_probs)
        next_token_id = np.random.choice(top_indices, p=top_probs)
        if next_token_id == end_id:
            if len(generated) >= min_len:
                break
            continue  # don't emit <end> before min_len; resample on the next step
        generated.append(int(next_token_id))
    return ids_to_text(generated)
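
# Illustration of the top-p cutoff above (a standalone sketch, not tied to the model):
# with probabilities [0.5, 0.3, 0.15, 0.05] and p = 0.8, the cumulative sums are
# [0.5, 0.8, 0.95, 1.0], so searchsorted returns 1 and the first two tokens are kept.
_demo_probs = np.array([0.5, 0.3, 0.15, 0.05])
_cut = np.searchsorted(np.cumsum(_demo_probs), 0.8)
print(_demo_probs[:_cut + 1])  # -> [0.5 0.3]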

print("\n\n===== Generation result =====")
# Prompt (Korean): "I have to catch a bus later, so I'd better get ready. This was a fun conversation!"
print(generate_text_topp(model, "제가 이따가 버스를 타야 해서 준비 좀 해야겠어요. 재미있는 대화였습니다!", p=0.8))