import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutput

from .layers import AttentionLayer
from .configuration_minitransformer import MiniTransformerConfig

from .attn_masks import causal_mask
from .attn_mods import generate_tanh_softcap
from .rotary_emb import precompute_freqs_cis

try:
    from liger_kernel.transformers.rms_norm import LigerRMSNorm as RMSNorm
    triton_norm = True
except ImportError as e:
    print(
        f"Unable to import Triton-based RMSNorm: {e}. Falling back to PyTorch implementation."
    )
    from torch.nn import RMSNorm
    triton_norm = False

class MiniTransformer(PreTrainedModel):
    config_class = MiniTransformerConfig

    def __init__(self, config) -> None:
        super().__init__(config)
        self.num_layers = config.num_layers
        assert config.dim % config.num_heads == 0, (
            f"dim ({config.dim}) must be divisible by num_heads ({config.num_heads})"
        )
        self.head_dim = config.dim // config.num_heads
        logit_softcap = generate_tanh_softcap(soft_cap=config.softcap)

        # Per pytorch/pytorch#123411, persistent=True (buffer kept in the state dict) for torch.compile and pipeline-parallel compatibility
        self.register_buffer("freqs_cis", precompute_freqs_cis(
            head_dim=self.head_dim,
            max_seq_len=config.seq_len,
            theta=config.theta,
        ), persistent=True)

        self.tok_emb = nn.Embedding(config.vocab_size, config.dim)
        self.dropout = nn.Dropout(config.dropout)

        self.layers = nn.ModuleList()
        for _ in range(self.num_layers):
            layer = AttentionLayer(config, mask_mod=causal_mask, score_mod=logit_softcap)
            self.layers.append(layer)

        self.norm = RMSNorm(config.dim)  # LigerRMSNorm when available, torch.nn.RMSNorm otherwise
        self.lm_head = nn.Linear(config.dim, config.vocab_size, bias=config.bias)
        # self.tok_emb.weight = self.lm_head.weight  # optional weight tying; _get_num_params accounts for it

        self.std = (config.dim) ** -0.5
        self.apply(self._init_weights)
        print("Model Parameter Count: %.2fM\n" % (self._get_num_params() / 1e6,))

    def forward(
        self,
        input_ids: torch.Tensor,
        labels: torch.Tensor = None,
        **kwargs
    ) -> CausalLMOutput:
        # Compute embeddings
        tok_emb = self.tok_emb(input_ids)

        for layer in self.layers:
            tok_emb = layer(tok_emb, self.freqs_cis)

        # Normalize and project to vocabulary
        tok_emb = self.norm(tok_emb)
        logits = self.lm_head(tok_emb)

        loss = None
        if labels is not None:
            # Shift so that tokens predict the next token
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
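            # e.g. with labels [t0, t1, t2, t3], the logits at position i are
            # scored against t_{i+1}; the final position has no target.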
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1)
            )

        return CausalLMOutput(
            loss=loss,
            logits=logits,
        )

    def _get_num_params(self):
        n_params = sum(p.numel() for p in self.parameters())
        if hasattr(self, "pos_emb") and self.pos_emb is not None:
            n_params -= self.pos_emb.weight.numel()
        if self.tok_emb.weight is self.lm_head.weight:
            n_params -= self.tok_emb.weight.numel()
        return n_params

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            std = self.std
            if hasattr(module, "SCALE_INIT"):
                # Scale down residual projections locally; mutating self.std here
                # would compound the shrinkage across every subsequent module.
                std *= (2 * self.num_layers) ** -0.5
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=self.std)

    @staticmethod
    def top_k_top_p_filtering(
        logits: torch.Tensor,
        top_k: int = 50,
        top_p: float = 0.95,
        filter_value: float = float("-inf"),
    ):
        """
        Filters a distribution of logits using top-k and/or nucleus (top-p) filtering.
        """
        # top_k
        if top_k > 0:
            top_k = min(top_k, logits.size(-1))
            # Remove all logits that are not in the top k
            indices_to_remove = logits < torch.topk(logits, top_k, dim=-1).values[:, -1, None]
            logits[indices_to_remove] = filter_value

        # top_p (nucleus)
        if 0 < top_p < 1.0:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift the indices to the right to keep also the first token above the threshold
            sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
            sorted_indices_to_remove[:, 0] = False

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = filter_value

        return logits
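
    # Worked example (hypothetical numbers, kept as comments so the class body
    # stays importable): for logits [[4.0, 3.0, 2.0, 1.0]] and top_k=2, every
    # logit below the 2nd-largest value is set to -inf; top_p=0.95 then sorts
    # the survivors, cumulates their softmax, and masks tokens past the 95%
    # nucleus, so multinomial sampling only ever sees the head of the
    # distribution.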

    def generate(
        self,
        input_ids: torch.LongTensor,
        max_new_tokens: int = 50,
        temperature: float = 0.5,
        top_k: int = 50,
        top_p: float = 0.95,
        eos_token_id: int = None,
        pad_token_id: int = 0,
        **kwargs
    ):
        """
        Naive token-by-token generation loop that uses top-k/top-p filtering and optional temperature.

        Args:
            input_ids (torch.LongTensor): shape (batch_size, sequence_length).
            max_new_tokens (int): max number of tokens to generate (beyond input_ids length).
            temperature (float): sampling temperature (> 0; 1.0 leaves logits unchanged).
            top_k (int): top-k sampling cutoff.
            top_p (float): nucleus (top-p) sampling cutoff.
            eos_token_id (int): if set, stop once every sequence in the batch produces it.
            pad_token_id (int): accepted for API compatibility; unused in this loop.
            kwargs: unused arguments (e.g. num_beams), accepted for compatibility.

        Returns:
            torch.LongTensor: shape (batch_size, sequence_length + generated_tokens).
        """

        # We'll accumulate new tokens into generated_ids
        generated_ids = input_ids.clone()

        for _ in range(max_new_tokens):
            # Forward pass to get logits for the last token
            outputs = self.forward(generated_ids)
            logits = outputs.logits[:, -1, :]  # shape: (batch_size, vocab_size)

            # Scale logits by temperature
            if temperature != 1.0:
                logits = logits / temperature

            # Filter logits using top-k and/or top-p
            logits = self.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)

            # Convert to probabilities
            probabilities = F.softmax(logits, dim=-1)

            # Sample from the distribution
            next_token = torch.multinomial(probabilities, num_samples=1)  # (batch_size, 1)

            # Append next token
            generated_ids = torch.cat([generated_ids, next_token], dim=1)

            # Stop early once every sequence in the batch has produced the EOS
            # token. (A finer-grained variant would freeze finished sequences
            # individually instead of waiting for the whole batch.)
            if eos_token_id is not None:
                if (next_token == eos_token_id).all():
                    break
        print("2=====================")
        print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
        print("2=====================")
        return generated_ids
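

# A minimal smoke-test sketch, not part of the original module. It assumes
# MiniTransformerConfig is constructible with its defaults and that the
# relative imports above resolve in your package layout; adjust as needed.
if __name__ == "__main__":
    config = MiniTransformerConfig()
    model = MiniTransformer(config)
    model.eval()

    # Random token ids stand in for a tokenized prompt.
    prompt = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        out = model.generate(prompt, max_new_tokens=16, temperature=0.7)
    print(out.shape)  # (1, 8 + number of generated tokens)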