import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig, GenerationMixin
from transformers.modeling_outputs import CausalLMOutput

class SlopiestConfig(PretrainedConfig):
    """Configuration for the tiny GPT-style "slopiest" causal language model."""

    model_type = "slopiest"

    def __init__(self, vocab_size=50257, embeddings_size=384,
                 head_size=6, layer_size=6, block_size=256, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embeddings_size = embeddings_size
        self.head_size = head_size
        self.layer_size = layer_size
        self.block_size = block_size

        # Mirror the standard Hugging Face attribute names so generic tooling
        # that reads the usual config fields keeps working.
        self.hidden_size = embeddings_size
        self.num_hidden_layers = layer_size
        self.num_attention_heads = head_size
        self.max_position_embeddings = block_size

class SlopiestForCausalLM(PreTrainedModel, GenerationMixin):
    """Decoder-only language model: a stack of nn.TransformerEncoder layers run
    under a causal mask, wrapped in the PreTrainedModel / GenerationMixin
    interface so it works with Trainer, save_pretrained(), and generate()."""

    config_class = SlopiestConfig

    def __init__(self, config):
        super().__init__(config)
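        # Learned token embeddings plus learned absolute position embeddings;
        # the position table caps the usable context length at config.block_size.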
        self.token_embeddings = nn.Embedding(config.vocab_size, config.embeddings_size)
        self.positional_embeddings = nn.Embedding(config.block_size, config.embeddings_size)
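        # Pre-norm encoder layers with GELU and a 4x feed-forward expansion;
        # causality is imposed at call time by the mask passed in forward().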
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=config.embeddings_size,
            nhead=config.head_size,
            dim_feedforward=config.embeddings_size * 4,
            batch_first=True,
            norm_first=True,
            activation="gelu",
            bias=False
        )
        self.transformer = nn.TransformerEncoder(
            encoder_layer,
            num_layers=config.layer_size,
            norm=nn.LayerNorm(config.embeddings_size)
        )
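        # Final projection from hidden states to vocabulary logits.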
        self.lm_head = nn.Linear(config.embeddings_size, config.vocab_size, bias=False)
        self.post_init()

    def forward(self, input_ids, labels=None, **kwargs):
        batch, seq_len = input_ids.shape
        # Sum learned token and absolute position embeddings.
        tok_emb = self.token_embeddings(input_ids)
        pos = torch.arange(seq_len, device=input_ids.device)
        pos_emb = self.positional_embeddings(pos)
        x = tok_emb + pos_emb
        # Causal (upper-triangular) mask so each position attends only to itself
        # and earlier positions, turning the encoder stack into a decoder.
        mask = nn.Transformer.generate_square_subsequent_mask(seq_len, device=input_ids.device)
        x = self.transformer(x, mask=mask, is_causal=True)
        logits = self.lm_head(x)

        loss = None
        if labels is not None:
            # Hugging Face convention: labels are the unshifted input_ids, so shift
            # them here: position i predicts token i + 1. Tokens marked -100 are
            # ignored by cross_entropy's default ignore_index.
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = nn.functional.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )
        return CausalLMOutput(loss=loss, logits=logits)
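

# Usage sketch, not part of the model definition. Assumptions: torch>=2.1 for the
# bias=False flag on nn.TransformerEncoderLayer, and a transformers version whose
# GenerationMixin supplies a default prepare_inputs_for_generation. The tensors
# below are placeholders standing in for real tokenized data.
if __name__ == "__main__":
    config = SlopiestConfig()
    model = SlopiestForCausalLM(config)

    # Random token ids standing in for a tokenized batch (batch of 2, length 16).
    dummy_ids = torch.randint(0, config.vocab_size, (2, 16))
    out = model(input_ids=dummy_ids, labels=dummy_ids)
    print("loss:", out.loss.item(), "logits shape:", tuple(out.logits.shape))

    # Greedy decoding; without a KV cache the full prefix is re-encoded each step,
    # which is slow but fine for a model this small.
    generated = model.generate(dummy_ids[:, :4], max_new_tokens=8, do_sample=False)
    print("generated shape:", tuple(generated.shape))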