KartikGPT committed
Commit 5db0083 · verified · 1 Parent(s): 12dc99f

Upload folder using huggingface_hub

Files changed (6)
  1. README.md +11 -0
  2. config.json +1 -0
  3. handler.py +30 -0
  4. modeling.py +101 -0
  5. pytorch_model.bin +3 -0
  6. vocab.json +1 -0
README.md ADDED
@@ -0,0 +1,11 @@
+ ---
+ library_name: pytorch
+ tags:
+ - text-generation
+ - transformer
+ - char-level
+ ---
+
+ # Char-Level Transformer Language Model
+
+ A small GPT-style character-level Transformer trained on Shakespeare text.
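The committed handler.py (further down in this diff) follows the custom `EndpointHandler` convention, so a deployed Inference Endpoint would expect a JSON body carrying `inputs` plus an optional `max_new_tokens`. A minimal request sketch, assuming such a deployment; the endpoint URL and token below are placeholders, not values from this repo:

```python
# Hypothetical request to an Inference Endpoint running this repo's handler.py.
# ENDPOINT_URL and HF_TOKEN are placeholders to fill in.
import requests

ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "hf_xxx"

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
    json={"inputs": "ROMEO:", "max_new_tokens": 100},
)
print(response.json()["generated_text"])
```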
config.json ADDED
@@ -0,0 +1 @@
+ {"vocab_size": 65, "block_size": 32, "n_embd": 64, "n_head": 4, "n_layer": 4, "dropout": 0.0}
handler.py ADDED
@@ -0,0 +1,30 @@
+ import json
+ import torch
+ from modeling import BigramLanguageModel
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         with open(f"{path}/config.json") as f:
+             self.config = json.load(f)
+
+         with open(f"{path}/vocab.json") as f:
+             self.stoi = json.load(f)
+
+         self.itos = {int(v): k for k, v in self.stoi.items()}
+
+         self.model = BigramLanguageModel(self.config)
+         self.model.load_state_dict(
+             torch.load(f"{path}/pytorch_model.bin", map_location="cpu")
+         )
+         self.model.eval()
+
+     def __call__(self, data):
+         text = data["inputs"]
+         max_new_tokens = data.get("max_new_tokens", 200)
+
+         idx = torch.tensor([[self.stoi[c] for c in text]], dtype=torch.long)
+         out = self.model.generate(idx, max_new_tokens)[0]
+
+         return {
+             "generated_text": "".join(self.itos[i.item()] for i in out)
+         }
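A quick local smoke test for the handler (a sketch, not part of the commit): it assumes config.json, vocab.json, pytorch_model.bin, and modeling.py have been downloaded into the working directory, and that every character of the prompt appears in vocab.json, since `self.stoi[c]` raises a `KeyError` otherwise.

```python
# Local smoke test: load the handler from the repo files in the current
# directory and generate 100 characters from a short prompt.
from handler import EndpointHandler

handler = EndpointHandler(path=".")
result = handler({"inputs": "ROMEO:", "max_new_tokens": 100})
print(result["generated_text"])
```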
modeling.py ADDED
@@ -0,0 +1,101 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import json
+
+ class BigramLanguageModel(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.vocab_size = config["vocab_size"]
+         self.block_size = config["block_size"]
+         n_embd = config["n_embd"]
+         n_head = config["n_head"]
+         n_layer = config["n_layer"]
+
+         self.token_embedding_table = nn.Embedding(self.vocab_size, n_embd)
+         self.position_embedding_table = nn.Embedding(self.block_size, n_embd)
+
+         self.blocks = nn.Sequential(*[
+             Block(n_embd, n_head) for _ in range(n_layer)
+         ])
+         self.ln_f = nn.LayerNorm(n_embd)
+         self.lm_head = nn.Linear(n_embd, self.vocab_size)
+
+     def forward(self, idx):
+         B, T = idx.shape
+         tok_emb = self.token_embedding_table(idx)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device))
+         x = tok_emb + pos_emb
+         x = self.blocks(x)
+         x = self.ln_f(x)
+         return self.lm_head(x)
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens):
+         for _ in range(max_new_tokens):
+             idx_cond = idx[:, -self.block_size:]
+             logits = self(idx_cond)
+             logits = logits[:, -1, :]
+             probs = F.softmax(logits, dim=-1)
+             idx_next = torch.multinomial(probs, 1)
+             idx = torch.cat([idx, idx_next], dim=1)
+         return idx
+
+
+ class Head(nn.Module):
+     def __init__(self, n_embd, head_size, block_size):
+         super().__init__()
+         self.key = nn.Linear(n_embd, head_size, bias=False)
+         self.query = nn.Linear(n_embd, head_size, bias=False)
+         self.value = nn.Linear(n_embd, head_size, bias=False)
+         self.register_buffer("tril", torch.tril(torch.ones(block_size, block_size)))
+
+     def forward(self, x):
+         B, T, C = x.shape
+         k = self.key(x)
+         q = self.query(x)
+         wei = q @ k.transpose(-2, -1) * C ** -0.5
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float("-inf"))
+         wei = F.softmax(wei, dim=-1)
+         v = self.value(x)
+         return wei @ v
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, n_embd, n_head, block_size):
+         super().__init__()
+         head_size = n_embd // n_head
+         self.heads = nn.ModuleList([
+             Head(n_embd, head_size, block_size) for _ in range(n_head)
+         ])
+         self.proj = nn.Linear(n_embd, n_embd)
+
+     def forward(self, x):
+         return self.proj(torch.cat([h(x) for h in self.heads], dim=-1))
+
+
+ class FeedForward(nn.Module):
+     def __init__(self, n_embd):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(n_embd, 4 * n_embd),
+             nn.ReLU(),
+             nn.Linear(4 * n_embd, n_embd),
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+
+ class Block(nn.Module):
+     def __init__(self, n_embd, n_head):
+         super().__init__()
+         self.sa = MultiHeadAttention(n_embd, n_head, block_size=32)
+         self.ffwd = FeedForward(n_embd)
+         self.ln1 = nn.LayerNorm(n_embd)
+         self.ln2 = nn.LayerNorm(n_embd)
+
+     def forward(self, x):
+         x = x + self.sa(self.ln1(x))
+         x = x + self.ffwd(self.ln2(x))
+         return x
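modeling.py can also be exercised on its own. A minimal sanity check, assuming the repo files sit in the working directory: the forward pass should return logits of shape `(batch, seq_len, vocab_size)`, i.e. `(1, 8, 65)` for this config, and the parameter count comes out to roughly 210k.

```python
# Instantiate BigramLanguageModel from config.json, load the LFS checkpoint,
# and sanity-check the output shape and parameter count.
import json
import torch
from modeling import BigramLanguageModel

with open("config.json") as f:
    config = json.load(f)

model = BigramLanguageModel(config)
model.load_state_dict(torch.load("pytorch_model.bin", map_location="cpu"))
model.eval()

idx = torch.zeros((1, 8), dtype=torch.long)  # token id 0 is "\n" in vocab.json
with torch.no_grad():
    logits = model(idx)

print(logits.shape)                                 # torch.Size([1, 8, 65])
print(sum(p.numel() for p in model.parameters()))   # about 210k parameters
```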
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26a2821c37b079691dc78d9ffe3e4d3a83a832c00f1443dcb33dcd605bded3f5
+ size 944859
vocab.json ADDED
@@ -0,0 +1 @@
+ {"\n": 0, " ": 1, "!": 2, "$": 3, "&": 4, "'": 5, ",": 6, "-": 7, ".": 8, "3": 9, ":": 10, ";": 11, "?": 12, "A": 13, "B": 14, "C": 15, "D": 16, "E": 17, "F": 18, "G": 19, "H": 20, "I": 21, "J": 22, "K": 23, "L": 24, "M": 25, "N": 26, "O": 27, "P": 28, "Q": 29, "R": 30, "S": 31, "T": 32, "U": 33, "V": 34, "W": 35, "X": 36, "Y": 37, "Z": 38, "a": 39, "b": 40, "c": 41, "d": 42, "e": 43, "f": 44, "g": 45, "h": 46, "i": 47, "j": 48, "k": 49, "l": 50, "m": 51, "n": 52, "o": 53, "p": 54, "q": 55, "r": 56, "s": 57, "t": 58, "u": 59, "v": 60, "w": 61, "x": 62, "y": 63, "z": 64}