chudai1019 committed on
Commit
eae2184
·
verified ·
1 Parent(s): 6574a80

Upload 5 files

Browse files
Files changed (5) hide show
  1. app.py +44 -0
  2. model10M.pt +3 -0
  3. model_transformer.py +37 -0
  4. requirements.txt +3 -0
  5. tokenizer.json +135 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from model_transformer import TransformerLM
3
+ from tokenizers import Tokenizer
4
+ import gradio as gr
5
+
6
# Load tokenizer
# Word-level tokenizer serialized next to the app; shared by encode()/decode().
tok = Tokenizer.from_file("tokenizer.json")
8
+
9
def encode(text):
    """Tokenize `text`, returning its list of integer token ids."""
    encoding = tok.encode(text)
    return encoding.ids
11
+
12
def decode(ids):
    """Turn a list of token ids back into a readable string."""
    text = tok.decode(ids)
    return text
14
+
15
# Load model: size the embedding/head to the tokenizer's vocabulary,
# restore the checkpoint weights on CPU, then switch to inference mode.
vocab_size = tok.get_vocab_size()
model = TransformerLM(vocab_size)
state_dict = torch.load("model10M.pt", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()
20
+
21
# Text generation
def generate(prompt, max_len=100):
    """Greedy-decode a continuation of `prompt`.

    Args:
        prompt: input text, encoded with the shared word-level tokenizer.
        max_len: number of tokens to append (default 100).

    Returns:
        The decoded string covering the prompt plus the generated tokens.
    """
    ids = torch.tensor([encode(prompt)], dtype=torch.long)
    # Fix: the original re-entered `torch.no_grad()` on every loop iteration;
    # nothing in the loop needs gradients, so enter the context once.
    with torch.no_grad():
        for _ in range(max_len):
            logits = model(ids)  # (1, seq, vocab_size)
            # Greedy choice: most likely token after the last position.
            next_id = torch.argmax(logits[0, -1]).item()
            ids = torch.cat([ids, torch.tensor([[next_id]])], dim=1)
    return decode(ids[0].tolist())
34
+
35
# Gradio UI
prompt_input = gr.Textbox(lines=2, placeholder="Ask something...")

demo = gr.Interface(
    fn=generate,
    inputs=prompt_input,
    outputs="text",
    title="ChudAI (Sandesh Edition)",
    description="Your custom 10M Transformer AI running on HuggingFace.",
)

demo.launch()
model10M.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbfdce64e308d8c31e1b8705857b2c6d297033557291f1f94b226dcac19c331a
3
+ size 31756223
model_transformer.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position embeddings to a (batch, seq, emb) input.

    Even embedding columns carry sin, odd columns cos, at geometrically
    spaced frequencies (the "Attention Is All You Need" scheme).
    """

    def __init__(self, emb, max_len=2048):
        super().__init__()
        pe = torch.zeros(max_len, emb)
        pos = torch.arange(0, max_len).unsqueeze(1)
        # Frequency ladder 1 / 10000^(2i/emb) for the even column indices.
        div = torch.exp(torch.arange(0, emb, 2) * (-torch.log(torch.tensor(10000.0)) / emb))
        pe[:, 0::2] = torch.sin(pos * div)
        pe[:, 1::2] = torch.cos(pos * div)
        # Fix: register as a (non-persistent) buffer instead of a plain
        # attribute so the table follows `module.to(device)` automatically.
        # persistent=False keeps it out of state_dict, so the existing
        # model10M.pt checkpoint (saved without this key) still loads.
        self.register_buffer("pe", pe.unsqueeze(0), persistent=False)

    def forward(self, x):
        # x: (batch, seq, emb); add the first seq rows of the table.
        # `.to(x.device)` kept as a safety net for manually-moved inputs.
        return x + self.pe[:, :x.size(1), :].to(x.device)
16
+
17
class TransformerLM(nn.Module):
    """Word-level language model: token embedding + sinusoidal positions,
    a stack of Transformer encoder layers, and a linear vocabulary head.

    NOTE(review): no attention mask is passed, so attention is bidirectional
    rather than causal — confirm this matches how the checkpoint was trained
    before changing it.
    """

    def __init__(self, vocab_size, emb=256, n_heads=4, n_layers=4):
        super().__init__()
        # Attribute names (embed/pos/transformer/head) are state_dict keys;
        # they must stay as-is for checkpoint compatibility.
        self.embed = nn.Embedding(vocab_size, emb)
        self.pos = PositionalEncoding(emb)
        layer = nn.TransformerEncoderLayer(
            d_model=emb,
            nhead=n_heads,
            dim_feedforward=512,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(layer, num_layers=n_layers)
        self.head = nn.Linear(emb, vocab_size)

    def forward(self, x):
        # x: (batch, seq) token ids -> (batch, seq, vocab_size) logits.
        hidden = self.pos(self.embed(x))
        hidden = self.transformer(hidden)
        return self.head(hidden)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ tokenizers
3
+ gradio
tokenizer.json ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 0,
8
+ "content": "[PAD]",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ },
15
+ {
16
+ "id": 1,
17
+ "content": "[UNK]",
18
+ "single_word": false,
19
+ "lstrip": false,
20
+ "rstrip": false,
21
+ "normalized": false,
22
+ "special": true
23
+ },
24
+ {
25
+ "id": 2,
26
+ "content": "[CLS]",
27
+ "single_word": false,
28
+ "lstrip": false,
29
+ "rstrip": false,
30
+ "normalized": false,
31
+ "special": true
32
+ },
33
+ {
34
+ "id": 3,
35
+ "content": "[SEP]",
36
+ "single_word": false,
37
+ "lstrip": false,
38
+ "rstrip": false,
39
+ "normalized": false,
40
+ "special": true
41
+ },
42
+ {
43
+ "id": 4,
44
+ "content": "[MASK]",
45
+ "single_word": false,
46
+ "lstrip": false,
47
+ "rstrip": false,
48
+ "normalized": false,
49
+ "special": true
50
+ }
51
+ ],
52
+ "normalizer": {
53
+ "type": "Sequence",
54
+ "normalizers": [
55
+ {
56
+ "type": "NFKC"
57
+ }
58
+ ]
59
+ },
60
+ "pre_tokenizer": {
61
+ "type": "Whitespace"
62
+ },
63
+ "post_processor": null,
64
+ "decoder": null,
65
+ "model": {
66
+ "type": "WordLevel",
67
+ "vocab": {
68
+ "[PAD]": 0,
69
+ "[UNK]": 1,
70
+ "[CLS]": 2,
71
+ "[SEP]": 3,
72
+ "[MASK]": 4,
73
+ "I": 5,
74
+ ".": 6,
75
+ ",": 7,
76
+ "a": 8,
77
+ "am": 9,
78
+ "and": 10,
79
+ "is": 11,
80
+ "not": 12,
81
+ "'": 13,
82
+ "-": 14,
83
+ "...”": 15,
84
+ ":": 16,
85
+ "AI": 17,
86
+ "ChatGPT": 18,
87
+ "ChudAI": 19,
88
+ "If": 20,
89
+ "My": 21,
90
+ "Sandesh": 22,
91
+ "Transformer": 23,
92
+ "answer": 24,
93
+ "assistant": 25,
94
+ "avoid": 26,
95
+ "based": 27,
96
+ "but": 28,
97
+ "by": 29,
98
+ "completely": 30,
99
+ "concisely": 31,
100
+ "created": 32,
101
+ "don": 33,
102
+ "here": 34,
103
+ "intelligently": 35,
104
+ "know": 36,
105
+ "like": 37,
106
+ "logically": 38,
107
+ "loops": 39,
108
+ "m": 40,
109
+ "meaningless": 41,
110
+ "name": 42,
111
+ "of": 43,
112
+ "on": 44,
113
+ "pretend": 45,
114
+ "repetitive": 46,
115
+ "respond": 47,
116
+ "say": 48,
117
+ "sentences": 49,
118
+ "simplified": 50,
119
+ "small": 51,
120
+ "sure": 52,
121
+ "t": 53,
122
+ "things": 54,
123
+ "think": 55,
124
+ "to": 56,
125
+ "trained": 57,
126
+ "unsure": 58,
127
+ "version": 59,
128
+ "was": 60,
129
+ "what": 61,
130
+ "’": 62,
131
+ "“": 63
132
+ },
133
+ "unk_token": "[UNK]"
134
+ }
135
+ }