vinay0123 committed on
Commit 788daf6 · verified · 1 Parent(s): 6b08ab3

Upload 4 files

Files changed (4)
  1. config.json +9 -0
  2. inference.py +43 -0
  3. model.py +52 -0
  4. vocab.json +0 -0
config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "input_type": "text",
+   "output_type": "text",
+   "examples": [
+     "What is a savings account?",
+     "How to apply for a loan?",
+     "Tell me about net banking."
+   ]
+ }
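
Note: config.json only declares the I/O types and example prompts; none of the committed files read it. As a minimal sketch (not part of this commit), a hypothetical app.py for a Gradio demo on Spaces could wire it up like this, reusing generate_response from inference.py:

import json

import gradio as gr

from inference import generate_response  # importing this loads the model once

# Keep the UI in sync with the declared config.
with open("config.json", "r") as f:
    config = json.load(f)

demo = gr.Interface(
    fn=generate_response,
    inputs=config["input_type"],    # "text"
    outputs=config["output_type"],  # "text"
    examples=config["examples"],
)
demo.launch()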
inference.py ADDED
@@ -0,0 +1,43 @@
+ # inference.py
+
+ import json
+
+ import torch
+ from model import GPTModel, ScratchTokenizer
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Initialize tokenizer
+ tokenizer = ScratchTokenizer()
+
+ # The vocabulary must be rebuilt manually or loaded from a saved file; here we
+ # load the vocab.json saved during training (recommended for Hugging Face Spaces).
+ with open("vocab.json", "r") as f:
+     vocab = json.load(f)
+
+ tokenizer.word2idx = vocab["word2idx"]
+ tokenizer.idx2word = {int(k): v for k, v in vocab["idx2word"].items()}  # JSON keys are strings
+ tokenizer.vocab_size = vocab["vocab_size"]
+
+ # Load model weights
+ model = GPTModel(vocab_size=tokenizer.vocab_size)
+ model.load_state_dict(torch.load("gpt_model.pth", map_location=device))
+ model.to(device)
+ model.eval()
+
+ # Greedy decoding: repeatedly append the most likely next token.
+ @torch.no_grad()
+ def generate_response(query, max_length=200):
+     src = torch.tensor(tokenizer.encode(query)).unsqueeze(0).to(device)
+     tgt = torch.tensor([[1]]).to(device)  # start from the <SOS> token
+
+     # Generate at most max_length - 1 new tokens so tgt never outgrows
+     # the model's positional embedding (max_len=200).
+     for _ in range(max_length - 1):
+         output = model(src, tgt)
+         next_word = output.argmax(-1)[:, -1].unsqueeze(1)
+         tgt = torch.cat([tgt, next_word], dim=1)
+         if next_word.item() == 2:  # stop at <EOS>
+             break
+
+     return tokenizer.decode(tgt.squeeze(0).tolist())
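
A quick smoke test, assuming gpt_model.pth is available next to these files (the checkpoint itself is not part of this commit), could be appended to inference.py:

if __name__ == "__main__":
    # One of the example prompts from config.json.
    print(generate_response("What is a savings account?"))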
model.py ADDED
@@ -0,0 +1,52 @@
+ # model.py
+
+ import torch
+ import torch.nn as nn
+
+ # Scratch tokenizer: whitespace-split words mapped to integer ids.
+ class ScratchTokenizer:
+     def __init__(self):
+         self.word2idx = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
+         self.idx2word = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
+         self.vocab_size = 4
+
+     def build_vocab(self, texts):
+         for text in texts:
+             for word in text.split():
+                 if word not in self.word2idx:
+                     self.word2idx[word] = self.vocab_size
+                     self.idx2word[self.vocab_size] = word
+                     self.vocab_size += 1
+
+     def encode(self, text, max_len=200):
+         # Map unknown words to <UNK> (id 3), wrap with <SOS>/<EOS>, pad to max_len.
+         tokens = [self.word2idx.get(word, 3) for word in text.split()]
+         tokens = [1] + tokens[:max_len - 2] + [2]
+         return tokens + [0] * (max_len - len(tokens))
+
+     def decode(self, tokens):
+         # Skip the special tokens <PAD>, <SOS> and <EOS> (ids 0-2) in the output text.
+         return " ".join(self.idx2word.get(idx, "<UNK>") for idx in tokens if idx > 2)
+
+ # Transformer model. Despite the name, this is not a decoder-only GPT: the
+ # TransformerDecoder cross-attends to the embedded source sequence as memory.
+ class GPTModel(nn.Module):
+     def __init__(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
+         super().__init__()
+         self.embedding = nn.Embedding(vocab_size, embed_size)
+         self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
+         self.transformer = nn.TransformerDecoder(
+             nn.TransformerDecoderLayer(d_model=embed_size, nhead=num_heads),
+             num_layers=num_layers,
+         )
+         self.fc_out = nn.Linear(embed_size, vocab_size)
+
+     def forward(self, src, tgt):
+         src_emb = self.embedding(src) + self.pos_embedding[:, :src.size(1), :]
+         tgt_emb = self.embedding(tgt) + self.pos_embedding[:, :tgt.size(1), :]
+
+         # Causal mask: each target position attends only to earlier positions.
+         tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(1)).to(tgt.device)
+         # TransformerDecoder expects (seq, batch, embed) when batch_first is unset.
+         output = self.transformer(tgt_emb.permute(1, 0, 2), src_emb.permute(1, 0, 2), tgt_mask=tgt_mask)
+         return self.fc_out(output.permute(1, 0, 2))
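
A minimal shape check for GPTModel, with an illustrative dummy vocabulary size of 100 (not taken from this commit):

import torch

from model import GPTModel

model = GPTModel(vocab_size=100)
src = torch.randint(0, 100, (1, 200))  # a padded source sequence of max_len tokens
tgt = torch.tensor([[1]])              # decoding starts from <SOS>
logits = model(src, tgt)
print(logits.shape)  # torch.Size([1, 1, 100]), i.e. (batch, tgt_len, vocab_size)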
vocab.json ADDED
The diff for this file is too large to render.
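
inference.py expects vocab.json to hold the keys word2idx, idx2word and vocab_size. A sketch of how it could have been produced at training time (training_texts is a hypothetical list of training strings; the training script is not in this commit):

import json

from model import ScratchTokenizer

tokenizer = ScratchTokenizer()
tokenizer.build_vocab(training_texts)  # hypothetical: the training corpus

# json.dump turns the int keys of idx2word into strings, which is why
# inference.py casts them back with int(k).
with open("vocab.json", "w") as f:
    json.dump({
        "word2idx": tokenizer.word2idx,
        "idx2word": tokenizer.idx2word,
        "vocab_size": tokenizer.vocab_size,
    }, f)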