Akshitha1 committed on
Commit
b8d324f
·
verified ·
1 Parent(s): 1871bb7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -14,7 +14,7 @@ df = pd.read_csv(url)
14
 
15
  # Tokenizer
16
  class ScratchTokenizer:
17
- def _init_(self):
18
  self.word2idx = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
19
  self.idx2word = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
20
  self.vocab_size = 4
@@ -44,15 +44,15 @@ tokenizer.build_vocab(train_data["instruction"].tolist() + train_data["response"
44
 
45
  # Dataset Class (not used in inference but useful for training)
46
  class TextDataset(Dataset):
47
- def _init_(self, data, tokenizer, max_len=200):
48
  self.data = data
49
  self.tokenizer = tokenizer
50
  self.max_len = max_len
51
 
52
- def _len_(self):
53
  return len(self.data)
54
 
55
- def _getitem_(self, idx):
56
  src_text = self.data.iloc[idx]["instruction"]
57
  tgt_text = self.data.iloc[idx]["response"]
58
  src = torch.tensor(self.tokenizer.encode(src_text), dtype=torch.long)
@@ -61,8 +61,8 @@ class TextDataset(Dataset):
61
 
62
  # Model
63
  class GPTModel(nn.Module):
64
- def _init_(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
65
- super(GPTModel, self)._init_()
66
  self.embedding = nn.Embedding(vocab_size, embed_size)
67
  self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
68
  self.transformer = nn.TransformerDecoder(
 
14
 
15
  # Tokenizer
16
  class ScratchTokenizer:
17
+ def __init__(self):
18
  self.word2idx = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
19
  self.idx2word = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
20
  self.vocab_size = 4
 
44
 
45
  # Dataset Class (not used in inference but useful for training)
46
  class TextDataset(Dataset):
47
+ def __init__(self, data, tokenizer, max_len=200):
48
  self.data = data
49
  self.tokenizer = tokenizer
50
  self.max_len = max_len
51
 
52
+ def __len__(self):
53
  return len(self.data)
54
 
55
+ def __getitem__(self, idx):
56
  src_text = self.data.iloc[idx]["instruction"]
57
  tgt_text = self.data.iloc[idx]["response"]
58
  src = torch.tensor(self.tokenizer.encode(src_text), dtype=torch.long)
 
61
 
62
  # Model
63
  class GPTModel(nn.Module):
64
+ def __init__(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
65
+ super(GPTModel, self).__init__()
66
  self.embedding = nn.Embedding(vocab_size, embed_size)
67
  self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
68
  self.transformer = nn.TransformerDecoder(