Update app.py
app.py
CHANGED
@@ -14,7 +14,7 @@ df = pd.read_csv(url)
 
 # Tokenizer
 class ScratchTokenizer:
-    def
+    def __init__(self):
         self.word2idx = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
         self.idx2word = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
         self.vocab_size = 4
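The hunk above only shows ScratchTokenizer.__init__, while the surrounding code calls tokenizer.build_vocab(...) (see the next hunk header) and tokenizer.encode(...) (in TextDataset). Neither method appears in this diff, so the following is a minimal sketch of how a whitespace-level scratch tokenizer could implement them, reusing the special-token ids from __init__; the method bodies are an assumption, not the actual contents of app.py.

# Hypothetical sketch (not from app.py): one possible build_vocab / encode
# for the ScratchTokenizer shown in the hunk above.
class ScratchTokenizer:
    def __init__(self):
        self.word2idx = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
        self.idx2word = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
        self.vocab_size = 4

    def build_vocab(self, texts):
        # Assign a fresh id to every whitespace-separated token not seen before.
        for text in texts:
            for word in str(text).split():
                if word not in self.word2idx:
                    self.word2idx[word] = self.vocab_size
                    self.idx2word[self.vocab_size] = word
                    self.vocab_size += 1

    def encode(self, text):
        # Map words to ids (unknown words fall back to <UNK>) and wrap with <SOS>/<EOS>.
        ids = [self.word2idx.get(w, self.word2idx["<UNK>"]) for w in str(text).split()]
        return [self.word2idx["<SOS>"]] + ids + [self.word2idx["<EOS>"]]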
@@ -44,15 +44,15 @@ tokenizer.build_vocab(train_data["instruction"].tolist() + train_data["response"
 
 # Dataset Class (not used in inference but useful for training)
 class TextDataset(Dataset):
-    def
+    def __init__(self, data, tokenizer, max_len=200):
         self.data = data
         self.tokenizer = tokenizer
         self.max_len = max_len
 
-    def
+    def __len__(self):
         return len(self.data)
 
-    def
+    def __getitem__(self, idx):
         src_text = self.data.iloc[idx]["instruction"]
         tgt_text = self.data.iloc[idx]["response"]
         src = torch.tensor(self.tokenizer.encode(src_text), dtype=torch.long)
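A usage sketch for TextDataset as defined in the hunk above, assuming __getitem__ ends up returning a (src, tgt) pair of token-id tensors; the collate_fn and batch size below are assumptions, since this diff does not show how app.py batches data for training.

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def collate_fn(batch):
    # Pad variable-length (src, tgt) pairs to the longest sequence in the batch,
    # using id 0, which is <PAD> in the tokenizer above.
    srcs, tgts = zip(*batch)
    return (pad_sequence(srcs, batch_first=True, padding_value=0),
            pad_sequence(tgts, batch_first=True, padding_value=0))

train_dataset = TextDataset(train_data, tokenizer, max_len=200)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)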
@@ -61,8 +61,8 @@ class TextDataset(Dataset):
 
 # Model
 class GPTModel(nn.Module):
-    def
-        super(GPTModel, self).
+    def __init__(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
+        super(GPTModel, self).__init__()
         self.embedding = nn.Embedding(vocab_size, embed_size)
         self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
         self.transformer = nn.TransformerDecoder(
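A minimal construction sketch for GPTModel with the signature shown in the hunk above. The forward pass is not part of this diff, so the commented call below merely assumes a (batch, seq_len) tensor of token ids; note also that nn.TransformerDecoder itself expects a TransformerDecoderLayer plus num_layers, which the hunk is cut off before showing.

model = GPTModel(vocab_size=tokenizer.vocab_size)  # embed_size=256, num_heads=8, num_layers=6, max_len=200 by default

prompt_ids = torch.tensor([tokenizer.encode("write a short greeting")], dtype=torch.long)  # shape (1, seq_len)
# logits = model(prompt_ids)  # assumed forward signature; see app.py for the real inference loop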