aframson committed on
Commit 7cae85e · 1 Parent(s): 6b67779
Files changed (1): modelLM.py (+26 -24)
modelLM.py CHANGED
@@ -12,7 +12,7 @@ class OBILanguageModel(PreTrainedModel):
         super(OBILanguageModel,self).__init__(config)
         self.token_embedding_table = nn.Embedding(config.vocab_size, config.hidden_size) # Use length of SentencePiece vocab
         self.position_embedding_table = nn.Embedding(config.block_size, config.hidden_size)
-
+
 
         self.transformer = nn.Transformer(
             d_model=config.hidden_size,
@@ -29,29 +29,31 @@ class OBILanguageModel(PreTrainedModel):
 
 
 
-    def forward(self, idx, targets=None):
-        tok_emb = self.token_embedding_table(idx)
-        # pos_emb = self.position_embedding_table(torch.arange(idx.size(1), device=device))
-        pos_emb = None # Initialize pos_emb to None
-        try:
-            pos_emb = self.position_embedding_table(torch.arange(idx.size(1), device='cpu'))
-        except IndexError as e:
-            # Print relevant information for debugging
-            print(f"IndexError: {e}")
-            print(f"idx.size(1): {idx.size(1)}")
-            print(f"Positional embedding table shape: {self.position_embedding_table.weight.shape}")
-        x = tok_emb + pos_emb
-        x = self.transformer(x, x)
-        x = self.ln1(x)
-        x = self.ln2(x)
-        logits = self.lm_head(x)
-
-        if targets is None:
-            loss = None
-        else:
-            loss = F.cross_entropy(logits.view(-1, self.config.vocab_size), targets.view(-1))
-
-        return logits, loss
+    def forward(self, idx, targets=None):
+        tok_emb = self.token_embedding_table(idx)
+        pos_emb = None # Initialize pos_emb to None
+        try:
+            pos_emb = self.position_embedding_table(torch.arange(idx.size(1), device='cpu'))
+        except IndexError as e:
+            # Handle the IndexError by initializing pos_emb with zeros
+            print(f"IndexError: {e}")
+            print(f"idx.size(1): {idx.size(1)}")
+            print(f"Positional embedding table shape: {self.position_embedding_table.weight.shape}")
+            pos_emb = torch.zeros((idx.size(1), self.config.hidden_size), device=device)
+
+        x = tok_emb + pos_emb
+        x = self.transformer(x, x)
+        x = self.ln1(x)
+        x = self.ln2(x)
+        logits = self.lm_head(x)
+
+        if targets is None:
+            loss = None
+        else:
+            loss = F.cross_entropy(logits.view(-1, self.config.vocab_size), targets.view(-1))
+
+        return logits, loss
+
 
     def generate(self, idx, max_new_tokens):
         for _ in range(max_new_tokens):
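A note on the change: before this commit, a failed position lookup left pos_emb as None, so the later tok_emb + pos_emb raised a TypeError; the new except branch fills pos_emb with zeros instead. Two caveats worth hedging: the zeros line references a module-level device name that does not appear in this diff, so it only works if device is defined elsewhere in modelLM.py; and the IndexError fires when idx.size(1) exceeds config.block_size, the number of rows in the position table. Below is a minimal standalone sketch of the same fallback, with a hypothetical helper name and the device taken from the input tensor rather than a global; it is an illustration, not the repository's exact code.

import torch
import torch.nn as nn

# Hypothetical helper (not part of modelLM.py) illustrating the commit's fallback.
def positional_embeddings(table: nn.Embedding, idx: torch.Tensor, hidden_size: int) -> torch.Tensor:
    seq_len = idx.size(1)
    try:
        # Succeeds while seq_len <= block_size (table.num_embeddings).
        # On CPU an out-of-range lookup raises IndexError; on CUDA it
        # surfaces as a device-side assert, so this catch is CPU-only.
        return table(torch.arange(seq_len, device=idx.device))
    except IndexError:
        # The commit's fallback: zero vectors, so x = tok_emb + pos_emb
        # degrades to token embeddings alone for over-long inputs.
        return torch.zeros((seq_len, hidden_size), device=idx.device)

# Example: a 16-row table (block_size=16) and a 64-token batch trigger the fallback.
table = nn.Embedding(16, 32)
idx = torch.zeros(2, 64, dtype=torch.long)
pos_emb = positional_embeddings(table, idx, 32)  # shape (64, 32), all zeros

An alternative to the zero fallback would be to truncate or reject inputs longer than config.block_size before the lookup, since zero-filled positions silently discard positional information rather than surfacing the length error.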