alverciito committed on
Commit
6faa82b
·
1 Parent(s): 2fde924

fix positional encoding error

Browse files
model.py CHANGED
@@ -288,9 +288,9 @@ class SentenceCoseNet(PreTrainedModel):
288
  """
289
  # Convert to type:
290
  if len(input_ids.shape) == 2:
291
- x = input_ids.int().unsqueeze(0)
292
- mask = attention_mask.unsqueeze(0) if attention_mask is not None else None
293
- output = self.model(x=x, mask=mask).squeeze(0)
294
  elif len(input_ids.shape) == 3:
295
  x = input_ids.int()
296
  mask = attention_mask if attention_mask is not None else None
 
288
  """
289
  # Convert to type:
290
  if len(input_ids.shape) == 2:
291
+ x = input_ids.int().unsqueeze(1)
292
+ mask = attention_mask.unsqueeze(1) if attention_mask is not None else None
293
+ output = self.model(x=x, mask=mask).squeeze(1)
294
  elif len(input_ids.shape) == 3:
295
  x = input_ids.int()
296
  mask = attention_mask if attention_mask is not None else None
src/model/transformers/positional_encoding.py CHANGED
@@ -56,7 +56,7 @@ class PositionalEncoding(torch.nn.Module):
56
  torch.Tensor
57
  Tensor of the same shape as the input with positional encodings added.
58
  """
59
- return x + self.positional_encoding[:, :x.size(1), :]
60
  # - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
61
  # END OF FILE #
62
  # - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
 
56
  torch.Tensor
57
  Tensor of the same shape as the input with positional encodings added.
58
  """
59
+ return x + self.positional_encoding[:, :x.size(-2), :]
60
  # - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
61
  # END OF FILE #
62
  # - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #