moelanoby committed on
Commit
9706dbe
·
verified ·
1 Parent(s): f169b89

Update bucket_memory_model.py

Browse files
Files changed (1) hide show
  1. bucket_memory_model.py +0 -2
bucket_memory_model.py CHANGED
@@ -20,7 +20,6 @@ class BucketMemoryConfig(PretrainedConfig):
20
  ):
21
  super().__init__(**kwargs)
22
  self.vocab_size = vocab_size
23
- self.hidden_size = hidden_size
24
  self.d_model = d_model
25
  self.num_layers = num_layers
26
  self.num_buckets = num_buckets
@@ -252,7 +251,6 @@ class BucketMemoryModel(PreTrainedModel):
252
  def __init__(self, config, adapter_kwargs=None):
253
  super().__init__(config)
254
  self.d_model = config.d_model
255
- self.hidden_size = hidden_size
256
  self.token_embedding = nn.Embedding(config.vocab_size, config.d_model)
257
  self.pos_encoding = nn.Parameter(torch.zeros(1, config.max_seq_length, config.d_model))
258
  self._init_positional_encoding(config.max_seq_length, config.d_model)
 
20
  ):
21
  super().__init__(**kwargs)
22
  self.vocab_size = vocab_size
 
23
  self.d_model = d_model
24
  self.num_layers = num_layers
25
  self.num_buckets = num_buckets
 
251
  def __init__(self, config, adapter_kwargs=None):
252
  super().__init__(config)
253
  self.d_model = config.d_model
 
254
  self.token_embedding = nn.Embedding(config.vocab_size, config.d_model)
255
  self.pos_encoding = nn.Parameter(torch.zeros(1, config.max_seq_length, config.d_model))
256
  self._init_positional_encoding(config.max_seq_length, config.d_model)