klemenk committed on
Commit
d5b0c6b
·
verified ·
1 Parent(s): 3bd534c

Update modeling_auristream.py

Browse files
Files changed (1) hide show
  1. modeling_auristream.py +19 -8
modeling_auristream.py CHANGED
@@ -76,7 +76,7 @@ class AuriStream(PreTrainedModel):
76
  elif isinstance(module, nn.Embedding):
77
  torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
78
 
79
- def forward(self, seq, tgt=None, output_hidden_states=False, return_dict=False, up_until_layer=None):
80
  """
81
  Input: coch: torch.Tensor of shape (b, t)
82
  tgt_coch: torch.Tensor of shape (b, t) or None
@@ -106,13 +106,12 @@ class AuriStream(PreTrainedModel):
106
  x = block(x)
107
  if self.dwa is not None:
108
  x = self.dwa(x)
109
-
110
 
111
  # append the last hidden state if we did not exit early
112
  if up_until_layer is None or block_idx == len(self.transformer.h) - 1:
113
  all_hidden_states.append(x)
114
 
115
- if output_hidden_states:
116
  model_output = BaseModelOutput(
117
  last_hidden_state=x,
118
  hidden_states=all_hidden_states,
@@ -123,6 +122,10 @@ class AuriStream(PreTrainedModel):
123
  logits = self.coch_head(x)
124
 
125
  if tgt is not None:
 
 
 
 
126
  loss = F.cross_entropy(
127
  logits.reshape(-1, self.config.vocab_size), tgt.reshape(-1),
128
  )
@@ -134,14 +137,22 @@ class AuriStream(PreTrainedModel):
134
  loss += F.cross_entropy(
135
  future_logits.reshape(-1, self.config.vocab_size), tgt[:, (i+1):].reshape(-1),
136
  )
 
 
137
  # divide loss by number of future heads
138
  loss = loss / (len(self.future_heads) + 1)
139
-
140
  if return_dict:
141
- model_output = CausalLMOutput(
142
- loss=loss,
143
- logits=logits,
144
- )
 
 
 
 
 
 
145
  return model_output
146
 
147
  return logits, loss
 
76
  elif isinstance(module, nn.Embedding):
77
  torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
78
 
79
+ def forward(self, seq, tgt=None, output_logits=False, output_hidden_states=False, return_dict=False, up_until_layer=None):
80
  """
81
  Input: coch: torch.Tensor of shape (b, t)
82
  tgt_coch: torch.Tensor of shape (b, t) or None
 
106
  x = block(x)
107
  if self.dwa is not None:
108
  x = self.dwa(x)
 
109
 
110
  # append the last hidden state if we did not exit early
111
  if up_until_layer is None or block_idx == len(self.transformer.h) - 1:
112
  all_hidden_states.append(x)
113
 
114
+ if output_hidden_states and not output_logits:
115
  model_output = BaseModelOutput(
116
  last_hidden_state=x,
117
  hidden_states=all_hidden_states,
 
122
  logits = self.coch_head(x)
123
 
124
  if tgt is not None:
125
+
126
+ if output_logits:
127
+ all_logits = [logits]
128
+
129
  loss = F.cross_entropy(
130
  logits.reshape(-1, self.config.vocab_size), tgt.reshape(-1),
131
  )
 
137
  loss += F.cross_entropy(
138
  future_logits.reshape(-1, self.config.vocab_size), tgt[:, (i+1):].reshape(-1),
139
  )
140
+ if output_logits:
141
+ all_logits.append(future_logits)
142
  # divide loss by number of future heads
143
  loss = loss / (len(self.future_heads) + 1)
144
+
145
  if return_dict:
146
+ if output_logits:
147
+ model_output = CausalLMOutput(
148
+ loss=loss,
149
+ logits=all_logits,
150
+ )
151
+ else:
152
+ model_output = CausalLMOutput(
153
+ loss=loss,
154
+ logits=logits,
155
+ )
156
  return model_output
157
 
158
  return logits, loss