Commit 54c847e
Parent(s): 576edf6
Upload roberta_layers.py
roberta_layers.py CHANGED (+3 -3)
@@ -199,9 +199,9 @@ class RobertaSelfAttention(nn.Module):
         query_layer = self.transpose_for_scores(mixed_query_layer)
 
         if xformers_available:
-            query_layer =
-            key_layer =
-            value_layer =
+            query_layer = mixed_query_layer
+            key_layer = self.key(hidden_states)
+            value_layer = self.value(hidden_states)
             context_layer = xops.memory_efficient_attention(
                 query_layer, key_layer, value_layer, p=self.dropout_prob
             )
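For context, below is a minimal, self-contained sketch of the pattern the new branch follows: project the hidden states to query/key/value and hand them directly to xformers' memory-efficient attention, without the transpose_for_scores reshape. It is illustrative only and not this repository's code; the tensor sizes, dropout probability, and module names (query_proj, key_proj, value_proj) are placeholder assumptions, and it assumes xformers is installed and a CUDA device is available. Note that xops.memory_efficient_attention accepts inputs shaped [batch, seq_len, embed_dim] or [batch, seq_len, num_heads, head_dim]; the diff passes the unsplit projections, i.e. the former layout.

# Minimal sketch (not the repository's code): calling xformers'
# memory-efficient attention on query/key/value projections, mirroring
# the pattern in the changed branch above. Sizes are illustrative.
import torch
import torch.nn as nn
import xformers.ops as xops

batch, seq_len, hidden = 2, 16, 64  # assumed, illustrative sizes
hidden_states = torch.randn(batch, seq_len, hidden, device="cuda", dtype=torch.float16)

# Linear projections analogous to self.query / self.key / self.value
query_proj = nn.Linear(hidden, hidden).to("cuda", torch.float16)
key_proj = nn.Linear(hidden, hidden).to("cuda", torch.float16)
value_proj = nn.Linear(hidden, hidden).to("cuda", torch.float16)

query_layer = query_proj(hidden_states)   # [batch, seq_len, hidden]
key_layer = key_proj(hidden_states)
value_layer = value_proj(hidden_states)

# memory_efficient_attention accepts [B, M, K] (single head) or
# [B, M, H, K] (multi-head); here the unsplit [B, M, K] layout is used,
# as in the diff. p is the attention dropout probability.
context_layer = xops.memory_efficient_attention(
    query_layer, key_layer, value_layer, p=0.1
)
print(context_layer.shape)  # torch.Size([2, 16, 64])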