Update modeling_nemotron.py
Browse files — modeling_nemotron.py (+7 −7)
modeling_nemotron.py
CHANGED
|
@@ -24,19 +24,19 @@ import torch.utils.checkpoint
|
|
| 24 |
from torch import nn, Tensor, Size
|
| 25 |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 26 |
|
| 27 |
-
from ...activations import ACT2FN
|
| 28 |
-
from ...cache_utils import Cache, DynamicCache, StaticCache
|
| 29 |
-
from ...modeling_attn_mask_utils import AttentionMaskConverter
|
| 30 |
-
from ...modeling_outputs import (
|
| 31 |
BaseModelOutputWithPast,
|
| 32 |
CausalLMOutputWithPast,
|
| 33 |
QuestionAnsweringModelOutput,
|
| 34 |
SequenceClassifierOutputWithPast,
|
| 35 |
TokenClassifierOutput,
|
| 36 |
)
|
| 37 |
-
from ...modeling_utils import PreTrainedModel
|
| 38 |
-
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
|
| 39 |
-
from ...utils import (
|
| 40 |
add_start_docstrings,
|
| 41 |
add_start_docstrings_to_model_forward,
|
| 42 |
is_flash_attn_2_available,
|
|
|
|
| 24 |
from torch import nn, Tensor, Size
|
| 25 |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 26 |
|
| 27 |
+
from transformers.activations import ACT2FN
|
| 28 |
+
from transformers.cache_utils import Cache, DynamicCache, StaticCache
|
| 29 |
+
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
|
| 30 |
+
from transformers.modeling_outputs import (
|
| 31 |
BaseModelOutputWithPast,
|
| 32 |
CausalLMOutputWithPast,
|
| 33 |
QuestionAnsweringModelOutput,
|
| 34 |
SequenceClassifierOutputWithPast,
|
| 35 |
TokenClassifierOutput,
|
| 36 |
)
|
| 37 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 38 |
+
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
|
| 39 |
+
from transformers.utils import (
|
| 40 |
add_start_docstrings,
|
| 41 |
add_start_docstrings_to_model_forward,
|
| 42 |
is_flash_attn_2_available,
|