import torch
from transformers.models.bert.modeling_bert import BertModel

from ...modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
from ...processing_utils import Unpack
from ...utils import TransformersKwargs
class DummyBertModel(BertModel):
    """BERT backbone that delegates entirely to ``BertModel.forward``.

    The class exists to expose the standard BERT forward signature; it adds
    no behavior of its own.
    """

    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        past_key_values: list[torch.FloatTensor] | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | BaseModelOutputWithPoolingAndCrossAttentions:
        """Run the standard BERT forward pass.

        All declared arguments are forwarded by keyword to the parent
        implementation; see ``BertModel.forward`` for their semantics.
        Returns a tuple or a ``BaseModelOutputWithPoolingAndCrossAttentions``
        depending on ``return_dict``.
        """
        # NOTE(review): the original body forwarded only ``input_ids`` and
        # ``**kwargs``, silently dropping every other named argument
        # (attention_mask, past_key_values, ...). Forward them all explicitly
        # so callers' arguments actually take effect. If this file is a
        # modular-converter fixture where the truncation was intentional,
        # confirm before merging.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )