dingzx97 committed on
Commit
2c32dba
·
1 Parent(s): d96d57c

Upload 2 files

Browse files
configuration_lddbert.py.txt ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LddBERT model configuration"""
16
+ from transformers.configuration_utils import PretrainedConfig
17
+ from transformers.utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+ LDDBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
23
+ # "lddbert-base-uncased": "https://huggingface.co/lddbert-base-uncased/resolve/main/config.json",
24
+ }
25
+
26
+
27
class LddBertConfig(PretrainedConfig):
    r"""
    Configuration class for [`LddBertModel`]: stores every hyper-parameter defining the architecture
    (a DistilBERT-style Transformer encoder followed by parallel bidirectional-GRU and CNN branches).

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LddBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LddBertModel`] or [`TFLddBertModel`].
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
            Whether to use sinusoidal positional embeddings.
        n_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        n_gru_layers (`int`, *optional*, defaults to 6):
            Number of stacked bidirectional GRU layers applied on top of the Transformer encoder.
        n_cnn_layers (`int`, *optional*, defaults to 6):
            Number of convolutional layers applied on top of the Transformer encoder.
        cnn_kernel_size (`int`, *optional*, defaults to 5):
            Kernel size of the convolutional layers. Must be odd so that same-padding preserves the sequence length.
        n_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        dim (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_dim (`int`, *optional*, defaults to 3072):
            The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token; also used as `padding_idx` of the word-embedding table.
        qa_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probabilities used in the question answering model [`LddBertForQuestionAnswering`].
        seq_classif_dropout (`float`, *optional*, defaults to 0.2):
            The dropout probabilities used in the sequence classification and the multiple choice model
            [`LddBertForSequenceClassification`].

    Examples:

    ```python
    >>> from transformers import LddBertModel, LddBertConfig

    >>> # Initializing a LddBERT configuration
    >>> configuration = LddBertConfig()

    >>> # Initializing a model from the configuration
    >>> model = LddBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "lddbert"
    # Map the canonical HF attribute names onto this config's DistilBERT-style names.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        activation="gelu",
        initializer_range=0.02,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        pad_token_id=0,
        type_vocab_size=2,
        dropout=0.1,
        attention_dropout=0.1,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        n_gru_layers=6,
        n_cnn_layers=6,
        cnn_kernel_size=5,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_gru_layers = n_gru_layers
        self.n_cnn_layers = n_cnn_layers
        self.cnn_kernel_size = cnn_kernel_size
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
128
+
modeling_lddbert.py.txt ADDED
@@ -0,0 +1,766 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import math
3
+ from typing import Dict, List, Optional, Set, Tuple, Union
4
+
5
+ # import numpy as np
6
+ import torch
7
+ from packaging import version
8
+ from torch import nn
9
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
10
+ from transformers.activations import get_activation
11
+ from transformers.configuration_utils import PretrainedConfig
12
+ # from transformers.deepspeed import is_deepspeed_zero3_enabled
13
+ from transformers.modeling_outputs import (
14
+ BaseModelOutput,
15
+ MaskedLMOutput,
16
+ # MultipleChoiceModelOutput,
17
+ # QuestionAnsweringModelOutput,
18
+ SequenceClassifierOutput,
19
+ # TokenClassifierOutput,
20
+ )
21
+ from transformers.modeling_utils import PreTrainedModel
22
+ from transformers.models.distilbert.modeling_distilbert import (
23
+ create_sinusoidal_embeddings,
24
+ DISTILBERT_START_DOCSTRING,
25
+ DISTILBERT_INPUTS_DOCSTRING,
26
+
27
+ )
28
+ from transformers.pytorch_utils import (
29
+ apply_chunking_to_forward,
30
+ find_pruneable_heads_and_indices,
31
+ prune_linear_layer,
32
+ )
33
+ from transformers.utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ # replace_return_docstrings,
39
+ )
40
+
41
+ from .configuration_lddbert import LddBertConfig
42
+
43
+ logger = logging.get_logger(__name__)
44
+ _CHECKPOINT_FOR_DOC = "lddbert"
45
+ _CONFIG_FOR_DOC = "LddBertConfig"
46
+ _TOKENIZER_FOR_DOC = "LddBertTokenizer"
47
+
48
+
49
class Embeddings(nn.Module):
    """Sum of word, position and token-type embeddings, followed by LayerNorm and dropout."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
        # Use `config.dim` directly (the config maps `hidden_size` -> `dim`) for consistency with the other tables.
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.dim)

        if config.sinusoidal_pos_embds:
            # Overwrite the learned position table with a fixed sinusoidal one.
            create_sinusoidal_embeddings(
                n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
            )

        self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
        self.dropout = nn.Dropout(config.dropout)
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            # Non-persistent buffers: recreated on load rather than serialized into checkpoints.
            self.register_buffer(
                "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
            )
            self.register_buffer(
                "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
            )

    def forward(
        self,
        input_ids: torch.Tensor,
        token_type_ids: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Parameters:
            input_ids: torch.tensor(bs, max_seq_length) The token ids to embed.
            token_type_ids: optional torch.tensor(bs, max_seq_length); treated as all zeros when omitted.

        Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (word + position + token-type
        embeddings, LayerNorm-ed and dropout-regularized).
        """
        input_shape = input_ids.size()
        seq_length = input_shape[1]

        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # Reuse the registered all-zero buffer, truncated to seq_length and broadcast over the batch.
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                token_type_ids = buffered_token_type_ids.expand(input_shape[0], seq_length)
            else:
                # Fix: this branch runs exactly when the buffers were NOT registered (torch <= 1.6), so
                # `self.position_ids` does not exist here; take the device from the input instead.
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=input_ids.device)

        if hasattr(self, "position_ids"):
            position_ids = self.position_ids[:, :seq_length]
        else:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # (max_seq_length)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # (bs, max_seq_length)

        word_embeddings = self.word_embeddings(input_ids)  # (bs, max_seq_length, dim)
        position_embeddings = self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)  # (bs, max_seq_length, dim)

        embeddings = word_embeddings + position_embeddings + token_type_embeddings  # (bs, max_seq_length, dim)
        embeddings = self.LayerNorm(embeddings)  # (bs, max_seq_length, dim)
        embeddings = self.dropout(embeddings)  # (bs, max_seq_length, dim)
        return embeddings
108
+
109
+
110
class MultiHeadSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (DistilBERT style) with support for head pruning."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = nn.Dropout(p=config.attention_dropout)

        assert self.dim % self.n_heads == 0

        self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)

        self.pruned_heads: Set[int] = set()

    def prune_heads(self, heads: List[int]):
        """Remove the given attention heads and shrink the four projection layers accordingly."""
        if not heads:
            return
        head_size = self.dim // self.n_heads
        heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, head_size, self.pruned_heads)
        # Shrink output features of Q/K/V, input features of the output projection.
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Keep the bookkeeping in sync with the new layer sizes.
        self.n_heads = self.n_heads - len(heads)
        self.dim = head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length) — positions equal to 0 are masked out.

        Returns:
            `(context,)` with context of shape (bs, seq_length, dim); when `output_attentions=True`,
            `(context, weights)` where weights has shape (bs, n_heads, seq_length, seq_length).
        """
        bs = query.size(0)
        k_length = key.size(1)
        head_dim = self.dim // self.n_heads

        def split_heads(t: torch.Tensor) -> torch.Tensor:
            # (bs, len, dim) -> (bs, n_heads, len, head_dim)
            return t.view(bs, -1, self.n_heads, head_dim).transpose(1, 2)

        def merge_heads(t: torch.Tensor) -> torch.Tensor:
            # (bs, n_heads, len, head_dim) -> (bs, len, n_heads * head_dim)
            return t.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * head_dim)

        q = split_heads(self.q_lin(query))  # (bs, n_heads, q_length, head_dim)
        k = split_heads(self.k_lin(key))  # (bs, n_heads, k_length, head_dim)
        v = split_heads(self.v_lin(value))  # (bs, n_heads, k_length, head_dim)

        # Scale queries, then raw attention scores.
        q = q / math.sqrt(head_dim)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)

        # Broadcast the padding mask over heads and query positions, then mask with -inf.
        pad_mask = (mask == 0).view(bs, 1, 1, k_length).expand_as(scores)
        scores = scores.masked_fill(pad_mask, -float("inf"))

        weights = nn.functional.softmax(scores, dim=-1)  # (bs, n_heads, q_length, k_length)
        weights = self.dropout(weights)

        # Optionally zero out entire heads.
        if head_mask is not None:
            weights = weights * head_mask

        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, head_dim)
        context = self.out_lin(merge_heads(context))  # (bs, q_length, dim)

        return (context, weights) if output_attentions else (context,)
203
+
204
+
205
class FFN(nn.Module):
    """Position-wise feed-forward network: Linear -> activation -> Linear -> Dropout, applied with chunking."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
        self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
        self.activation = get_activation(config.activation)
        self.dropout = nn.Dropout(p=config.dropout)
        # Chunking trades peak memory for compute along the sequence dimension (dim 1).
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)

    def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
        """Run a single chunk through the two-layer MLP."""
        return self.dropout(self.lin2(self.activation(self.lin1(input))))
224
+
225
+
226
class TransformerBlock(nn.Module):
    """One encoder layer: self-attention and FFN sub-layers, each with a residual connection + LayerNorm."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        # Raise instead of assert: asserts are stripped under `python -O` and give poor error messages.
        if config.dim % config.n_heads != 0:
            raise ValueError(f"config.dim ({config.dim}) must be divisible by config.n_heads ({config.n_heads})")

        self.attention = MultiHeadSelfAttention(config)
        self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

        self.ffn = FFN(config)
        self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            x: torch.tensor(bs, seq_length, dim)
            attn_mask: torch.tensor(bs, seq_length)

        Returns:
            `(ffn_output,)` of shape (bs, seq_length, dim); when `output_attentions=True`, the tuple is
            prefixed with the attention weights (bs, n_heads, seq_length, seq_length).
        """
        # Self-attention sub-layer
        sa_output = self.attention(
            query=x,
            key=x,
            value=x,
            mask=attn_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        if output_attentions:
            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:  # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
            # Explicit type check instead of an assert (matches upstream DistilBERT behavior).
            if not isinstance(sa_output, tuple):
                raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
            sa_output = sa_output[0]
        sa_output = self.sa_layer_norm(sa_output + x)  # residual + LayerNorm, (bs, seq_length, dim)

        # Feed-forward sub-layer
        ffn_output = self.ffn(sa_output)  # (bs, seq_length, dim)
        ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output)  # residual + LayerNorm

        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output
278
+
279
+
280
class Transformer(nn.Module):
    """A stack of `config.n_layers` identical TransformerBlock layers."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.n_layers = config.n_layers
        self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:  # docstyle-ignore
        """
        Run the embedded input through every encoder layer in order.

        Parameters:
            x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
            attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.

        Returns:
            A `BaseModelOutput` when `return_dict` is truthy, otherwise a tuple holding the final
            hidden state (bs, seq_length, dim), then — only when requested — the per-layer hidden
            states (`output_hidden_states=True`) and per-layer attention weights
            (`output_attentions=True`).
        """
        collected_states = () if output_hidden_states else None
        collected_attentions = () if output_attentions else None

        hidden_state = x
        for layer_idx, block in enumerate(self.layer):
            # Record the state *entering* each layer; the final output is appended after the loop.
            if output_hidden_states:
                collected_states = collected_states + (hidden_state,)

            block_outputs = block(
                x=hidden_state, attn_mask=attn_mask, head_mask=head_mask[layer_idx], output_attentions=output_attentions
            )
            hidden_state = block_outputs[-1]

            if output_attentions:
                assert len(block_outputs) == 2
                collected_attentions = collected_attentions + (block_outputs[0],)
            else:
                assert len(block_outputs) == 1

        # Add last layer
        if output_hidden_states:
            collected_states = collected_states + (hidden_state,)

        if return_dict:
            return BaseModelOutput(
                last_hidden_state=hidden_state, hidden_states=collected_states, attentions=collected_attentions
            )
        return tuple(v for v in (hidden_state, collected_states, collected_attentions) if v is not None)
338
+
339
+
340
class LddBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LddBertConfig
    load_tf_weights = None
    base_model_prefix = "lddbert"

    def _init_weights(self, module: nn.Module):
        """Initialize `module` in place: normal(0, initializer_range) for weights, zeros for biases."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            # Keep the padding token's vector at exactly zero.
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # Identity initialization: scale 1, shift 0.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
367
+
368
+
369
+ LDDBERT_START_DOCSTRING = DISTILBERT_START_DOCSTRING
370
+
371
+ LDDBERT_INPUTS_DOCSTRING = DISTILBERT_INPUTS_DOCSTRING
372
+
373
+
374
@add_start_docstrings(
    "The bare LddBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    LDDBERT_START_DOCSTRING,
)
class LddBertModel(LddBertPreTrainedModel):
    """
    Embeddings + Transformer encoder whose final hidden states are refined by two parallel branches —
    a stacked bidirectional GRU and a stack of same-padded 2D convolutions — fused by element-wise sum.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        # An odd kernel is required so that `(kernel_size - 1) // 2` padding preserves the sequence length.
        # Raise instead of assert: asserts are stripped under `python -O`.
        if config.cnn_kernel_size % 2 != 1:
            raise ValueError(f"config.cnn_kernel_size ({config.cnn_kernel_size}) must be an odd number")

        self.embeddings = Embeddings(config)  # Embeddings
        self.transformer = Transformer(config)  # Encoder
        # dim//2 hidden units per direction so the concatenated bidirectional output keeps width `dim`.
        self.gru = nn.GRU(config.dim, config.dim // 2, config.n_gru_layers, batch_first=True, bidirectional=True)

        self.activation_cnn = get_activation('relu')
        # Single-channel 2D convolutions over the (seq_length, dim) plane; same-padding keeps both sizes.
        self.cnn = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_channels=1,
                          out_channels=1,
                          kernel_size=config.cnn_kernel_size,
                          padding=(config.cnn_kernel_size - 1) // 2),
                self.activation_cnn
            )
            for _ in range(config.n_cnn_layers)
        ])

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.embeddings.position_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings

        # no resizing needs to be done if the length stays the same
        if num_position_embeds_diff == 0:
            return

        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings

        old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()

        self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)

        if self.config.sinusoidal_pos_embds:
            # A fixed sinusoidal table can simply be regenerated at the new size.
            # Fix: the table lives on `self.embeddings`, not directly on the model (was
            # `self.position_embeddings.weight`, which would raise AttributeError).
            create_sinusoidal_embeddings(
                n_pos=self.config.max_position_embeddings,
                dim=self.config.dim,
                out=self.embeddings.position_embeddings.weight,
            )
        else:
            with torch.no_grad():
                if num_position_embeds_diff > 0:
                    # Growing: copy the old vectors, leave the new tail randomly initialized.
                    self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
                        old_position_embeddings_weight
                    )
                else:
                    # Shrinking: keep only the first `new_num_position_embeddings` vectors.
                    self.embeddings.position_embeddings.weight = nn.Parameter(
                        old_position_embeddings_weight[:num_position_embeds_diff]
                    )
        # move position_embeddings to correct device
        self.embeddings.position_embeddings.to(self.device)

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings: nn.Embedding):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.transformer.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(LDDBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)  # (bs, seq_length)

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(
                input_ids=input_ids,
                token_type_ids=token_type_ids,
            )  # (bs, seq_length, dim)

        bert_output = self.transformer(
            x=inputs_embeds,
            attn_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Branch 1: bidirectional GRU over the encoder output (final GRU hidden states are discarded).
        gru_output, _ = self.gru(bert_output[0])

        # Branch 2: treat the encoder output as a 1-channel image and run the CNN stack over it.
        cnn_output = bert_output[0].view(input_shape[0], 1, input_shape[1], -1)
        for conv_block in self.cnn:
            cnn_output = conv_block(cnn_output)
        cnn_output = cnn_output.view(input_shape[0], input_shape[1], -1)

        # Fuse the two branches by element-wise sum; shapes all match (bs, seq_length, dim).
        output = gru_output + cnn_output
        if not return_dict:
            # NOTE: the tuple tail still carries the *encoder's* hidden states/attentions, pre GRU/CNN fusion.
            return (output, ) + bert_output[1:]

        return BaseModelOutput(
            last_hidden_state=output,
            hidden_states=bert_output.hidden_states,
            attentions=bert_output.attentions,
        )
536
+
537
+
538
+
539
+
540
@add_start_docstrings(
    """LddBert Model with a `masked language modeling` head on top.""",
    LDDBERT_START_DOCSTRING,
)
class LddBertForMaskedLM(LddBertPreTrainedModel):
    """LddBertModel plus a DistilBERT-style MLM head: Linear -> activation -> LayerNorm -> vocab projection."""

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.activation = get_activation(config.activation)

        self.lddbert = LddBertModel(config)
        # MLM head: transform, normalize, then project back onto the vocabulary.
        self.vocab_transform = nn.Linear(config.dim, config.dim)
        self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
        self.vocab_projector = nn.Linear(config.dim, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

        # CrossEntropyLoss ignores label index -100 by default, as relied on in `forward`.
        self.mlm_loss_fct = nn.CrossEntropyLoss()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.lddbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.lddbert.resize_position_embeddings(new_num_position_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        # The projector is the output-embedding layer (enables weight tying by the base class).
        return self.vocab_projector

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.vocab_projector = new_embeddings

    @add_start_docstrings_to_model_forward(LDDBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        lddbert_output = self.lddbert(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = lddbert_output[0]  # (bs, seq_length, dim)
        # MLM head pipeline over every position.
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.activation(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)  # (bs, seq_length, vocab_size)

        mlm_loss = None
        if labels is not None:
            # Flatten (bs, seq_length) positions into one batch for the token-level cross-entropy.
            mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (prediction_logits,) + lddbert_output[1:]
            return ((mlm_loss,) + output) if mlm_loss is not None else output

        return MaskedLMOutput(
            loss=mlm_loss,
            logits=prediction_logits,
            hidden_states=lddbert_output.hidden_states,
            attentions=lddbert_output.attentions,
        )
643
+
644
+
645
@add_start_docstrings(
    """
    LddBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    LDDBERT_START_DOCSTRING,
)
class LddBertForSequenceClassification(LddBertPreTrainedModel):
    """LddBERT encoder followed by a classification/regression head applied to the first token."""

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        # Backbone encoder plus a small MLP head: Linear -> activation -> dropout -> Linear.
        self.lddbert = LddBertModel(config)
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.activation = get_activation(config.activation)
        self.dropout = nn.Dropout(config.seq_classif_dropout)
        self.classifier = nn.Linear(config.dim, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """Returns the position embeddings"""
        return self.lddbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        # Delegate to the underlying encoder, which owns the position-embedding table.
        self.lddbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(LDDBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.lddbert(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Head input is the hidden state of the first token of the sequence.
        sequence_output = encoder_outputs[0]  # (bs, seq_len, dim)
        pooled = self.pre_classifier(sequence_output[:, 0])  # (bs, dim)
        pooled = self.dropout(self.activation(pooled))  # (bs, dim)
        logits = self.classifier(pooled)  # (bs, num_labels)

        loss = None
        if labels is not None:
            # Lazily infer and cache the problem type from label shape/dtype (standard HF convention).
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            problem_type = self.config.problem_type
            if problem_type == "regression":
                mse = MSELoss()
                if self.num_labels == 1:
                    loss = mse(logits.squeeze(), labels.squeeze())
                else:
                    loss = mse(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            # Tuple output: prepend the loss only when it was computed.
            output = (logits,) + encoder_outputs[1:]
            return output if loss is None else (loss,) + output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
765
+
766
+