igorktech committed on
Commit
6a4ced5
·
1 Parent(s): 2c98094

Model save

Browse files
configuration_hibial.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

# Map from hub model id to its hosted config URL.
# NOTE(review): "igorktech/custom" points at the "custom4" config URL —
# confirm this aliasing is intentional.
HIBIALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "igorktech/custom": "https://huggingface.co/igorktech/custom4/resolve/main/config.json",
    "igorktech/custom4": "https://huggingface.co/igorktech/custom4/resolve/main/config.json",
}
10
+
11
+
12
class HiBiAlBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`HiBiAlBertModel`]. It is used to
    instantiate a hierarchical BERT model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a configuration similar to
    that of the HierBERT [HierBert](https://github.com/igorktech/hier-bert-pytorch) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
    Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling the model.
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        norm_first (`bool`, *optional*, defaults to `True`):
            Whether the encoder layers apply layer normalization before (pre-LN) rather than after each
            sub-layer.
        pad_token_id (`int`, *optional*, defaults to 0):
            Id of the padding token.
        sep_token_id (`int`, *optional*, defaults to 3):
            Id of the separator token used to derive utterance boundaries.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`,
            `"relative_key_query"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all
            models). Only relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        auto_map (`dict`, *optional*):
            Mapping from Auto class names to the custom classes implementing them. Defaults to the
            HiBiAlBert implementation modules.
    """

    model_type = "hibial-bert"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=512,
        num_hidden_layers=6,
        num_attention_heads=8,
        intermediate_size=2048,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        norm_first=True,
        pad_token_id=0,
        sep_token_id=3,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        auto_map=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            sep_token_id=sep_token_id,
            **kwargs)

        # Build the default per instance instead of using a mutable default
        # argument (a dict default would be shared across all instances).
        if auto_map is None:
            auto_map = {
                "AutoConfig": "configuration_hibial.HiBiAlBertConfig",
                "AutoModel": "modelling_hibial.HiBiAlBertModel",
                "AutoModelForMaskedLM": "modelling_hibial.HiBiAlBertForMaskedLM",
                "AutoModelForSequenceClassification": "modelling_hibial.HiBiAlBertForSequenceClassification",
            }

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.norm_first = norm_first
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.auto_map = auto_map
generation_config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "pad_token_id": 0,
4
+ "transformers_version": "4.31.0"
5
+ }
modelling_hibial.py ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import math
3
+ from typing import Optional, Any, Tuple
4
+
5
+ import torch
6
+ from torch import Tensor
7
+ import torch.nn.functional as F
8
+ from torch.nn.modules.module import Module
9
+ from torch.nn.modules.activation import MultiheadAttention
10
+ from torch.nn.modules.container import ModuleList
11
+ from torch.nn.init import xavier_uniform_
12
+ from torch.nn.modules.dropout import Dropout
13
+ from torch.nn.modules.linear import Linear
14
+ from torch.nn.modules.normalization import LayerNorm
15
+ # from torch.nn import TransformerEncoderLayer
16
+
17
+ from transformers.modeling_outputs import BaseModelOutputWithPooling
18
+ from transformers import PreTrainedModel
19
+ from transformers import BertForMaskedLM, BertForSequenceClassification
20
+
21
+ from .configuration_hibial import HiBiAlBertConfig
22
+
23
import warnings

# Turn off all warnings
# NOTE(review): this silences warnings process-wide at import time, which also
# affects every downstream consumer of this module — consider scoping the
# suppression with warnings.catch_warnings() instead.
warnings.filterwarnings("ignore")
27
+
28
+
29
# Define masking
def gen_encoder_ut_mask(src_seq, input_mask, utt_loc):
    """Boolean (bs, S, S) mask that is True where two positions belong to
    different utterances (i.e. attention between them must be blocked).

    `src_seq` and `input_mask` are accepted for interface compatibility; the
    mask depends only on `utt_loc` (per-token utterance ids of shape (bs, S)).
    """
    # Positions i and j may attend to each other iff they share an utterance
    # id; negate the pairwise equality to obtain the blocking mask.
    same_utterance = utt_loc.unsqueeze(1) == utt_loc.unsqueeze(2)
    return ~same_utterance
37
+
38
+
39
+ def _get_pe_inputs(src_seq, input_mask, utt_loc):
40
+ pe_utt_loc = torch.zeros(utt_loc.shape, device=utt_loc.device)
41
+ for i in range(1, utt_loc.shape[1]): # time
42
+ _logic = (utt_loc[:, i] == utt_loc[:, i - 1]).float()
43
+ pe_utt_loc[:, i] = pe_utt_loc[:, i - 1] + _logic - (1 - _logic) * pe_utt_loc[:, i - 1]
44
+ return pe_utt_loc
45
+
46
+
47
def _CLS_masks(src_seq, input_mask, utt_loc):
    """Build the HIER-CLS mask set for the hierarchical encoder.

    Returns a 3-tuple:
      - pe_utt_loc: within-utterance token positions (bs, S), float.
      - enc_mask_utt: UT-mask blocking attention across utterances (bs, S, S).
      - enc_mask_ct: CT-mask allowing attention only to non-padding CLS
        positions (within-utterance position 0), expanded to (bs, S, S).
    """
    # HT-Encoder positional indices.
    pe_utt_loc = _get_pe_inputs(src_seq, input_mask, utt_loc)

    # UT-MASK: block cross-utterance attention.
    enc_mask_utt = gen_encoder_ut_mask(src_seq, input_mask, utt_loc)

    # CT-MASK (HIER-CLS style): a key position is blocked unless it is a CLS
    # token (pe == 0) and not padding (input_mask == 0).
    blocked_keys = (pe_utt_loc + input_mask) != 0
    enc_mask_ct = blocked_keys.unsqueeze(1).expand(-1, src_seq.shape[1], -1)

    return pe_utt_loc, enc_mask_utt, enc_mask_ct
58
+
59
+
60
def get_hier_encoder_mask(src_seq, input_mask, utt_loc, type: str):
    """Dispatch to the mask builder for the requested CT-mask flavour.

    Only ``type == "cls"`` (HIER-CLS: context routed through CLS tokens) is
    supported in this BERT variant; "hier" and "full" raise.
    """
    # Padding correction: flip the sign of the utterance id on padded
    # positions so padding attends only to padding (kept for numerical
    # stability) while nothing else attends to it.
    utt_loc = utt_loc - 2 * input_mask * utt_loc

    # CT-Mask type must be one of the known flavours.
    assert type in ["hier", "cls", "full"]

    if type == "cls":  # HIER-CLS: Context through cls tokens
        return _CLS_masks(src_seq, input_mask, utt_loc)
    if type == "hier":  # HIER: Context through final utterance
        raise Exception("Not used for BERT")
    if type == "full":  # Ut-mask only, CT-mask: Full attention
        raise Exception("Not used for BERT")

    return None
77
+
78
+
79
+ def _get_clones(module, N):
80
+ return ModuleList([copy.deepcopy(module) for i in range(N)])
81
+
82
+
83
+ def _get_activation_fn(activation):
84
+ if activation == "relu":
85
+ return F.relu
86
+ elif activation == "gelu":
87
+ return F.gelu
88
+
89
+ raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
90
+
91
+
92
class BiALiBi(Module):
    """Bidirectional ALiBi-style relative position bias.

    Learns one slope per attention head for each direction — ``beta`` for the
    lower triangle (looking back) and ``gamma`` for the upper triangle
    (looking forward) — and produces an additive attention bias of shape
    (num_heads, seq_len, seq_len) that decreases linearly with distance
    (the diagonal stays 0; off-diagonal entries are non-positive).
    """

    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads

        # One learnable slope per head and per direction.
        self.beta = torch.nn.Parameter(torch.empty(self.num_attention_heads), requires_grad=True)
        self.gamma = torch.nn.Parameter(torch.empty(self.num_attention_heads), requires_grad=True)

        # Normal init with mean -2 so that sigmoid(slope) starts small.
        torch.nn.init.normal_(self.beta, -2, 1)
        torch.nn.init.normal_(self.gamma, -2, 1)

    def get_rel_pos(self, seq_len: int):
        """Absolute distance |i - j| per head: (num_heads, seq_len, seq_len)."""
        positions = torch.arange(seq_len, dtype=torch.float32).to(self.gamma.device)
        distance = (positions.unsqueeze(0) - positions.unsqueeze(1)).abs()
        return distance.unsqueeze(0).expand(self.num_attention_heads, -1, -1).clone()

    def forward(self, seq_len: int):
        distance = self.get_rel_pos(seq_len)

        # Upper triangle scaled by -sigmoid(gamma), lower triangle by
        # -sigmoid(beta); the two regions are disjoint and the diagonal is 0.
        upper = torch.triu(torch.ones_like(distance), diagonal=1)
        upper = upper * -torch.sigmoid(self.gamma).view(-1, 1, 1)

        lower = torch.tril(torch.ones_like(distance), diagonal=-1)
        lower = lower * -torch.sigmoid(self.beta).view(-1, 1, 1)

        return distance * (upper + lower)
123
+
124
+
125
class TransformerEncoderLayer(Module):
    r"""TransformerEncoderLayer is made up of self-attn and feedforward network.

    This standard encoder layer is based on the paper "Attention Is All You Need".
    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
    in a different way during application.

    Unlike ``torch.nn.TransformerEncoderLayer``, ``forward`` here returns a tuple
    ``(output, attention_weights)`` so callers can collect per-layer attentions.

    All hyper-parameters are read from ``config``:
        hidden_size: model dimensionality (d_model).
        num_attention_heads: number of attention heads.
        intermediate_size: feed-forward hidden size.
        attention_probs_dropout_prob / hidden_dropout_prob: dropout rates.
        norm_first: pre-LayerNorm when True, post-LayerNorm otherwise.
        layer_norm_eps: eps for the LayerNorm modules.
        hidden_act: "relu" or "gelu" (see ``_get_activation_fn``).
    """

    def __init__(self, config):
        super(TransformerEncoderLayer, self).__init__()

        # batch_first is left at its default (False): inputs are (S, N, E).
        self.self_attn = MultiheadAttention(config.hidden_size,
                                            config.num_attention_heads,
                                            dropout=config.attention_probs_dropout_prob)
        # Implementation of Feedforward model
        self.linear1 = Linear(config.hidden_size, config.intermediate_size)
        self.dropout = Dropout(config.hidden_dropout_prob)
        self.linear2 = Linear(config.intermediate_size, config.hidden_size)

        self.norm_first = config.norm_first
        self.norm1 = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.norm2 = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout1 = Dropout(config.hidden_dropout_prob)
        self.dropout2 = Dropout(config.hidden_dropout_prob)

        self.activation = _get_activation_fn(config.hidden_act)

    def __setstate__(self, state):
        # Backward compatibility with pickles created before 'activation' existed.
        if 'activation' not in state:
            state['activation'] = F.relu
        super(TransformerEncoderLayer, self).__setstate__(state)

    def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None) -> tuple[Tensor, Optional[Tensor]]:
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer, shape (S, N, E) (required).
            src_mask: the mask for the src sequence (optional); an additive
                float or boolean attention mask.
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Returns:
            Tuple of the encoded output and the per-head attention weights
            (``average_attn_weights=False`` keeps heads separate; requires a
            torch version that supports this kwarg — TODO confirm minimum).
        """

        # Extend mask
        # src_mask = src_mask.repeat(self.self_attn.num_heads, 1, 1)

        # PreLayerNorm
        if self.norm_first:
            # NOTE(review): `src` is reassigned to the normalized tensor, so the
            # residual below is added to the NORMALIZED input, not the original
            # one as in canonical pre-LN. Presumably this matches how the
            # checkpoint was trained — do not "fix" without re-validating.
            src = self.norm1(src)
            src_attn = self.self_attn(src, src, src, attn_mask=src_mask,
                                      key_padding_mask=src_key_padding_mask, average_attn_weights=False)  # [0]
            src = src + self.dropout1(src_attn[0])
            src = self.norm2(src)
            src_ffn = self.linear2(self.dropout(self.activation(self.linear1(src))))
            src = src + self.dropout2(src_ffn)

        else:
            # Post-LayerNorm: attention -> residual -> norm -> FFN -> residual -> norm.
            src_attn = self.self_attn(src, src, src, attn_mask=src_mask,
                                      key_padding_mask=src_key_padding_mask, average_attn_weights=False)  # [0]
            src = src + self.dropout1(src_attn[0])
            src = self.norm1(src)
            src_ffn = self.linear2(self.dropout(self.activation(self.linear1(src))))
            src = src + self.dropout2(src_ffn)
            src = self.norm2(src)
        return src, src_attn[1]
203
+
204
+
205
class HiBiAlBert(Module):
    r"""Hierarchical bidirectional-ALiBi BERT encoder (core module).

    Stacks ``config.num_hidden_layers`` clones of ``TransformerEncoderLayer``
    in an interleaved "I3" pattern: within every group of three consecutive
    layers, the first two are segment-wise encoders (tokens attend only inside
    their own utterance) and the third is a cross-segment encoder (attention
    routed through utterance CLS tokens, HIER-CLS style). Position information
    is injected as additive attention biases from two ``BiALiBi`` modules
    instead of absolute position embeddings.

    Utterance boundaries come from ``token_type_ids`` (one id per utterance);
    when absent they are derived from occurrences of ``config.sep_token_id``.

    Shapes:
        - input_ids / attention_mask / token_type_ids: (N, S)
        - returned hidden states: (N, S, hidden_size)
    """

    def __init__(self, config) -> None:
        super(HiBiAlBert, self).__init__()
        self.config = config
        # Word Emb

        # Independent position biases for the segment-wise (utterance) layers
        # and the cross-segment (context) layers.
        self.bialibi_utt = BiALiBi(config)
        self.bialibi_ct = BiALiBi(config)

        self.word_embeddings = torch.nn.Embedding(config.vocab_size,
                                                  config.hidden_size,
                                                  padding_idx=config.pad_token_id)

        # Encoder
        self.enc_layers = _get_clones(TransformerEncoderLayer(config=config),
                                      # d_model=config.hidden_size,
                                      # nhead=config.num_attention_heads,
                                      # dim_feedforward=config.intermediate_size,
                                      # dropout=config.hidden_dropout_prob,
                                      # activation=config.hidden_act,
                                      # layer_norm_eps=config.layer_norm_eps,
                                      # norm_first=config.norm_first,
                                      # batch_first=False),
                                      config.num_hidden_layers)  # ModuleList
        self.norm_e = LayerNorm(config.hidden_size,
                                eps=config.layer_norm_eps)

        # Xavier-init all >1-d parameters first, then overwrite the embedding
        # table with a uniform init (init_weights runs after _reset_parameters).
        self._reset_parameters()
        self.init_weights()

    def init_weights(self) -> None:
        # Uniform init for the word embedding table only.
        initrange = 0.1
        self.word_embeddings.weight.data.uniform_(-initrange, initrange)

    # TODO: fix return dict
    def forward(self, input_ids: Tensor,
                attention_mask: Optional[Tensor] = None,
                token_type_ids: Optional[Tensor] = None,
                ct_mask_type: str = "cls",
                output_attentions: Optional[bool] = True,
                memory_key_padding_mask: Optional[Tensor] = None,
                **kwargs
                ):
        r"""Encode a batch of token ids.

        Args:
            input_ids: (N, S) token ids.
            attention_mask: (N, S); 1 for real tokens, 0 for padding. Derived
                from ``pad_token_id`` when omitted.
            token_type_ids: (N, S) utterance index per token. Derived from
                ``sep_token_id`` positions when omitted.
            ct_mask_type: context-mask flavour; only "cls" is supported
                ("hier"/"full" raise inside ``get_hier_encoder_mask``).
            output_attentions: collect per-layer attention weights when truthy.
            memory_key_padding_mask: unused; kept for interface compatibility.
            **kwargs: ignored (absorbs extra HF-style keyword arguments).

        Returns:
            Tuple of (hidden_states (N, S, E), pooled_output (N, E) — the
            first token's hidden state — and all_self_attentions or None).
        """
        all_self_attentions = () if output_attentions else None
        # print(input_ids.shape)

        if attention_mask is None:
            # Convert input_ids to attention mask
            attention_mask = self.create_padding_mask(input_ids)
            # NOTE(review): torch.tensor() on an existing tensor copies it and
            # emits a UserWarning (currently silenced by the module-level
            # filterwarnings); presumably attention_mask.long() was intended.
            attention_mask = torch.tensor(attention_mask, dtype=torch.long)

        if token_type_ids is None:
            # Convert input_ids to token type IDs
            token_type_ids = self.convert_input_ids_to_token_type_ids(input_ids)

        # True at padded positions (MultiheadAttention key_padding convention).
        src_key_padding_mask = torch.logical_not(attention_mask)
        utt_indices = token_type_ids

        _, enc_mask_utt, enc_mask_ct = get_hier_encoder_mask(input_ids,
                                                             src_key_padding_mask,
                                                             utt_indices,
                                                             type=ct_mask_type)
        # NOTE(review): repeat(num_heads, 1, 1) tiles head-major (index
        # h*B + b) while the bialibi biases below are tiled batch-major
        # (index b*H + h, the layout MultiheadAttention expects for a 3-D
        # attn_mask). For batch size > 1 the two orderings disagree —
        # confirm whether repeat_interleave was intended here.
        enc_mask_utt = enc_mask_utt.repeat(self.config.num_attention_heads, 1, 1)
        enc_mask_ct = enc_mask_ct.repeat(self.config.num_attention_heads, 1, 1)

        B, T = input_ids.shape  # batch size, sequence length
        # Additive position biases tiled per batch element: (B*H, T, T).
        bialibi_utt_mask = self.bialibi_utt(T).repeat(B, 1, 1)
        bialibi_ct_mask = self.bialibi_ct(T).repeat(B, 1, 1)

        # Blocked positions get -inf so softmax assigns them zero weight.
        bialibi_utt_mask[enc_mask_utt.bool()] = float('-inf')
        bialibi_ct_mask[enc_mask_ct.bool()] = float('-inf')

        # memory = self.encoder(input_ids, mask=src_mask, src_key_padding_mask=src_key_padding_mask)

        # Encoding
        # memory = input_ids

        # Layers run with batch_first=False, hence the (N, S) -> (S, N) transpose.
        enc_inp = self.word_embeddings(input_ids.transpose(0, 1))

        # Basic config
        # for i, layer in enumerate(self.enc_layers):
        #     if i == self.config.num_hidden_layers // 2:
        #         # Positional Embedding for Context Encoder
        #         enc_inp = enc_inp + self.post_word_emb(enc_inp.transpose(0, 1)).transpose(0, 1)
        #     if i < self.config.num_hidden_layers // 2:
        #         enc_inp = layer(enc_inp,
        #                         src_key_padding_mask=src_key_padding_mask,
        #                         src_mask=enc_mask_utt.float())
        #     else:
        #         enc_inp = layer(enc_inp,
        #                         src_key_padding_mask=src_key_padding_mask,
        #                         src_mask=enc_mask_ct)

        # TODO: add layers configurations support and variations setup
        # interleaved config (I3): layers 0,1 segment-wise; layer 2 cross-segment; repeat.
        for i, layer in enumerate(self.enc_layers):
            if i % (2 + 1) < 2:
                # Shared encoders or Segment-wise encoders
                # print("SWE")
                enc_inp, att_w = layer(enc_inp,
                                       src_key_padding_mask=src_key_padding_mask,
                                       src_mask=bialibi_utt_mask)
            else:
                # Context encoder or Cross-segment encoders
                # print("CSE")
                enc_inp, att_w = layer(enc_inp,
                                       src_key_padding_mask=src_key_padding_mask,
                                       src_mask=bialibi_ct_mask)
            if output_attentions:
                all_self_attentions = all_self_attentions + (att_w,)

        # Final layer norm over the encoder output.
        if self.norm_e is not None:
            enc_inp = self.norm_e(enc_inp)

        encoder_output = enc_inp.transpose(0, 1)  # back to (N, S, E)
        hidden_states = encoder_output

        # "Pooled" output is simply the first token's hidden state (no tanh head).
        pooled_output = hidden_states[:, 0, :]
        outputs = (hidden_states, pooled_output, all_self_attentions)

        return outputs

    def create_padding_mask(self, token_ids):
        """Return an int tensor: 1 where token != pad_token_id, else 0."""
        padding_mask = torch.ne(token_ids, self.config.pad_token_id).int()
        return padding_mask

    def generate_square_subsequent_mask(self, sz: int) -> Tensor:
        r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).
        """
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def _reset_parameters(self):
        r"""Initiate parameters in the transformer model (Xavier for all >1-d weights)."""

        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)

    def convert_input_ids_to_token_type_ids(self, input_ids):
        """Derive utterance ids from SEP positions: tokens up to and including
        the k-th SEP get type k; positions after the last SEP keep type 0."""
        token_type_ids = torch.zeros_like(input_ids)

        for row, row_tensor in enumerate(input_ids):
            sep_indices = torch.nonzero(row_tensor == self.config.sep_token_id)
            prev_index = -1
            for type_id, index in enumerate(sep_indices):
                # `index` is a 1-element tensor; slicing relies on its __index__.
                token_type_ids[row, prev_index + 1:index + 1] = type_id
                prev_index = index

        return token_type_ids
416
+
417
+
418
class HiBiAlBertModel(PreTrainedModel):
    """Hugging Face wrapper exposing the bare ``HiBiAlBert`` encoder through
    the ``PreTrainedModel`` interface."""

    config_class = HiBiAlBertConfig
    base_model_prefix = "hibial"

    def __init__(self, config):
        super().__init__(config)

        self.model = HiBiAlBert(config)

    def forward(
            self,
            input_ids: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            token_type_ids: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **kwargs
    ):
        """Run the encoder.

        Returns the raw (hidden_states, pooled_output, attentions) tuple
        unless ``return_dict`` is truthy, in which case the same values are
        wrapped in a ``BaseModelOutputWithPooling``.
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return outputs

        last_hidden, pooled, attentions = outputs
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden,
            pooler_output=pooled,
            attentions=attentions,
        )

    def get_input_embeddings(self):
        return self.model.word_embeddings

    def set_input_embeddings(self, value):
        self.model.word_embeddings = value
460
+
461
+
462
class HiBiAlBertForMaskedLM(BertForMaskedLM):
    """Masked-LM model: the standard ``BertForMaskedLM`` head and loss on top
    of the hierarchical ``HiBiAlBert`` encoder."""

    config_class = HiBiAlBertConfig

    def __init__(self, config):
        super().__init__(config)
        # Swap the BERT encoder built by super().__init__ for the hierarchical
        # one. NOTE(review): this replacement happens after the parent's
        # initialization (including any embedding/decoder weight tying) —
        # confirm the LM head is (re)tied to the new embeddings as intended.
        self.bert = HiBiAlBertModel(config)
468
+
469
+
470
class HiBiAlBertForSequenceClassification(BertForSequenceClassification):
    """Sequence classification: the ``BertForSequenceClassification`` head and
    loss logic over the hierarchical ``HiBiAlBert`` encoder."""

    config_class = HiBiAlBertConfig

    def __init__(self, config):
        super().__init__(config)
        # Replace the encoder created by super().__init__ with the
        # hierarchical HiBiAlBert encoder; classifier head is inherited.
        self.bert = HiBiAlBertModel(config)