lhallee committed on
Commit dfa3bd2 · verified · 1 Parent(s): 38a6515

Upload dplm2.py with huggingface_hub

Files changed (1)
  1. dplm2.py +929 -0
dplm2.py ADDED
@@ -0,0 +1,929 @@
1
+ """
2
+ FastPLMs-compatible DPLM2 implementation.
3
+
4
+ This module is based on:
5
+ https://github.com/bytedance/dplm
6
+ """
7
+
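# Editor's note (not part of the uploaded file): this module mirrors the Hugging
# Face ESM stack but swaps in a rotary embedding aware of mixed amino-acid /
# structure-token inputs, a self-attention block that can run on SDPA or flex
# attention, and DPLM2-specific task heads. The DPLM2-specific knobs
# (`attn_backend`, `aa_type`, `struct_type`, `pad_type`) are introduced in
# `DPLM2Config` below.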
8
+ import entrypoint_setup
9
+ import torch
10
+ import torch.nn as nn
11
+ from torch.nn import functional as F
12
+ from dataclasses import dataclass
13
+ from typing import Dict, List, Optional, Tuple, Union
14
+
15
+ from transformers import EsmTokenizer
16
+ from transformers.modeling_outputs import (
17
+ BaseModelOutputWithPastAndCrossAttentions,
18
+ BaseModelOutputWithPoolingAndCrossAttentions,
19
+ ModelOutput,
20
+ SequenceClassifierOutput,
21
+ TokenClassifierOutput,
22
+ )
23
+ from transformers.models.esm.configuration_esm import EsmConfig
24
+ from transformers.models.esm.modeling_esm import (
25
+ EsmAttention,
26
+ EsmClassificationHead,
27
+ EsmEmbeddings,
28
+ EsmEncoder,
29
+ EsmIntermediate,
30
+ EsmLayer,
31
+ EsmLMHead,
32
+ EsmOutput,
33
+ EsmPooler,
34
+ EsmPreTrainedModel,
35
+ EsmSelfAttention,
36
+ EsmSelfOutput,
37
+ RotaryEmbedding,
38
+ apply_rotary_pos_emb,
39
+ )
40
+
41
+ try:
42
+ from torch.nn.attention.flex_attention import create_block_mask
43
+ from torch.nn.attention.flex_attention import flex_attention
44
+ except ImportError:
45
+ create_block_mask = None
46
+ flex_attention = None
47
+
48
+ try:
49
+ from .base_tokenizer import BaseSequenceTokenizer
50
+ except ImportError:
51
+ from base_tokenizer import BaseSequenceTokenizer
52
+
53
+ try:
54
+ from .embedding_mixin import EmbeddingMixin
55
+ except ImportError:
56
+ try:
57
+ from ..embedding_mixin import EmbeddingMixin
58
+ except ImportError:
59
+ from embedding_mixin import EmbeddingMixin
60
+
61
+
62
+ def _create_pad_block_mask(attention_mask_2d: torch.Tensor):
63
+ assert create_block_mask is not None, "Flex attention block mask requires create_block_mask."
64
+ token_valid = attention_mask_2d.bool()
65
+ batch_size, seq_len = token_valid.shape
66
+
67
+ def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
68
+ return token_valid[batch_idx, q_idx] & token_valid[batch_idx, kv_idx]
69
+
70
+ return create_block_mask(
71
+ mask_mod,
72
+ batch_size,
73
+ 1,
74
+ seq_len,
75
+ seq_len,
76
+ device=attention_mask_2d.device,
77
+ )
78
+
79
+
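# Editor's note (illustrative sketch, not part of the uploaded file): the helper
# above encodes plain bidirectional padding masking for flex attention: a
# (query, key) pair is kept only when both positions are real tokens. Assuming a
# 2-D mask with 1 = token and 0 = padding:
#
#   pad_mask = torch.tensor([[1, 1, 1, 0]], device="cuda")
#   block_mask = _create_pad_block_mask(pad_mask)
#   # later: flex_attention(q, k, v, block_mask=block_mask, scale=1.0)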
80
+ def _infer_modality_type(input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
81
+ input_mask = attention_mask.bool()
82
+ modality_type = ((input_ids < 33) & input_mask).int()
83
+ modality_type[~input_mask] = 2
84
+ return modality_type
85
+
86
+
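# Editor's note (illustrative, not part of the uploaded file): ids below 33 fall
# in the base ESM amino-acid vocabulary, so valid positions with id < 33 are
# tagged as amino-acid tokens (aa_type=1), valid ids >= 33 as structure tokens
# (struct_type=0), and padding as 2. For example, with 1 = <pad> in the standard
# ESM tokenizer:
#
#   ids  = torch.tensor([[4, 5, 40, 41, 1]])
#   mask = torch.tensor([[1, 1, 1, 1, 0]])
#   _infer_modality_type(ids, mask)   # -> tensor([[1, 1, 0, 0, 2]])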
87
+ MODEL_REGISTRY = {}
88
+
89
+
90
+ def register_model(name):
91
+ def decorator(cls):
92
+ MODEL_REGISTRY[name] = cls
93
+ return cls
94
+
95
+ return decorator
96
+
97
+
98
+ @dataclass
99
+ class DPLM2MaskedLMOutput(ModelOutput):
100
+ loss: Optional[torch.Tensor] = None
101
+ logits: Optional[torch.Tensor] = None
102
+ last_hidden_state: Optional[torch.Tensor] = None
103
+ hidden_states: Optional[Tuple[torch.Tensor, ...]] = None
104
+ attentions: Optional[Tuple[torch.Tensor, ...]] = None
105
+
106
+
107
+ class DPLM2Config(EsmConfig):
108
+ model_type = "dplm2"
109
+
110
+ def __init__(
111
+ self,
112
+ attn_backend: str = "sdpa",
113
+ aa_type: int = 1,
114
+ struct_type: int = 0,
115
+ pad_type: int = 2,
116
+ **kwargs,
117
+ ):
118
+ super().__init__(**kwargs)
119
+ self.attn_backend = attn_backend
120
+ self.aa_type = aa_type
121
+ self.struct_type = struct_type
122
+ self.pad_type = pad_type
123
+ self.tie_word_embeddings = False
124
+
125
+
126
+ class DPLM2PreTrainedModel(EsmPreTrainedModel):
127
+ config_class = DPLM2Config
128
+ base_model_prefix = "dplm2"
129
+ supports_gradient_checkpointing = True
130
+ tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
131
+ all_tied_weights_keys = {}
132
+
133
+ def get_input_embeddings(self) -> nn.Module:
134
+ try:
135
+ return self.embeddings.word_embeddings
136
+ except AttributeError:
137
+ return self.esm.embeddings.word_embeddings
138
+
139
+
140
+ class ModifiedRotaryEmbedding(RotaryEmbedding):
141
+ def __init__(self, dim: int, aa_type: int, struct_type: int):
142
+ super().__init__(dim)
143
+ self.aa_type = aa_type
144
+ self.struct_type = struct_type
145
+
146
+ def _has_multimodal_tokens(self, type_ids: Optional[torch.Tensor]) -> bool:
147
+ if type_ids is None:
148
+ return False
149
+ aa_present = (type_ids == self.aa_type).any()
150
+ struct_present = (type_ids == self.struct_type).any()
151
+ return bool(aa_present and struct_present)
152
+
153
+ def _update_cos_sin_tables(
154
+ self,
155
+ x: torch.Tensor,
156
+ type_ids: Optional[torch.Tensor],
157
+ seq_dimension: int = 2,
158
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
159
+ seq_len = x.shape[seq_dimension]
160
+ if self._has_multimodal_tokens(type_ids):
161
+ seq_len = seq_len // 2
162
+
163
+ cache_is_stale = (
164
+ self._cos_cached is None
165
+ or self._sin_cached is None
166
+ or seq_len != self._seq_len_cached
167
+ or self._cos_cached.device != x.device
168
+ )
169
+ if cache_is_stale:
170
+ self._seq_len_cached = seq_len
171
+ t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
172
+ freqs = torch.outer(t, self.inv_freq)
173
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
174
+ self._cos_cached = emb.cos()[None, None, :, :]
175
+ self._sin_cached = emb.sin()[None, None, :, :]
176
+
177
+ return self._cos_cached, self._sin_cached
178
+
179
+ def forward(
180
+ self,
181
+ q: torch.Tensor,
182
+ k: torch.Tensor,
183
+ type_ids: Optional[torch.Tensor],
184
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
185
+ self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
186
+ k,
187
+ type_ids=type_ids,
188
+ seq_dimension=-2,
189
+ )
190
+
191
+ if self._has_multimodal_tokens(type_ids):
192
+ q_1, q_2 = q.chunk(2, dim=-2)
193
+ k_1, k_2 = k.chunk(2, dim=-2)
194
+ q_1 = apply_rotary_pos_emb(q_1, self._cos_cached, self._sin_cached)
195
+ q_2 = apply_rotary_pos_emb(q_2, self._cos_cached, self._sin_cached)
196
+ k_1 = apply_rotary_pos_emb(k_1, self._cos_cached, self._sin_cached)
197
+ k_2 = apply_rotary_pos_emb(k_2, self._cos_cached, self._sin_cached)
198
+ return torch.cat((q_1, q_2), dim=-2), torch.cat((k_1, k_2), dim=-2)
199
+
200
+ return (
201
+ apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
202
+ apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
203
+ )
204
+
205
+
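# Editor's note (not part of the uploaded file): when a batch contains both
# amino-acid and structure tokens, the class above assumes the two tracks are
# concatenated along the sequence dimension with equal lengths. The cos/sin
# cache is built for half the padded length and applied to each half of q/k
# separately, so a residue and its corresponding structure token share the same
# rotary position.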
206
+ class ModifiedEsmSelfAttention(EsmSelfAttention):
207
+ def __init__(self, config, position_embedding_type=None):
208
+ super().__init__(config, position_embedding_type)
209
+ self.attn_backend = config.attn_backend
210
+ self.rotary_embeddings = ModifiedRotaryEmbedding(
211
+ dim=self.attention_head_size,
212
+ aa_type=config.aa_type,
213
+ struct_type=config.struct_type,
214
+ )
215
+
216
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
217
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
218
+ x = x.view(new_x_shape)
219
+ return x.permute(0, 2, 1, 3)
220
+
221
+ def forward(
222
+ self,
223
+ hidden_states: torch.Tensor,
224
+ attention_mask: Optional[torch.FloatTensor] = None,
225
+ head_mask: Optional[torch.FloatTensor] = None,
226
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
227
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
228
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
229
+ output_attentions: Optional[bool] = False,
230
+ type_ids: Optional[torch.Tensor] = None,
231
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
232
+ flex_block_mask: Optional[object] = None,
233
+ **kwargs,
234
+ ) -> Tuple[torch.Tensor]:
235
+ if past_key_values is not None:
236
+ past_key_value = past_key_values
237
+
238
+ mixed_query_layer = self.query(hidden_states)
239
+ is_cross_attention = encoder_hidden_states is not None
240
+
241
+ if is_cross_attention and past_key_value is not None:
242
+ key_layer = past_key_value[0]
243
+ value_layer = past_key_value[1]
244
+ attention_mask = encoder_attention_mask
245
+ elif is_cross_attention:
246
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
247
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
248
+ attention_mask = encoder_attention_mask
249
+ elif past_key_value is not None:
250
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
251
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
252
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
253
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
254
+ else:
255
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
256
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
257
+
258
+ query_layer = self.transpose_for_scores(mixed_query_layer) * self.attention_head_size**-0.5
259
+
260
+ if self.is_decoder:
261
+ past_key_value = (key_layer, value_layer)
262
+
263
+ if self.position_embedding_type == "rotary":
264
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer, type_ids)
265
+
266
+ if self.position_embedding_type in ["relative_key", "relative_key_query"]:
267
+ raise NotImplementedError
268
+
269
+ query_layer = query_layer.contiguous()
270
+ key_layer = key_layer.contiguous()
271
+ value_layer = value_layer.contiguous()
272
+
273
+ if output_attentions:
274
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
275
+ if attention_mask is not None:
276
+ attention_scores = attention_scores + attention_mask
277
+ attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).to(query_layer.dtype)
278
+ context_layer = torch.matmul(attention_probs, value_layer)
279
+ else:
280
+ attention_probs = None
281
+ if self.attn_backend == "flex":
282
+ assert flex_attention is not None, "Flex attention backend requested but torch.flex_attention is unavailable."
283
+ assert query_layer.dtype in (torch.float16, torch.bfloat16), (
284
+ f"Flex attention backend requires float16 or bfloat16, got {query_layer.dtype}."
285
+ )
286
+ assert is_cross_attention is False, "Flex attention backend currently does not support cross-attention."
287
+ assert past_key_value is None, "Flex attention backend currently does not support KV caching."
288
+ if attention_mask is not None:
289
+ assert flex_block_mask is not None, (
290
+ "Flex attention backend requires a block mask when attention_mask is provided."
291
+ )
292
+ context_layer = flex_attention(
293
+ query_layer,
294
+ key_layer,
295
+ value_layer,
296
+ block_mask=flex_block_mask,
297
+ scale=1.0,
298
+ )
299
+ else:
300
+ context_layer = F.scaled_dot_product_attention(
301
+ query_layer,
302
+ key_layer,
303
+ value_layer,
304
+ attn_mask=attention_mask,
305
+ scale=1.0,
306
+ )
307
+
308
+ if head_mask is not None and torch.is_tensor(head_mask):
309
+ context_layer = context_layer * head_mask
310
+
311
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
312
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
313
+ context_layer = context_layer.view(new_context_layer_shape)
314
+
315
+ outputs = (context_layer, attention_probs)
316
+ if self.is_decoder:
317
+ outputs = outputs + (past_key_value,)
318
+ return outputs
319
+
320
+
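# Editor's note (not part of the uploaded file): backend dispatch in the
# attention module above. With `output_attentions=True` the probabilities are
# materialized via an explicit matmul/softmax; otherwise `attn_backend="flex"`
# routes through flex_attention (fp16/bf16 only, no cross-attention or KV cache,
# padding expressed as a block mask), and any other backend value falls through
# to F.scaled_dot_product_attention. Queries are pre-scaled by
# attention_head_size**-0.5, so both fused paths are invoked with scale=1.0.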
321
+ class ModifiedEsmAttention(EsmAttention):
322
+ def __init__(self, config):
323
+ nn.Module.__init__(self)
324
+ self.self = ModifiedEsmSelfAttention(config)
325
+ self.output = EsmSelfOutput(config)
326
+ self.pruned_heads = set()
327
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
328
+
329
+ def forward(
330
+ self,
331
+ hidden_states,
332
+ attention_mask=None,
333
+ head_mask=None,
334
+ encoder_hidden_states=None,
335
+ encoder_attention_mask=None,
336
+ past_key_value=None,
337
+ output_attentions=False,
338
+ type_ids=None,
339
+ flex_block_mask=None,
340
+ ):
341
+ hidden_states_ln = self.LayerNorm(hidden_states)
342
+ self_outputs = self.self(
343
+ hidden_states_ln,
344
+ attention_mask,
345
+ head_mask,
346
+ encoder_hidden_states,
347
+ encoder_attention_mask,
348
+ past_key_value,
349
+ output_attentions,
350
+ type_ids,
351
+ flex_block_mask=flex_block_mask,
352
+ )
353
+ attention_output = self.output(self_outputs[0], hidden_states)
354
+ outputs = (attention_output,) + self_outputs[1:]
355
+ return outputs
356
+
357
+
358
+ class ModifiedEsmLayer(EsmLayer):
359
+ def __init__(self, config):
360
+ nn.Module.__init__(self)
361
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
362
+ self.seq_len_dim = 1
363
+ self.attention = ModifiedEsmAttention(config)
364
+ self.is_decoder = config.is_decoder
365
+ self.add_cross_attention = config.add_cross_attention
366
+ if self.add_cross_attention:
367
+ if self.is_decoder is False:
368
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
369
+ self.crossattention = ModifiedEsmAttention(config)
370
+ self.intermediate = EsmIntermediate(config)
371
+ self.output = EsmOutput(config)
372
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
373
+
374
+ def forward(
375
+ self,
376
+ hidden_states,
377
+ attention_mask=None,
378
+ head_mask=None,
379
+ encoder_hidden_states=None,
380
+ encoder_attention_mask=None,
381
+ past_key_value=None,
382
+ output_attentions=False,
383
+ type_ids=None,
384
+ flex_block_mask=None,
385
+ ):
386
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
387
+ self_attention_outputs = self.attention(
388
+ hidden_states,
389
+ attention_mask,
390
+ head_mask,
391
+ output_attentions=output_attentions,
392
+ past_key_value=self_attn_past_key_value,
393
+ type_ids=type_ids,
394
+ flex_block_mask=flex_block_mask,
395
+ )
396
+ attention_output = self_attention_outputs[0]
397
+
398
+ if self.is_decoder:
399
+ outputs = self_attention_outputs[1:-1]
400
+ present_key_value = self_attention_outputs[-1]
401
+ else:
402
+ outputs = self_attention_outputs[1:]
403
+
404
+ if self.is_decoder and encoder_hidden_states is not None:
405
+ if self.add_cross_attention is False:
406
+ raise AttributeError(
407
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention "
408
+ "layers by setting `config.add_cross_attention=True`"
409
+ )
410
+
411
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
412
+ cross_attention_outputs = self.crossattention(
413
+ attention_output,
414
+ attention_mask,
415
+ head_mask,
416
+ encoder_hidden_states,
417
+ encoder_attention_mask,
418
+ cross_attn_past_key_value,
419
+ output_attentions,
420
+ type_ids=None,
421
+ flex_block_mask=None,
422
+ )
423
+ attention_output = cross_attention_outputs[0]
424
+ outputs = outputs + cross_attention_outputs[1:-1]
425
+ present_key_value = present_key_value + cross_attention_outputs[-1]
426
+
427
+ layer_output = self.feed_forward_chunk(attention_output)
428
+ outputs = (layer_output,) + outputs
429
+
430
+ if self.is_decoder:
431
+ outputs = outputs + (present_key_value,)
432
+ return outputs
433
+
434
+
435
+ class ModifiedEsmEncoder(EsmEncoder):
436
+ def __init__(self, config):
437
+ nn.Module.__init__(self)
438
+ self.config = config
439
+ self.layer = nn.ModuleList([ModifiedEsmLayer(config) for _ in range(config.num_hidden_layers)])
440
+ self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
441
+ self.gradient_checkpointing = False
442
+
443
+ def forward(
444
+ self,
445
+ hidden_states,
446
+ attention_mask=None,
447
+ head_mask=None,
448
+ encoder_hidden_states=None,
449
+ encoder_attention_mask=None,
450
+ past_key_values=None,
451
+ use_cache=None,
452
+ output_attentions=False,
453
+ output_hidden_states=False,
454
+ return_dict=True,
455
+ type_ids=None,
456
+ flex_block_mask=None,
457
+ ):
458
+ all_hidden_states = () if output_hidden_states else None
459
+ all_self_attentions = () if output_attentions else None
460
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
461
+ next_decoder_cache = () if use_cache else None
462
+
463
+ for i, layer_module in enumerate(self.layer):
464
+ if output_hidden_states:
465
+ all_hidden_states = all_hidden_states + (hidden_states,)
466
+
467
+ layer_head_mask = head_mask[i] if head_mask is not None else None
468
+ past_key_value = past_key_values[i] if past_key_values is not None else None
469
+
470
+ if self.gradient_checkpointing and self.training:
471
+ layer_outputs = self._gradient_checkpointing_func(
472
+ layer_module.__call__,
473
+ hidden_states,
474
+ attention_mask,
475
+ layer_head_mask,
476
+ encoder_hidden_states,
477
+ encoder_attention_mask,
478
+ past_key_value,
479
+ output_attentions,
480
+ type_ids,
481
+ flex_block_mask,
482
+ )
483
+ else:
484
+ layer_outputs = layer_module(
485
+ hidden_states,
486
+ attention_mask,
487
+ layer_head_mask,
488
+ encoder_hidden_states,
489
+ encoder_attention_mask,
490
+ past_key_value,
491
+ output_attentions,
492
+ type_ids,
493
+ flex_block_mask,
494
+ )
495
+
496
+ hidden_states = layer_outputs[0]
497
+ if use_cache:
498
+ next_decoder_cache = next_decoder_cache + (layer_outputs[-1],)
499
+ if output_attentions:
500
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
501
+ if self.config.add_cross_attention:
502
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
503
+
504
+ if self.emb_layer_norm_after:
505
+ hidden_states = self.emb_layer_norm_after(hidden_states)
506
+
507
+ if output_hidden_states:
508
+ all_hidden_states = all_hidden_states + (hidden_states,)
509
+
510
+ if return_dict is False:
511
+ return tuple(
512
+ value
513
+ for value in [
514
+ hidden_states,
515
+ next_decoder_cache,
516
+ all_hidden_states,
517
+ all_self_attentions,
518
+ all_cross_attentions,
519
+ ]
520
+ if value is not None
521
+ )
522
+
523
+ return BaseModelOutputWithPastAndCrossAttentions(
524
+ last_hidden_state=hidden_states,
525
+ past_key_values=next_decoder_cache,
526
+ hidden_states=all_hidden_states,
527
+ attentions=all_self_attentions,
528
+ cross_attentions=all_cross_attentions,
529
+ )
530
+
531
+
532
+ class DPLM2Model(DPLM2PreTrainedModel, EmbeddingMixin):
533
+ config_class = DPLM2Config
534
+
535
+ def __init__(self, config, add_pooling_layer=True):
536
+ DPLM2PreTrainedModel.__init__(self, config)
537
+ self.config = config
538
+ self.embeddings = EsmEmbeddings(config)
539
+ self.encoder = ModifiedEsmEncoder(config)
540
+ self.pooler = EsmPooler(config) if add_pooling_layer else None
541
+ self.post_init()
542
+
543
+ def _convert_head_mask_to_5d(self, head_mask: torch.Tensor, num_hidden_layers: int) -> torch.Tensor:
544
+ if head_mask.dim() == 1:
545
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
546
+ head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
547
+ elif head_mask.dim() == 2:
548
+ head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
549
+ assert head_mask.dim() == 5, f"head_mask.dim != 5, got {head_mask.dim()}"
550
+ head_mask = head_mask.to(dtype=self.dtype)
551
+ return head_mask
552
+
553
+ def get_head_mask(
554
+ self,
555
+ head_mask: Optional[torch.Tensor],
556
+ num_hidden_layers: int,
557
+ is_attention_chunked: bool = False,
558
+ ) -> Union[torch.Tensor, List[None]]:
559
+ if head_mask is None:
560
+ return [None] * num_hidden_layers
561
+ head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
562
+ if is_attention_chunked:
563
+ head_mask = head_mask.unsqueeze(-1)
564
+ return head_mask
565
+
566
+ def set_input_embeddings(self, value):
567
+ self.embeddings.word_embeddings = value
568
+
569
+ def _embed(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
570
+ if attention_mask is None:
571
+ attention_mask = input_ids.ne(self.config.pad_token_id)
572
+ type_ids = _infer_modality_type(input_ids, attention_mask)
573
+ outputs = self(
574
+ input_ids=input_ids,
575
+ attention_mask=attention_mask,
576
+ type_ids=type_ids,
577
+ output_hidden_states=False,
578
+ output_attentions=False,
579
+ return_dict=True,
580
+ )
581
+ return outputs.last_hidden_state
582
+
583
+ def forward(
584
+ self,
585
+ input_ids: Optional[torch.Tensor] = None,
586
+ attention_mask: Optional[torch.Tensor] = None,
587
+ position_ids: Optional[torch.Tensor] = None,
588
+ head_mask: Optional[torch.Tensor] = None,
589
+ inputs_embeds: Optional[torch.Tensor] = None,
590
+ encoder_hidden_states: Optional[torch.Tensor] = None,
591
+ encoder_attention_mask: Optional[torch.Tensor] = None,
592
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
593
+ use_cache: Optional[bool] = None,
594
+ output_attentions: Optional[bool] = None,
595
+ output_hidden_states: Optional[bool] = None,
596
+ return_dict: Optional[bool] = None,
597
+ type_ids: Optional[torch.Tensor] = None,
598
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
599
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
600
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
601
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
602
+
603
+ if self.config.is_decoder:
604
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
605
+ else:
606
+ use_cache = False
607
+
608
+ if input_ids is not None and inputs_embeds is not None:
609
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
610
+ if input_ids is not None:
611
+ input_shape = input_ids.size()
612
+ elif inputs_embeds is not None:
613
+ input_shape = inputs_embeds.size()[:-1]
614
+ else:
615
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
616
+
617
+ batch_size, seq_length = input_shape
618
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
619
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
620
+
621
+ if attention_mask is None:
622
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
623
+
624
+ token_attention_mask = None
625
+ if attention_mask.dim() == 2:
626
+ token_attention_mask = attention_mask.bool()
627
+ if self.config.attn_backend == "flex" and output_attentions is False:
628
+ extended_attention_mask = None
629
+ else:
630
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
631
+ elif attention_mask.dim() == 4:
632
+ if self.config.attn_backend == "flex" and output_attentions is False:
633
+ extended_attention_mask = None
634
+ else:
635
+ extended_attention_mask = attention_mask
636
+ if input_ids is not None:
637
+ token_attention_mask = input_ids.ne(self.config.pad_token_id)
638
+ else:
639
+ raise ValueError(f"Unsupported attention_mask shape: {attention_mask.shape}")
640
+
641
+ if self.config.is_decoder and encoder_hidden_states is not None:
642
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
643
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
644
+ if encoder_attention_mask is None:
645
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
646
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
647
+ else:
648
+ encoder_extended_attention_mask = encoder_attention_mask
649
+
650
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
651
+
652
+ embedding_attention_mask = token_attention_mask
653
+ if embedding_attention_mask is None and input_ids is not None:
654
+ embedding_attention_mask = input_ids.ne(self.config.pad_token_id)
655
+
656
+ flex_block_mask = None
657
+ if (
658
+ self.config.attn_backend == "flex"
659
+ and token_attention_mask is not None
660
+ and output_attentions is False
661
+ ):
662
+ assert create_block_mask is not None, (
663
+ "Flex attention backend requested but torch.create_block_mask is unavailable."
664
+ )
665
+ flex_block_mask = _create_pad_block_mask(token_attention_mask)
666
+
667
+ embedding_output = self.embeddings(
668
+ input_ids=input_ids,
669
+ position_ids=position_ids,
670
+ attention_mask=embedding_attention_mask,
671
+ inputs_embeds=inputs_embeds,
672
+ )
673
+ encoder_outputs = self.encoder(
674
+ embedding_output,
675
+ attention_mask=extended_attention_mask,
676
+ head_mask=head_mask,
677
+ encoder_hidden_states=encoder_hidden_states,
678
+ encoder_attention_mask=encoder_extended_attention_mask,
679
+ past_key_values=past_key_values,
680
+ use_cache=use_cache,
681
+ output_attentions=output_attentions,
682
+ output_hidden_states=output_hidden_states,
683
+ return_dict=return_dict,
684
+ type_ids=type_ids,
685
+ flex_block_mask=flex_block_mask,
686
+ )
687
+ sequence_output = encoder_outputs[0]
688
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
689
+
690
+ if return_dict is False:
691
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
692
+
693
+ return BaseModelOutputWithPoolingAndCrossAttentions(
694
+ last_hidden_state=sequence_output,
695
+ pooler_output=pooled_output,
696
+ past_key_values=None,
697
+ hidden_states=encoder_outputs.hidden_states,
698
+ attentions=encoder_outputs.attentions,
699
+ cross_attentions=encoder_outputs.cross_attentions,
700
+ )
701
+
702
+
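# Editor's note (illustrative, not part of the uploaded file): `_embed` is the
# hook consumed by EmbeddingMixin; it derives the padding mask and modality type
# ids from the raw input ids and returns the final hidden states, roughly:
#
#   enc = DPLM2PreTrainedModel.tokenizer("MKT...", return_tensors="pt")  # sequence is a placeholder
#   hidden = model._embed(enc.input_ids)   # (batch, seq_len, hidden_size)
#   # `model` here stands for a loaded DPLM2Model or task model instance.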
703
+ class DPLM2ForMaskedLM(DPLM2PreTrainedModel, EmbeddingMixin):
704
+ config_class = DPLM2Config
705
+
706
+ def __init__(self, config, dropout: float = 0.1, vocab_size: Optional[int] = None):
707
+ config.hidden_dropout_prob = dropout
708
+ config.tie_word_embeddings = False
709
+ if vocab_size is not None:
710
+ config.vocab_size = vocab_size
711
+ DPLM2PreTrainedModel.__init__(self, config)
712
+ self.esm = DPLM2Model(config, add_pooling_layer=False)
713
+ self.lm_head = EsmLMHead(config)
714
+ self.loss_fct = nn.CrossEntropyLoss()
715
+ self.post_init()
716
+ self.pad_id = config.pad_token_id
717
+
718
+ def get_output_embeddings(self):
719
+ return self.lm_head.decoder
720
+
721
+ def set_output_embeddings(self, new_embeddings):
722
+ self.lm_head.decoder = new_embeddings
723
+
724
+ def _get_modality_type(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
725
+ return _infer_modality_type(input_ids, attention_mask)
726
+
727
+ def _embed(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
728
+ if attention_mask is None:
729
+ attention_mask = input_ids.ne(self.pad_id)
730
+ type_ids = self._get_modality_type(input_ids, attention_mask)
731
+ outputs = self.esm(
732
+ input_ids=input_ids,
733
+ attention_mask=attention_mask,
734
+ type_ids=type_ids,
735
+ output_attentions=False,
736
+ output_hidden_states=False,
737
+ return_dict=True,
738
+ )
739
+ return outputs.last_hidden_state
740
+
741
+ def forward(
742
+ self,
743
+ input_ids: Optional[torch.Tensor] = None,
744
+ attention_mask: Optional[torch.Tensor] = None,
745
+ type_ids: Optional[torch.Tensor] = None,
746
+ inputs_embeds: Optional[torch.Tensor] = None,
747
+ decoder_input_ids: Optional[torch.Tensor] = None,
748
+ decoder_attention_mask: Optional[torch.Tensor] = None,
749
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
750
+ labels: Optional[torch.Tensor] = None,
751
+ output_attentions: Optional[bool] = None,
752
+ output_hidden_states: Optional[bool] = None,
753
+ return_dict: Optional[bool] = None,
754
+ encoder_hidden_states: Optional[torch.Tensor] = None,
755
+ encoder_attention_mask: Optional[torch.Tensor] = None,
756
+ ) -> Union[Tuple[torch.Tensor], DPLM2MaskedLMOutput]:
757
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
758
+
759
+ if attention_mask is None:
760
+ assert input_ids is not None
761
+ attention_mask = input_ids.ne(self.pad_id)
762
+
763
+ if type_ids is None:
764
+ assert input_ids is not None
765
+ type_ids = self._get_modality_type(input_ids, attention_mask)
766
+
767
+ outputs = self.esm(
768
+ input_ids=input_ids,
769
+ inputs_embeds=inputs_embeds,
770
+ attention_mask=attention_mask,
771
+ encoder_hidden_states=encoder_hidden_states,
772
+ encoder_attention_mask=encoder_attention_mask,
773
+ output_attentions=output_attentions,
774
+ output_hidden_states=output_hidden_states,
775
+ return_dict=True,
776
+ type_ids=type_ids,
777
+ )
778
+
779
+ sequence_output = outputs.last_hidden_state
780
+ logits = self.lm_head(sequence_output)
781
+ loss = None
782
+ if labels is not None:
783
+ labels = labels.to(logits.device)
784
+ loss = self.loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
785
+
786
+ if return_dict is False:
787
+ output = (logits, sequence_output, outputs.hidden_states, outputs.attentions)
788
+ if loss is not None:
789
+ return (loss,) + output
790
+ return output
791
+
792
+ return DPLM2MaskedLMOutput(
793
+ loss=loss,
794
+ logits=logits,
795
+ last_hidden_state=sequence_output,
796
+ hidden_states=outputs.hidden_states,
797
+ attentions=outputs.attentions,
798
+ )
799
+
800
+
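# Editor's note (illustrative, not part of the uploaded file): the masked-LM head
# returns per-token logits over the full vocabulary; passing `labels` (with
# positions to ignore set to -100, the CrossEntropyLoss default ignore_index)
# additionally yields a scalar loss:
#
#   out = model(input_ids=ids, labels=labels)   # `model` is a DPLM2ForMaskedLM
#   out.loss, out.logits.shape                  # scalar, (batch, seq_len, vocab_size)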
801
+ class DPLM2ForSequenceClassification(DPLM2PreTrainedModel, EmbeddingMixin):
802
+ config_class = DPLM2Config
803
+
804
+ def __init__(self, config):
805
+ DPLM2PreTrainedModel.__init__(self, config)
806
+ self.num_labels = config.num_labels
807
+ self.esm = DPLM2Model(config, add_pooling_layer=False)
808
+ self.classifier = EsmClassificationHead(config)
809
+ self.mse = nn.MSELoss()
810
+ self.ce = nn.CrossEntropyLoss()
811
+ self.bce = nn.BCEWithLogitsLoss()
812
+ self.post_init()
813
+
814
+ def _embed(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
815
+ return self.esm._embed(input_ids, attention_mask)
816
+
817
+ def forward(
818
+ self,
819
+ input_ids: Optional[torch.Tensor] = None,
820
+ attention_mask: Optional[torch.Tensor] = None,
821
+ type_ids: Optional[torch.Tensor] = None,
822
+ inputs_embeds: Optional[torch.Tensor] = None,
823
+ labels: Optional[torch.Tensor] = None,
824
+ output_attentions: Optional[bool] = None,
825
+ output_hidden_states: Optional[bool] = None,
826
+ return_dict: Optional[bool] = None,
827
+ **kwargs,
828
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
829
+ if type_ids is None and input_ids is not None:
830
+ if attention_mask is None:
831
+ attention_mask = input_ids.ne(self.config.pad_token_id)
832
+ type_ids = _infer_modality_type(input_ids, attention_mask)
833
+
834
+ outputs = self.esm(
835
+ input_ids=input_ids,
836
+ attention_mask=attention_mask,
837
+ type_ids=type_ids,
838
+ inputs_embeds=inputs_embeds,
839
+ output_attentions=output_attentions,
840
+ output_hidden_states=output_hidden_states,
841
+ return_dict=True,
842
+ )
843
+ sequence_output = outputs.last_hidden_state
844
+ logits = self.classifier(sequence_output)
845
+
846
+ loss = None
847
+ if labels is not None:
848
+ labels = labels.to(logits.device)
849
+ if self.config.problem_type is None:
850
+ if self.num_labels == 1:
851
+ self.config.problem_type = "regression"
852
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
853
+ self.config.problem_type = "single_label_classification"
854
+ else:
855
+ self.config.problem_type = "multi_label_classification"
856
+
857
+ if self.config.problem_type == "regression":
858
+ if self.num_labels == 1:
859
+ loss = self.mse(logits.squeeze(), labels.squeeze())
860
+ else:
861
+ loss = self.mse(logits, labels)
862
+ elif self.config.problem_type == "single_label_classification":
863
+ loss = self.ce(logits.view(-1, self.num_labels), labels.view(-1))
864
+ elif self.config.problem_type == "multi_label_classification":
865
+ loss = self.bce(logits, labels)
866
+
867
+ return SequenceClassifierOutput(
868
+ loss=loss,
869
+ logits=logits,
870
+ hidden_states=outputs.hidden_states,
871
+ attentions=outputs.attentions,
872
+ )
873
+
874
+
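# Editor's note (not part of the uploaded file): as in the stock Hugging Face
# classification heads, `problem_type` is inferred from the first labeled batch
# when unset: a single label means regression (MSE), integer labels with more
# than one class mean single-label classification (cross-entropy), and anything
# else is treated as multi-label classification (BCE with logits).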
875
+ class DPLM2ForTokenClassification(DPLM2PreTrainedModel, EmbeddingMixin):
876
+ config_class = DPLM2Config
877
+
878
+ def __init__(self, config):
879
+ DPLM2PreTrainedModel.__init__(self, config)
880
+ self.num_labels = config.num_labels
881
+ self.esm = DPLM2Model(config, add_pooling_layer=False)
882
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
883
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
884
+ self.loss_fct = nn.CrossEntropyLoss()
885
+ self.post_init()
886
+
887
+ def _embed(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
888
+ return self.esm._embed(input_ids, attention_mask)
889
+
890
+ def forward(
891
+ self,
892
+ input_ids: Optional[torch.Tensor] = None,
893
+ attention_mask: Optional[torch.Tensor] = None,
894
+ type_ids: Optional[torch.Tensor] = None,
895
+ inputs_embeds: Optional[torch.Tensor] = None,
896
+ labels: Optional[torch.Tensor] = None,
897
+ output_attentions: Optional[bool] = None,
898
+ output_hidden_states: Optional[bool] = None,
899
+ return_dict: Optional[bool] = None,
900
+ **kwargs,
901
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
902
+ if type_ids is None and input_ids is not None:
903
+ if attention_mask is None:
904
+ attention_mask = input_ids.ne(self.config.pad_token_id)
905
+ type_ids = _infer_modality_type(input_ids, attention_mask)
906
+
907
+ outputs = self.esm(
908
+ input_ids=input_ids,
909
+ attention_mask=attention_mask,
910
+ type_ids=type_ids,
911
+ inputs_embeds=inputs_embeds,
912
+ output_attentions=output_attentions,
913
+ output_hidden_states=output_hidden_states,
914
+ return_dict=True,
915
+ )
916
+ sequence_output = self.dropout(outputs.last_hidden_state)
917
+ logits = self.classifier(sequence_output)
918
+
919
+ loss = None
920
+ if labels is not None:
921
+ labels = labels.to(logits.device)
922
+ loss = self.loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
923
+
924
+ return TokenClassifierOutput(
925
+ loss=loss,
926
+ logits=logits,
927
+ hidden_states=outputs.hidden_states,
928
+ attentions=outputs.attentions,
929
+ )
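# Editor's note: a minimal smoke-test sketch appended here for illustration; it
# is not part of the uploaded file. Sizes are placeholders, and exact EsmConfig
# fields may vary with the installed transformers version.
if __name__ == "__main__":
    demo_config = DPLM2Config(
        vocab_size=64,               # placeholder: aa ids (< 33) plus structure ids
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=128,
        pad_token_id=1,
        mask_token_id=32,
        position_embedding_type="rotary",
        attn_backend="sdpa",
    )
    demo_model = DPLM2ForMaskedLM(demo_config)
    demo_ids = torch.randint(4, 64, (2, 16))  # mixes ids below and above 33
    demo_out = demo_model(input_ids=demo_ids)
    print(demo_out.logits.shape)  # expected: torch.Size([2, 16, 64])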