from typing import Unpack
import torch
from transformers import (
    DataCollatorWithFlattening,
    ModernBertModel,
    ModernBertConfig,
    ModernBertForMaskedLM,
    ModernBertForSequenceClassification,
    ModernBertForTokenClassification,
    ModernBertForQuestionAnswering,
    ModernBertForMultipleChoice
)
from transformers.masking_utils import create_bidirectional_mask, create_bidirectional_sliding_window_mask
from transformers.modeling_outputs import BaseModelOutput
from transformers.utils import TransformersKwargs


def _unpad_input(input_ids: torch.Tensor, attention_mask: torch.Tensor) -> dict:
    """Flatten a padded batch into one packed sequence.

    Uses DataCollatorWithFlattening to concatenate the non-padding tokens of
    every row and to compute the FlashAttention kwargs (cu_seq_lens_q/k,
    max_length_q/k) that mark the original sequence boundaries.
    """
    collator = DataCollatorWithFlattening(return_flash_attn_kwargs=True)
    features = collator([{"input_ids": ids[mask.bool()].tolist()} for ids, mask in zip(input_ids, attention_mask)])
    return features
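# For illustration (a sketch, not part of the module's API): for two sequences
# of lengths 2 and 3, the collator yields roughly the following, with exact
# dtypes and extra keys depending on the installed transformers version:
#   features["input_ids"]     -> shape (1, 5): all real tokens packed into one row
#   features["position_ids"]  -> tensor([[0, 1, 0, 1, 2]]): positions restart per sequence
#   features["cu_seq_lens_q"] -> tensor([0, 2, 5]): cumulative sequence boundaries
#   features["max_length_q"]  -> 3: length of the longest packed sequence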


def _pad_output(inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int) -> torch.Tensor:
    """Scatter a packed (unpadded) tensor back to its padded (batch, seqlen, ...) shape.

    `indices` holds the flat positions of the non-padding tokens, as derived
    from the original attention mask; padding positions are filled with zeros.
    """
    if inputs.dim() == 3:
        # Drop only the leading batch dimension of size 1 added by the collator;
        # a bare squeeze() could also collapse a size-1 feature dimension.
        inputs = inputs.squeeze(0)
    if inputs.dim() == 1:
        output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device)
        output[indices] = inputs
        padded_inputs = output.view(batch, seqlen)
    else:
        _, *rest = inputs.shape
        output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device)
        output[indices] = inputs
        padded_inputs = output.view(batch, seqlen, *rest)
    return padded_inputs
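# A minimal round-trip sketch (illustrative only, values hypothetical): scatter
# five packed per-token values back into a padded (2, 3) batch:
#   mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
#   idx = torch.nonzero(mask.flatten()).flatten()   # tensor([0, 1, 3, 4, 5])
#   packed = torch.arange(5, dtype=torch.float)     # one value per real token
#   _pad_output(packed, idx, batch=2, seqlen=3)
#   # -> tensor([[0., 1., 0.], [2., 3., 4.]])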


class UnpadModernBertModel(ModernBertModel):
    """ModernBertModel whose forward unpads flash-attention inputs and re-pads the outputs."""

    def __init__(self, config: ModernBertConfig):
        super().__init__(config)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        seq_len = inputs_embeds.shape[1] if inputs_embeds is not None else input_ids.shape[1]
        batch_size = inputs_embeds.shape[0] if inputs_embeds is not None else input_ids.shape[0]
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        indices = None
        if self.config._attn_implementation.startswith("flash_attention"):
            if input_ids is None or attention_mask is None:
                raise ValueError("Unpadding requires both input_ids and attention_mask")
            with torch.no_grad():
                # Flat positions of the real (non-padding) tokens, kept so the
                # packed output can be scattered back to the padded layout.
                indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
                # Pack all sequences into a single row and hand the sequence
                # boundaries to flash attention via cu_seq_lens_* / max_length_*.
                features = _unpad_input(input_ids, attention_mask)
                input_ids = features["input_ids"].to(device=device)
                position_ids = features["position_ids"].to(device=device)
                attention_mask = None
                kwargs["cu_seq_lens_k"] = features["cu_seq_lens_k"].to(device=device)
                kwargs["cu_seq_lens_q"] = features["cu_seq_lens_q"].to(device=device)
                kwargs["max_length_k"] = features["max_length_k"]
                kwargs["max_length_q"] = features["max_length_q"]

        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0)

        hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds)

        # Callers may pass a prebuilt {layer_type: mask} mapping; otherwise build
        # bidirectional (encoder-style) masks for both attention layer types.
        if not isinstance(attention_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": hidden_states,
                "attention_mask": attention_mask,
            }
            attention_mask_mapping = {
                "full_attention": create_bidirectional_mask(**mask_kwargs),
                "sliding_attention": create_bidirectional_sliding_window_mask(**mask_kwargs),
            }

        position_embeddings = {}
        for layer_type in self.config.layer_types:
            position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)

        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=attention_mask_mapping[encoder_layer.attention_type],
                position_embeddings=position_embeddings[encoder_layer.attention_type],
                **kwargs,
            )

        hidden_states = self.final_norm(hidden_states)
        if self.config._attn_implementation.startswith("flash_attention"):
            # Scatter the packed tokens back to the padded (batch, seqlen, hidden)
            # layout so downstream heads see the usual shape.
            hidden_states = _pad_output(
                inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_len
            )

        return BaseModelOutput(last_hidden_state=hidden_states)


class UnpadModernBertForMaskedLM(ModernBertForMaskedLM):

    def __init__(self, config):
        super().__init__(config)
        # Swap in the unpadding backbone; from_pretrained loads weights after
        # __init__, so replacing the module here does not lose checkpoint weights.
        self.model = UnpadModernBertModel(config)
        self.post_init()


class UnpadModernBertForSequenceClassification(ModernBertForSequenceClassification):

    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadModernBertModel(config)
        self.post_init()


class UnpadModernBertForTokenClassification(ModernBertForTokenClassification):

    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadModernBertModel(config)
        self.post_init()


class UnpadModernBertForQuestionAnswering(ModernBertForQuestionAnswering):

    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadModernBertModel(config)
        self.post_init()


class UnpadModernBertForMultipleChoice(ModernBertForMultipleChoice):

    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadModernBertModel(config)
        self.post_init()


def enable_modernbert_unpadding():
    """Monkey-patch ModernBertModel so every ModernBERT loaded via transformers uses the unpadded forward."""
    ModernBertModel.forward = UnpadModernBertModel.forward
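

# Usage sketch (assumptions: the "answerdotai/ModernBERT-base" checkpoint and a
# GPU with flash-attn installed; checkpoint name and device are illustrative):
#
#   import torch
#   from transformers import AutoTokenizer
#
#   enable_modernbert_unpadding()
#   tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
#   model = UnpadModernBertForMaskedLM.from_pretrained(
#       "answerdotai/ModernBERT-base",
#       attn_implementation="flash_attention_2",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   batch = tokenizer(
#       ["Paris is the [MASK] of France.", "Hello"],
#       padding=True, return_tensors="pt",
#   ).to("cuda")
#   with torch.no_grad():
#       logits = model(**batch).logits  # padded (batch, seqlen, vocab) as usual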