PeteBleackley committed (verified)
Commit 6a14d00 · 1 Parent(s): bc73af9

Upload DisamBert

Files changed (4)
  1. DisamBert.py +241 -0
  2. README.md +199 -0
  3. config.json +0 -0
  4. model.safetensors +3 -0
DisamBert.py ADDED
@@ -0,0 +1,241 @@
+ from collections.abc import Generator, Iterable
+ from dataclasses import dataclass
+ from enum import StrEnum
+ from itertools import chain
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ import torch.nn as nn
+ from transformers import (
+     AutoConfig,
+     AutoModel,
+     AutoTokenizer,
+     ModernBertModel,
+     PreTrainedConfig,
+     PreTrainedModel,
+ )
+ from transformers.modeling_outputs import TokenClassifierOutput
+
+ BATCH_SIZE = 16
+
+
+ class ModelURI(StrEnum):
+     BASE = "answerdotai/ModernBERT-base"
+     LARGE = "answerdotai/ModernBERT-large"
+
+
+ @dataclass(slots=True, frozen=True)
+ class LexicalExample:
+     """A concept identifier paired with its natural-language definition."""
+
+     concept: str
+     definition: str
+
+
+ @dataclass(slots=True, frozen=True)
+ class PaddedBatch:
+     input_ids: torch.Tensor
+     attention_mask: torch.Tensor
+
+
+ class DisamBert(PreTrainedModel):
+     """ModernBERT with a span-classification head for entity disambiguation.
+
+     Each row of the classifier head is initialised from the encoding of one
+     concept's definition, so spans are scored against every ontology concept.
+     """
+
+     def __init__(self, config: PreTrainedConfig):
+         super().__init__(config)
+         if config.init_basemodel:
+             # First construction on top of a pretrained base: the head stays
+             # lazy until init_classifier has seen the ontology.
+             self.BaseModel = AutoModel.from_pretrained(config.name_or_path, device_map="auto")
+             self.classifier_head = nn.UninitializedParameter()
+             self.bias = nn.UninitializedParameter()
+             self.__entities = None
+         else:
+             # Reloading a saved checkpoint: shapes are known from the config.
+             self.BaseModel = ModernBertModel(config)
+             self.classifier_head = nn.Parameter(
+                 torch.empty((config.ontology_size, config.hidden_size))
+             )
+             self.bias = nn.Parameter(torch.empty((config.ontology_size, 1)))
+             self.__entities = pd.Series(config.entities)
+         config.init_basemodel = False
+         self.tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_path)
+         self.loss = nn.CrossEntropyLoss()
+         self.post_init()
+
+     @classmethod
+     def from_base(cls, base_id: ModelURI):
+         config = AutoConfig.from_pretrained(base_id)
+         config.init_basemodel = True
+         config.tokenizer_path = base_id
+         return cls(config)
+
+     def init_classifier(self, entities: Generator[LexicalExample]) -> None:
+         """Initialise the classifier head from the [CLS] encodings of the
+         concept definitions, processed in batches of BATCH_SIZE."""
+         entity_ids = []
+         vectors = []
+         batch = []
+         n = 0
+         with self.BaseModel.device:
+             torch.cuda.empty_cache()
+             for entity in entities:
+                 entity_ids.append(entity.concept)
+                 batch.append(entity.definition)
+
+                 n += 1
+                 if n == BATCH_SIZE:
+                     tokens = self.tokenizer(batch, padding=True, return_tensors="pt")
+                     encoding = self.BaseModel(tokens["input_ids"], tokens["attention_mask"])
+                     vectors.append(encoding.last_hidden_state.detach()[:, 0])
+                     n = 0
+                     batch = []
+             if n > 0:
+                 # Encode the final, partially filled batch.
+                 tokens = self.tokenizer(batch, padding=True, return_tensors="pt")
+                 encoding = self.BaseModel(tokens["input_ids"], tokens["attention_mask"])
+                 vectors.append(encoding.last_hidden_state.detach()[:, 0])
+
+             self.__entities = pd.Series(entity_ids)
+             self.config.entities = entity_ids
+             self.config.ontology_size = len(entity_ids)
+             self.classifier_head = nn.Parameter(torch.cat(vectors, dim=0))
+             self.bias = nn.Parameter(
+                 torch.nn.init.normal_(
+                     torch.empty((self.config.ontology_size, 1)),
+                     std=self.classifier_head.std().item() * np.sqrt(self.config.hidden_size)
+                 )
+             )
+
+     @property
+     def entities(self) -> pd.Series:
+         if self.__entities is None and hasattr(self.config, "entities"):
+             self.__entities = pd.Series(self.config.entities)
+         return self.__entities
+
+     def forward(
+         self,
+         input_ids: torch.Tensor,
+         attention_mask: torch.Tensor,
+         lengths: list[list[int]],
+         candidates: list[list[list[int]]],
+         labels: Iterable[list[int]] | None = None,
+         output_hidden_states: bool = False,
+         output_attentions: bool = False,
+     ) -> TokenClassifierOutput:
+         assert not nn.parameter.is_lazy(self.classifier_head), (
+             "Run init_classifier to initialise weights"
+         )
+         base_model_output = self.BaseModel(
+             input_ids,
+             attention_mask,
+             output_hidden_states=output_hidden_states,
+             output_attentions=output_attentions,
+         )
+         token_vectors = base_model_output.last_hidden_state
+         # Sum the token vectors within each span to get one vector per span.
+         span_vectors = torch.cat(
+             [
+                 torch.vstack(
+                     [
+                         torch.sum(chunk, dim=0)
+                         for chunk in self.split(token_vectors[i], sentence_indices)
+                     ]
+                 )
+                 for (i, sentence_indices) in enumerate(lengths)
+             ]
+         )
+         # Score every span against every concept: (ontology_size, n_spans).
+         logits = torch.einsum("ij,kj->ki", span_vectors, self.classifier_head) + self.bias
+         # Shift so all scores are non-negative, then zero out concepts that
+         # are not candidates for the span.
+         logits1 = logits - logits.min()
+         mask = torch.zeros_like(logits)
+         for i, concepts in enumerate(chain.from_iterable(candidates)):
+             mask[concepts, i] = 1.0
+         logits2 = logits1 * mask
+         # Regroup the span columns by sentence and pad to a common length,
+         # giving (batch, ontology_size, max_spans) for the cross-entropy loss.
+         sentence_lengths = [len(sentence_indices) for sentence_indices in lengths]
+         maxlen = max(sentence_lengths)
+         split_logits = torch.split(logits2, sentence_lengths, dim=1)
+         logits3 = torch.stack(
+             [
+                 self.extend_to_max_length(sentence, length, maxlen)
+                 for (sentence, length) in zip(split_logits, sentence_lengths, strict=True)
+             ]
+         )
+         return TokenClassifierOutput(
+             logits=logits3,
+             loss=self.loss(logits3, labels) if labels is not None else None,
+             hidden_states=base_model_output.hidden_states if output_hidden_states else None,
+             attentions=base_model_output.attentions if output_attentions else None,
+         )
+
+     def split(self, vectors: torch.Tensor, lengths: list[int]) -> tuple[torch.Tensor, ...]:
+         """Split a sentence's token vectors into spans, dropping any padding."""
+         maxlen = vectors.shape[0]
+         total_length = sum(lengths)
+         is_padded = total_length < maxlen
+         chunks = vectors.split((lengths + [maxlen - total_length]) if is_padded else lengths)
+         return chunks[:-1] if is_padded else chunks
+
+     def pad(self, tokens: Iterable[list[int]]) -> PaddedBatch:
+         """Pad token sequences to a common length and build the attention mask."""
+         lengths = [len(sentence) for sentence in tokens]
+         maxlen = max(lengths)
+         input_ids = torch.tensor(
+             [
+                 sentence + [self.config.pad_token_id] * (maxlen - length)
+                 for (sentence, length) in zip(tokens, lengths, strict=True)
+             ]
+         )
+         attention_mask = torch.vstack(
+             [torch.cat((torch.ones(length), torch.zeros(maxlen - length))) for length in lengths]
+         )
+         return PaddedBatch(input_ids, attention_mask)
+
+     def extend_to_max_length(
+         self, sentence: torch.Tensor, length: int, maxlength: int
+     ) -> torch.Tensor:
+         """Zero-pad one sentence's span logits out to the batch maximum."""
+         with self.BaseModel.device:
+             return (
+                 torch.cat(
+                     [
+                         sentence,
+                         torch.zeros((self.config.ontology_size, maxlength - length)),
+                     ],
+                     dim=1,
+                 )
+                 if length < maxlength
+                 else sentence
+             )
+
+     def pad_labels(self, labels: list[list[int]]) -> torch.Tensor:
+         """Pad label sequences with the unknown concept (the last entity)."""
+         unk = len(self.config.entities) - 1
+         lengths = [len(seq) for seq in labels]
+         maxlen = max(lengths)
+         with self.BaseModel.device:
+             return torch.tensor(
+                 [
+                     seq + [unk] * (maxlen - length)
+                     for (seq, length) in zip(labels, lengths, strict=True)
+                 ]
+             )
+
+     def tokenize(
+         self, batch: list[dict[str, str | list[int]]]
+     ) -> dict[str, torch.Tensor | list[list[int]]]:
+         """Tokenize raw examples into the keyword arguments forward expects.
+
+         Each example supplies "text", the character offsets "indices" that
+         delimit its spans, the "candidates" per span, and optional "labels".
+         """
+         all_indices = []
+         all_tokens = []
+         with self.BaseModel.device:
+             for example in batch:
+                 text = example["text"]
+                 span_indices = example["indices"]
+                 indices = []
+                 tokens = []
+                 last_span = len(span_indices) - 2
+                 for i, position in enumerate(span_indices[:-1]):
+                     span = text[position : span_indices[i + 1]]
+                     span_tokens = self.tokenizer([span], padding=False)["input_ids"][0]
+                     # Keep [CLS] only on the first span, [SEP] only on the last.
+                     if i > 0:
+                         span_tokens = span_tokens[1:]
+                     if i < last_span:
+                         span_tokens = span_tokens[:-1]
+                     indices.append(len(span_tokens))
+                     tokens.extend(span_tokens)
+                 all_indices.append(indices)
+                 all_tokens.append(tokens)
+             padded = self.pad(all_tokens)
+             result = {
+                 "input_ids": padded.input_ids,
+                 "attention_mask": padded.attention_mask,
+                 "lengths": all_indices,
+                 "candidates": [example["candidates"] for example in batch],
+             }
+             if "labels" in batch[0]:
+                 result["labels"] = self.pad_labels([example["labels"] for example in batch])
+             return result
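
For orientation: `tokenize` builds exactly the keyword arguments that `forward` expects. A minimal sketch of the expected input format, with all field values hypothetical:

```python
# One raw example for DisamBert.tokenize. "indices" holds character offsets
# delimiting the spans of "text"; "candidates" lists the admissible concept
# ids for each span; "labels" (optional) holds the gold concept ids.
example = {
    "text": "She sat on the bank.",
    "indices": [0, 15, 20],       # two spans: "She sat on the " and "bank."
    "candidates": [[2], [0, 1]],  # e.g. 0 and 1 are senses of "bank", 2 is UNK
    "labels": [2, 1],
}
# model.tokenize([example]) returns input_ids, attention_mask, lengths and
# candidates (plus padded labels), so model(**model.tokenize([example]))
# yields a TokenClassifierOutput whose logits have shape
# (batch, ontology_size, max_spans).
```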
README.md ADDED
@@ -0,0 +1,199 @@
+ ---
+ library_name: transformers
+ tags: []
+ ---
+
+ # Model Card for DisamBert
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ DisamBert is a ModernBERT-based entity-disambiguation model: it encodes text spans and scores them against a classifier head whose rows are initialised from encoded concept definitions, restricted to the candidate concepts supplied for each span.
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+ This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
+
+ - **Developed by:** PeteBleackley
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
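+
+ A minimal sketch, assuming `DisamBert.py` from this repository is importable; the three-concept ontology and the example sentence are hypothetical placeholders:
+
+ ```python
+ from DisamBert import DisamBert, LexicalExample, ModelURI
+
+ # Illustrative ontology: by convention the last entry serves as the
+ # unknown label that pad_labels uses for padding.
+ ontology = [
+     ("bank.n.01", "a financial institution that accepts deposits"),
+     ("bank.n.02", "sloping land beside a body of water"),
+     ("UNK", "unknown concept"),
+ ]
+ model = DisamBert.from_base(ModelURI.BASE)
+ model.init_classifier(LexicalExample(c, d) for c, d in ontology)
+
+ # "indices" are character offsets delimiting the spans of "text";
+ # "candidates" lists the admissible concept ids for each span.
+ batch = model.tokenize([
+     {
+         "text": "She sat on the bank.",
+         "indices": [0, 15, 20],
+         "candidates": [[2], [0, 1]],
+         "labels": [2, 1],
+     }
+ ])
+ output = model(**batch)
+ print(output.loss, output.logits.shape)  # logits: (batch, ontology, spans)
+ ```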
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
config.json ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7edcdd4e95aadaacfd31b3043d3143265bd78d87109af73839496784b3594f7
+ size 2061551568