lhallee committed
Commit 66e8426 · verified · 1 Parent(s): 9fe5906

Upload embedding_mixin.py with huggingface_hub

Files changed (1)
  1. embedding_mixin.py +370 -0
embedding_mixin.py ADDED
@@ -0,0 +1,370 @@
import os
import sqlite3
import networkx as nx
import numpy as np
import torch
from tqdm.auto import tqdm
from typing import Callable, List, Optional
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as TorchDataset
from transformers import PreTrainedTokenizerBase


class Pooler:
    def __init__(self, pooling_types: List[str]):
        self.pooling_types = pooling_types
        self.pooling_options = {
            'mean': self.mean_pooling,
            'max': self.max_pooling,
            'norm': self.norm_pooling,
            'median': self.median_pooling,
            'std': self.std_pooling,
            'var': self.var_pooling,
            'cls': self.cls_pooling,
            'parti': self._pool_parti,
        }

    def _create_pooled_matrices_across_layers(self, attentions: torch.Tensor) -> torch.Tensor:
        # Collapse the layer dimension with an element-wise max: (b, n_layers, L, L) -> (b, L, L)
        maxed_attentions = torch.max(attentions, dim=1)[0]
        return maxed_attentions

    def _page_rank(self, attention_matrix, personalization=None, nstart=None, prune_type="top_k_outdegree"):
        # Run PageRank on the attention matrix converted to a graph.
        # Raises exceptions if the graph doesn't match the token sequence or has no edges.
        # Returns the PageRank scores for each token node.
        G = self._convert_to_graph(attention_matrix)
        if G.number_of_nodes() != attention_matrix.shape[0]:
            raise Exception(
                f"The number of nodes in the graph should equal the number of tokens in the sequence! You have {G.number_of_nodes()} nodes for {attention_matrix.shape[0]} tokens.")
        if G.number_of_edges() == 0:
            raise Exception("You don't seem to have any attention edges left in the graph.")

        return nx.pagerank(G, alpha=0.85, tol=1e-06, weight='weight', personalization=personalization, nstart=nstart, max_iter=100)

    def _convert_to_graph(self, matrix):
        # Convert a matrix (e.g., attention scores) to a directed graph using networkx.
        # Each element in the matrix represents a directed edge with a weight.
        G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
        return G

    def _calculate_importance_weights(self, dict_importance, attention_mask: Optional[torch.Tensor] = None):
        # Remove keys where attention_mask is 0
        if attention_mask is not None:
            for k in list(dict_importance.keys()):
                if attention_mask[k] == 0:
                    del dict_importance[k]

        # dict_importance[0]  # remove cls
        # dict_importance[-1]  # remove eos
        total = sum(dict_importance.values())
        return np.array([v / total for _, v in dict_importance.items()])

    def _pool_parti(self, emb: torch.Tensor, attentions: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        # Move the layer-pooled attentions to CPU numpy so networkx/numpy can consume them
        maxed_attentions = self._create_pooled_matrices_across_layers(attentions).detach().cpu().numpy()
        # emb is (b, L, d), maxed_attentions is (b, L, L)
        emb_pooled = []
        for e, a, mask in zip(emb, maxed_attentions, attention_mask):
            # PageRank over the attention graph gives a per-token importance score
            dict_importance = self._page_rank(a)
            importance_weights = self._calculate_importance_weights(dict_importance, mask)
            num_tokens = int(mask.sum().item())
            # Weighted average of the valid (unpadded) residue embeddings
            emb_pooled.append(np.average(e[:num_tokens].detach().cpu().numpy(), weights=importance_weights, axis=0))
        pooled = torch.tensor(np.array(emb_pooled))
        return pooled

    def mean_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.mean(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)

    def max_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.max(dim=1).values
        else:
            # Mask padded positions with -inf so they can never win the max
            # (multiplying by the mask would incorrectly favor 0 over negative values).
            attention_mask = attention_mask.unsqueeze(-1)
            return emb.masked_fill(attention_mask == 0, float('-inf')).max(dim=1).values

    def norm_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.norm(dim=1, p=2)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).norm(dim=1, p=2)

    def median_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.median(dim=1).values
        else:
            # Note: padded positions are zeroed rather than dropped, which can bias
            # the median toward zero for short sequences in a padded batch.
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).median(dim=1).values

    def std_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.std(dim=1)
        else:
            # Compute variance correctly over non-masked positions, then take sqrt
            var = self.var_pooling(emb, attention_mask, **kwargs)
            return torch.sqrt(var)
110
+ def var_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
111
+ if attention_mask is None:
112
+ return emb.var(dim=1)
113
+ else:
114
+ # Correctly compute variance over only non-masked positions
115
+ attention_mask = attention_mask.unsqueeze(-1) # (b, L, 1)
116
+ # Compute mean over non-masked positions
117
+ mean = (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1) # (b, d)
118
+ mean = mean.unsqueeze(1) # (b, 1, d)
119
+ # Compute squared differences from mean, only over non-masked positions
120
+ squared_diff = (emb - mean) ** 2 # (b, L, d)
121
+ # Sum squared differences over non-masked positions and divide by count
122
+ var = (squared_diff * attention_mask).sum(dim=1) / attention_mask.sum(dim=1) # (b, d)
123
+ return var

    def cls_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):  # (b, L, d) -> (b, d)
        return emb[:, 0, :]

    def __call__(
        self,
        emb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        attentions: Optional[torch.Tensor] = None
    ):  # e.g. pooling_types=['mean', 'max']
        final_emb = []
        for pooling_type in self.pooling_types:
            final_emb.append(self.pooling_options[pooling_type](emb=emb, attention_mask=attention_mask, attentions=attentions))  # (b, d)
        return torch.cat(final_emb, dim=-1)  # (b, n_pooling_types * d)
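

# Usage sketch (illustrative, with arbitrary example shapes): pooled outputs are
# concatenated along the feature dimension, so Pooler(['mean', 'max']) maps a
# (b, L, d) input to a (b, 2 * d) output.
#
#     pooler = Pooler(['mean', 'max'])
#     pooled = pooler(torch.randn(4, 16, 32), attention_mask=torch.ones(4, 16))
#     assert pooled.shape == (4, 64)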


class ProteinDataset(TorchDataset):
    """Simple dataset for protein sequences."""
    def __init__(self, sequences: list[str]):
        self.sequences = sequences

    def __len__(self) -> int:
        return len(self.sequences)

    def __getitem__(self, idx: int) -> str:
        return self.sequences[idx]


def build_collator(tokenizer: PreTrainedTokenizerBase) -> Callable[[list[str]], dict[str, torch.Tensor]]:
    def _collate_fn(sequences: list[str]) -> dict[str, torch.Tensor]:
        return tokenizer(sequences, return_tensors="pt", padding='longest')
    return _collate_fn
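

# Collator usage sketch (illustrative; the checkpoint name below is an assumption
# and is not required by this file):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     collate_fn = build_collator(tokenizer)
#     loader = DataLoader(ProteinDataset(["MKTAYIAKQR", "MEEPQSD"]), batch_size=2, collate_fn=collate_fn)
#     batch = next(iter(loader))  # dict with padded 'input_ids' and 'attention_mask'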


class EmbeddingMixin:
    def _embed(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # Subclasses implement this to return per-residue embeddings of shape (b, L, d);
        # see the embed_dataset docstring for the two supported calling modes.
        raise NotImplementedError

    @property
    def device(self) -> torch.device:
        """Get the device of the model."""
        return next(self.parameters()).device

    def _read_sequences_from_db(self, db_path: str) -> set[str]:
        """Read sequences from SQLite database."""
        sequences = []
        with sqlite3.connect(db_path) as conn:
            c = conn.cursor()
            c.execute("SELECT sequence FROM embeddings")
            while True:
                row = c.fetchone()
                if row is None:
                    break
                sequences.append(row[0])
        return set(sequences)

    def _ensure_embeddings_table(self, conn: sqlite3.Connection) -> None:
        cursor = conn.cursor()
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS embeddings ("
            "sequence TEXT PRIMARY KEY, "
            "embedding BLOB NOT NULL, "
            "shape TEXT, "
            "dtype TEXT"
            ")"
        )
        # Migrate older tables that were created without shape/dtype columns
        cursor.execute("PRAGMA table_info(embeddings)")
        rows = cursor.fetchall()
        column_names = [row[1] for row in rows]
        if "shape" not in column_names:
            cursor.execute("ALTER TABLE embeddings ADD COLUMN shape TEXT")
        if "dtype" not in column_names:
            cursor.execute("ALTER TABLE embeddings ADD COLUMN dtype TEXT")
        conn.commit()

    def load_embeddings_from_pth(self, save_path: str) -> dict[str, torch.Tensor]:
        assert os.path.exists(save_path), f"Embedding file does not exist: {save_path}"
        payload = torch.load(save_path, map_location="cpu", weights_only=True)
        assert isinstance(payload, dict), "Expected .pth embeddings file to contain a dictionary."
        for sequence, tensor in payload.items():
            assert isinstance(sequence, str), "Expected embedding dictionary keys to be sequences (str)."
            assert isinstance(tensor, torch.Tensor), "Expected embedding dictionary values to be tensors."
        return payload

    def load_embeddings_from_db(self, db_path: str, sequences: Optional[List[str]] = None) -> dict[str, torch.Tensor]:
        assert os.path.exists(db_path), f"Embedding database does not exist: {db_path}"
        loaded: dict[str, torch.Tensor] = {}
        with sqlite3.connect(db_path) as conn:
            self._ensure_embeddings_table(conn)
            cursor = conn.cursor()
            if sequences is None:
                cursor.execute("SELECT sequence, embedding, shape, dtype FROM embeddings")
            else:
                if len(sequences) == 0:
                    return loaded
                placeholders = ",".join(["?"] * len(sequences))
                cursor.execute(
                    f"SELECT sequence, embedding, shape, dtype FROM embeddings WHERE sequence IN ({placeholders})",
                    tuple(sequences),
                )

            rows = cursor.fetchall()
            for row in rows:
                sequence = row[0]
                embedding_bytes = row[1]
                shape_text = row[2]
                dtype_text = row[3]
                assert shape_text is not None, "Missing shape metadata in embeddings table."
                assert dtype_text is not None, "Missing dtype metadata in embeddings table."
                shape_values = [int(value) for value in shape_text.split(",") if len(value) > 0]
                assert len(shape_values) > 0, f"Invalid shape metadata for sequence: {sequence}"
                expected_size = int(np.prod(shape_values))
                np_dtype = np.dtype(dtype_text)
                array = np.frombuffer(embedding_bytes, dtype=np_dtype)
                assert array.size == expected_size, f"Shape mismatch while reading sequence: {sequence}"
                reshaped = array.copy().reshape(tuple(shape_values))
                loaded[sequence] = torch.from_numpy(reshaped)
        return loaded

    def embed_dataset(
        self,
        sequences: List[str],
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        batch_size: int = 2,
        max_len: int = 512,
        truncate: bool = True,
        full_embeddings: bool = False,
        embed_dtype: torch.dtype = torch.float32,
        pooling_types: List[str] = ['mean'],
        num_workers: int = 0,
        sql: bool = False,
        save: bool = True,
        sql_db_path: str = 'embeddings.db',
        save_path: str = 'embeddings.pth',
        **kwargs,
    ) -> Optional[dict[str, torch.Tensor]]:
        """
        Embed a dataset of protein sequences.

        Supports two modes:
        - Tokenizer mode (ESM2/ESM++): provide `tokenizer`; `_embed(input_ids, attention_mask)` is used.
        - Sequence mode (E1): pass `tokenizer=None`; `_embed(sequences, return_attention_mask=True, **kwargs)` is used.
        """
        sequences = list(set([seq[:max_len] if truncate else seq for seq in sequences]))
        sequences = sorted(sequences, key=len, reverse=True)
        hidden_size = self.config.hidden_size
        pooler = Pooler(pooling_types) if not full_embeddings else None
        tokenizer_mode = tokenizer is not None
        if tokenizer_mode:
            collate_fn = build_collator(tokenizer)
            device = self.device
        else:
            collate_fn = None
            device = None

        def get_embeddings(residue_embeddings: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
            if full_embeddings or residue_embeddings.ndim == 2:
                return residue_embeddings
            return pooler(residue_embeddings, attention_mask)

        def iter_batches(to_embed: List[str]):
            if tokenizer_mode:
                assert collate_fn is not None
                assert device is not None
                dataset = ProteinDataset(to_embed)
                dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn, shuffle=False)
                for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), desc='Embedding batches'):
                    seqs = to_embed[i * batch_size:(i + 1) * batch_size]
                    input_ids = batch['input_ids'].to(device)
                    attention_mask = batch['attention_mask'].to(device)
                    residue_embeddings = self._embed(input_ids, attention_mask)
                    yield seqs, residue_embeddings, attention_mask
            else:
                for batch_start in tqdm(range(0, len(to_embed), batch_size), desc='Embedding batches'):
                    seqs = to_embed[batch_start:batch_start + batch_size]
                    batch_output = self._embed(seqs, return_attention_mask=True, **kwargs)
                    assert isinstance(batch_output, tuple), "Sequence mode _embed must return (last_hidden_state, attention_mask)."
                    assert len(batch_output) == 2, "Sequence mode _embed must return exactly two values."
                    residue_embeddings, attention_mask = batch_output
                    assert isinstance(attention_mask, torch.Tensor), "Sequence mode _embed must return attention_mask as a torch.Tensor."
                    yield seqs, residue_embeddings, attention_mask

        if sql:
            conn = sqlite3.connect(sql_db_path)
            self._ensure_embeddings_table(conn)
            c = conn.cursor()
            already_embedded = self._read_sequences_from_db(sql_db_path)
            to_embed = [seq for seq in sequences if seq not in already_embedded]
            print(f"Found {len(already_embedded)} already embedded sequences in {sql_db_path}")
            print(f"Embedding {len(to_embed)} new sequences")
            if len(to_embed) > 0:
                with torch.no_grad():
                    for i, (seqs, residue_embeddings, attention_mask) in enumerate(iter_batches(to_embed)):
                        embeddings = get_embeddings(residue_embeddings, attention_mask).to(embed_dtype)
                        for seq, emb, mask in zip(seqs, embeddings, attention_mask):
                            if full_embeddings:
                                emb = emb[mask.bool()].reshape(-1, hidden_size)
                            emb_np = emb.cpu().numpy()
                            emb_shape = ",".join([str(dim) for dim in emb_np.shape])
                            emb_dtype = str(emb_np.dtype)
                            c.execute(
                                "INSERT OR REPLACE INTO embeddings (sequence, embedding, shape, dtype) VALUES (?, ?, ?, ?)",
                                (seq, emb_np.tobytes(), emb_shape, emb_dtype),
                            )
                        if tokenizer_mode and (i + 1) % 100 == 0:
                            conn.commit()
            conn.commit()
            conn.close()
            return None

        embeddings_dict = {}
        if os.path.exists(save_path):
            embeddings_dict = self.load_embeddings_from_pth(save_path)
            to_embed = [seq for seq in sequences if seq not in embeddings_dict]
            print(f"Found {len(embeddings_dict)} already embedded sequences in {save_path}")
            print(f"Embedding {len(to_embed)} new sequences")
        else:
            to_embed = sequences
            print(f"Embedding {len(to_embed)} new sequences")

        if len(to_embed) > 0:
            with torch.no_grad():
                for seqs, residue_embeddings, attention_mask in iter_batches(to_embed):
                    embeddings = get_embeddings(residue_embeddings, attention_mask).to(embed_dtype)
                    for seq, emb, mask in zip(seqs, embeddings, attention_mask):
                        if full_embeddings:
                            emb = emb[mask.bool()].reshape(-1, hidden_size)
                        embeddings_dict[seq] = emb.cpu()

        if save:
            torch.save(embeddings_dict, save_path)

        return embeddings_dict
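

# embed_dataset round-trip sketch (illustrative; `model` is assumed to be an
# instance of a class that mixes in EmbeddingMixin and exposes `config.hidden_size`):
#
#     model.embed_dataset(seqs, tokenizer=tokenizer, sql=True, sql_db_path="embeddings.db")
#     cached = model.load_embeddings_from_db("embeddings.db", sequences=seqs)
#
# With sql=False (the default) the embeddings are instead returned as a dict and,
# when save=True, written to `save_path` with torch.save.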


if __name__ == "__main__":
    # python embedding_mixin.py
    pooler = Pooler(pooling_types=['max', 'parti'])
    batch_size = 8
    seq_len = 64
    hidden_size = 128
    num_layers = 12
    emb = torch.randn(batch_size, seq_len, hidden_size)
    attentions = torch.randn(batch_size, num_layers, seq_len, seq_len)
    attention_mask = torch.ones(batch_size, seq_len)
    y = pooler(emb=emb, attention_mask=attention_mask, attentions=attentions)
    print(y.shape)
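
    # Tokenizer-mode sketch (illustrative; EsmModel and the checkpoint below are
    # assumptions used only to show the expected _embed contract):
    #
    #     from transformers import AutoTokenizer, EsmModel
    #
    #     class EsmEmbedder(EsmModel, EmbeddingMixin):
    #         def _embed(self, input_ids, attention_mask=None):
    #             out = super().forward(input_ids=input_ids, attention_mask=attention_mask)
    #             return out.last_hidden_state  # (b, L, d) per-residue embeddings
    #
    #     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    #     model = EsmEmbedder.from_pretrained("facebook/esm2_t6_8M_UR50D")
    #     embeddings = model.embed_dataset(["MKTAYIAKQR"], tokenizer=tokenizer, pooling_types=['mean'], save=False)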