import os
import sqlite3
import networkx as nx
import numpy as np
import torch
from tqdm.auto import tqdm
from typing import Callable, List, Optional
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as TorchDataset
from transformers import PreTrainedTokenizerBase


class Pooler:
    """Pools per-residue embeddings into fixed-size sequence embeddings.

    Supported pooling types: 'mean', 'max', 'norm', 'median', 'std', 'var',
    'cls', and 'parti' (PageRank-based importance pooling over attention maps).
    """
    def __init__(self, pooling_types: List[str]):
        self.pooling_types = pooling_types
        self.pooling_options = {
            'mean': self.mean_pooling,
            'max': self.max_pooling,
            'norm': self.norm_pooling,
            'median': self.median_pooling,
            'std': self.std_pooling,
            'var': self.var_pooling,
            'cls': self.cls_pooling,
            'parti': self._pool_parti,
        }

    def _create_pooled_matrices_across_layers(self, attentions: torch.Tensor) -> torch.Tensor:
        """Collapse per-layer attentions (batch, layers, seq, seq) into a single
        (batch, seq, seq) matrix via an element-wise max over the layer dimension."""
        maxed_attentions = torch.max(attentions, dim=1)[0]
        return maxed_attentions

    def _page_rank(self, attention_matrix, personalization=None, nstart=None):
        """Run PageRank over the token-to-token attention graph and return a
        dict mapping node (token index) to importance score."""
        G = self._convert_to_graph(attention_matrix)
        if G.number_of_nodes() != attention_matrix.shape[0]:
            raise ValueError(
                f"The number of nodes in the graph should equal the number of tokens in the sequence; "
                f"got {G.number_of_nodes()} nodes for {attention_matrix.shape[0]} tokens."
            )
        if G.number_of_edges() == 0:
            raise ValueError("No attention edges left in the graph.")
        return nx.pagerank(G, alpha=0.85, tol=1e-06, weight='weight', personalization=personalization, nstart=nstart, max_iter=100)
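    # For illustration only (made-up numbers): on a 3-token attention graph,
    # PageRank might return {0: 0.41, 1: 0.33, 2: 0.26}; the scores sum to 1
    # and act as per-token importance weights for pooling.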

    def _convert_to_graph(self, matrix):
        """Interpret an attention matrix as a weighted directed graph (edge i -> j
        weighted by how strongly token i attends to token j)."""
        G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
        return G

    def _calculate_importance_weights(self, dict_importance, attention_mask: Optional[torch.Tensor] = None):
        """Drop padded positions and renormalize the remaining PageRank scores to sum to 1."""
        if attention_mask is not None:
            for k in list(dict_importance.keys()):
                if attention_mask[k] == 0:
                    del dict_importance[k]
        total = sum(dict_importance.values())
        return np.array([v / total for v in dict_importance.values()])
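    # e.g. scores {0: 0.2, 1: 0.3, 2: 0.5} with mask [1, 1, 0] -> the padded
    # third token is dropped and the rest renormalize to [0.4, 0.6].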

    def _pool_parti(self, emb: torch.Tensor, attentions: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        """Weight each token's embedding by its PageRank importance, then average."""
        # detach/cpu before .numpy() so this also works on CUDA tensors.
        maxed_attentions = self._create_pooled_matrices_across_layers(attentions).detach().cpu().numpy()
        if attention_mask is None:
            attention_mask = torch.ones(emb.shape[0], emb.shape[1], device=emb.device)
        emb_pooled = []
        for e, a, mask in zip(emb, maxed_attentions, attention_mask):
            dict_importance = self._page_rank(a)
            importance_weights = self._calculate_importance_weights(dict_importance, mask)
            # Assumes right-padding: the first `num_tokens` positions are the real tokens.
            num_tokens = int(mask.sum().item())
            e_np = e.detach().cpu().numpy()
            emb_pooled.append(np.average(e_np[:num_tokens], weights=importance_weights, axis=0))
        return torch.tensor(np.array(emb_pooled), dtype=emb.dtype, device=emb.device)

    def mean_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        if attention_mask is None:
            return emb.mean(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)

    def max_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        if attention_mask is None:
            return emb.max(dim=1).values
        else:
            # Mask padded positions with -inf so they can never win the max;
            # multiplying by the mask would incorrectly favor padded zeros over
            # negative activations.
            masked = emb.masked_fill(attention_mask.unsqueeze(-1) == 0, float('-inf'))
            return masked.max(dim=1).values

    def norm_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        if attention_mask is None:
            return emb.norm(dim=1, p=2)
        else:
            # Zeroed-out padded positions contribute nothing to the L2 norm.
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).norm(dim=1, p=2)

    def median_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        if attention_mask is None:
            return emb.median(dim=1).values
        else:
            # Mask padded positions with NaN and use nanmedian so padding does
            # not inject spurious zeros into the median.
            masked = emb.masked_fill(attention_mask.unsqueeze(-1) == 0, float('nan'))
            return masked.nanmedian(dim=1).values

    def std_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        if attention_mask is None:
            return emb.std(dim=1)
        else:
            # Standard deviation over the unmasked tokens only.
            var = self.var_pooling(emb, attention_mask, **kwargs)
            return torch.sqrt(var)

    def var_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        if attention_mask is None:
            return emb.var(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            # Masked mean over the valid tokens.
            mean = (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
            mean = mean.unsqueeze(1)
            squared_diff = (emb - mean) ** 2
            # Population (biased) variance over the valid tokens; note the
            # unmasked branch above uses torch's default unbiased estimator.
            var = (squared_diff * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
            return var

    def cls_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
        # Assumes the first position holds a CLS/BOS token.
        return emb[:, 0, :]
|
| def __call__( |
| self, |
| emb: torch.Tensor, |
| attention_mask: Optional[torch.Tensor] = None, |
| attentions: Optional[torch.Tensor] = None |
| ): |
| final_emb = [] |
| for pooling_type in self.pooling_types: |
| final_emb.append(self.pooling_options[pooling_type](emb=emb, attention_mask=attention_mask, attentions=attentions)) |
| return torch.cat(final_emb, dim=-1) |
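
# Note: Pooler concatenates one pooled vector per requested pooling type along
# the feature dimension, so the output width is len(pooling_types) * hidden_size.
# A quick sanity check:
#
#   pooler = Pooler(['mean', 'max'])
#   out = pooler(torch.randn(4, 16, 32))   # out.shape == (4, 64)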


class ProteinDataset(TorchDataset):
    """Simple dataset for protein sequences."""
    def __init__(self, sequences: list[str]):
        self.sequences = sequences

    def __len__(self) -> int:
        return len(self.sequences)

    def __getitem__(self, idx: int) -> str:
        return self.sequences[idx]


def build_collator(tokenizer: PreTrainedTokenizerBase) -> Callable[[list[str]], dict[str, torch.Tensor]]:
    def _collate_fn(sequences: list[str]) -> dict[str, torch.Tensor]:
        return tokenizer(sequences, return_tensors="pt", padding='longest')
    return _collate_fn
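
# For illustration (the checkpoint name is just an example): the collator pads
# every sequence in a batch to the longest one so they stack into one tensor.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   collate = build_collator(tokenizer)
#   batch = collate(["MKTAYIAKQR", "MKT"])  # dict with 'input_ids' and 'attention_mask'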


def parse_fasta(fasta_path: str) -> List[str]:
    """Parse a FASTA file into a list of sequences, joining multi-line records and dropping headers."""
    assert os.path.exists(fasta_path), f"FASTA file does not exist: {fasta_path}"
    sequences = []
    current_seq = []
    with open(fasta_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line.startswith('>'):
                if current_seq:
                    sequences.append(''.join(current_seq))
                current_seq = []
            else:
                current_seq.append(line)
    if current_seq:
        sequences.append(''.join(current_seq))
    return sequences
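
# e.g. a file containing
#   >seq1
#   MKTAYIAK
#   QRQISFVK
#   >seq2
#   MVLSPADKTN
# yields ['MKTAYIAKQRQISFVK', 'MVLSPADKTN'].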


class EmbeddingMixin:
    """Mixin that adds batched embedding with optional SQLite/.pth caching to a
    model exposing `_embed` and `config.hidden_size`."""
    def _embed(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        raise NotImplementedError

    @property
    def device(self) -> torch.device:
        """Get the device of the model."""
        return next(self.parameters()).device

    def _read_sequences_from_db(self, db_path: str) -> set[str]:
        """Read the set of already-embedded sequences from a SQLite database."""
        with sqlite3.connect(db_path) as conn:
            c = conn.cursor()
            c.execute("SELECT sequence FROM embeddings")
            return {row[0] for row in c.fetchall()}

    def _ensure_embeddings_table(self, conn: sqlite3.Connection) -> None:
        """Create the embeddings table if needed and backfill the shape/dtype
        columns on databases created by older versions."""
        cursor = conn.cursor()
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS embeddings ("
            "sequence TEXT PRIMARY KEY, "
            "embedding BLOB NOT NULL, "
            "shape TEXT, "
            "dtype TEXT"
            ")"
        )
        cursor.execute("PRAGMA table_info(embeddings)")
        column_names = [row[1] for row in cursor.fetchall()]
        if "shape" not in column_names:
            cursor.execute("ALTER TABLE embeddings ADD COLUMN shape TEXT")
        if "dtype" not in column_names:
            cursor.execute("ALTER TABLE embeddings ADD COLUMN dtype TEXT")
        conn.commit()

    def load_embeddings_from_pth(self, save_path: str) -> dict[str, torch.Tensor]:
        """Load a {sequence: tensor} dictionary saved with torch.save."""
        assert os.path.exists(save_path), f"Embedding file does not exist: {save_path}"
        payload = torch.load(save_path, map_location="cpu", weights_only=True)
        assert isinstance(payload, dict), "Expected .pth embeddings file to contain a dictionary."
        for sequence, tensor in payload.items():
            assert isinstance(sequence, str), "Expected embedding dictionary keys to be sequences (str)."
            assert isinstance(tensor, torch.Tensor), "Expected embedding dictionary values to be tensors."
        return payload

    def load_embeddings_from_db(self, db_path: str, sequences: Optional[List[str]] = None) -> dict[str, torch.Tensor]:
        """Load stored embeddings, optionally restricted to the given sequences."""
        assert os.path.exists(db_path), f"Embedding database does not exist: {db_path}"
        loaded: dict[str, torch.Tensor] = {}
        with sqlite3.connect(db_path) as conn:
            self._ensure_embeddings_table(conn)
            cursor = conn.cursor()
            if sequences is None:
                cursor.execute("SELECT sequence, embedding, shape, dtype FROM embeddings")
            else:
                if len(sequences) == 0:
                    return loaded
                placeholders = ",".join(["?"] * len(sequences))
                cursor.execute(
                    f"SELECT sequence, embedding, shape, dtype FROM embeddings WHERE sequence IN ({placeholders})",
                    tuple(sequences),
                )
            for sequence, embedding_bytes, shape_text, dtype_text in cursor.fetchall():
                assert shape_text is not None, "Missing shape metadata in embeddings table."
                assert dtype_text is not None, "Missing dtype metadata in embeddings table."
                shape_values = [int(value) for value in shape_text.split(",") if len(value) > 0]
                assert len(shape_values) > 0, f"Invalid shape metadata for sequence: {sequence}"
                expected_size = int(np.prod(shape_values))
                array = np.frombuffer(embedding_bytes, dtype=np.dtype(dtype_text))
                assert array.size == expected_size, f"Shape mismatch while reading sequence: {sequence}"
                # np.frombuffer returns a read-only view over the BLOB; copy so
                # torch gets writable memory before reshaping.
                reshaped = array.copy().reshape(tuple(shape_values))
                loaded[sequence] = torch.from_numpy(reshaped)
        return loaded
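    # For example, on a populated database:
    #   cached = self.load_embeddings_from_db('embeddings.db', sequences=['MKTAYIAKQR'])
    #   # -> {'MKTAYIAKQR': tensor(...)} for any requested sequences found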

    def embed_dataset(
        self,
        sequences: Optional[List[str]] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        batch_size: int = 2,
        max_len: int = 512,
        truncate: bool = True,
        full_embeddings: bool = False,
        embed_dtype: torch.dtype = torch.float32,
        pooling_types: Optional[List[str]] = None,
        num_workers: int = 0,
        sql: bool = False,
        save: bool = True,
        sql_db_path: str = 'embeddings.db',
        save_path: str = 'embeddings.pth',
        fasta_path: Optional[str] = None,
        **kwargs,
    ) -> Optional[dict[str, torch.Tensor]]:
        """
        Embed a dataset of protein sequences.

        Supports two modes:
        - Tokenizer mode (ESM2/ESM++): provide `tokenizer`; `_embed(input_ids, attention_mask)` is used.
        - Sequence mode (E1): pass `tokenizer=None`; `_embed(sequences, return_attention_mask=True, **kwargs)` is used.

        Sequences can be supplied as a list via `sequences`, parsed from a FASTA file via
        `fasta_path`, or both (the two sources are combined). At least one must be provided.

        Note: 'parti' pooling is not available here because `_embed` does not
        return attention maps.
        """
        # Avoid a mutable default argument for pooling_types.
        pooling_types = pooling_types if pooling_types is not None else ['mean']
        if fasta_path is not None:
            fasta_sequences = parse_fasta(fasta_path)
            sequences = list(sequences or []) + fasta_sequences
        assert sequences is not None and len(sequences) > 0, \
            "Must provide at least one sequence via `sequences` or `fasta_path`."
        # Deduplicate, truncate if requested, and sort longest-first so each
        # batch contains sequences of similar length and padding stays tight.
        sequences = list(set([seq[:max_len] if truncate else seq for seq in sequences]))
        sequences = sorted(sequences, key=len, reverse=True)
        hidden_size = self.config.hidden_size
        pooler = Pooler(pooling_types) if not full_embeddings else None
        tokenizer_mode = tokenizer is not None
        if tokenizer_mode:
            collate_fn = build_collator(tokenizer)
            device = self.device
        else:
            collate_fn = None
            device = None

        def get_embeddings(residue_embeddings: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
            # Models that already return a pooled (batch, hidden) tensor pass through.
            if full_embeddings or residue_embeddings.ndim == 2:
                return residue_embeddings
            return pooler(residue_embeddings, attention_mask)

        def iter_batches(to_embed: List[str]):
            if tokenizer_mode:
                assert collate_fn is not None
                assert device is not None
                dataset = ProteinDataset(to_embed)
                # shuffle=False keeps DataLoader order aligned with `to_embed`,
                # so batch i corresponds to the slice below.
                dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn, shuffle=False)
                for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), desc='Embedding batches'):
                    seqs = to_embed[i * batch_size:(i + 1) * batch_size]
                    input_ids = batch['input_ids'].to(device)
                    attention_mask = batch['attention_mask'].to(device)
                    residue_embeddings = self._embed(input_ids, attention_mask)
                    yield seqs, residue_embeddings, attention_mask
            else:
                for batch_start in tqdm(range(0, len(to_embed), batch_size), desc='Embedding batches'):
                    seqs = to_embed[batch_start:batch_start + batch_size]
                    batch_output = self._embed(seqs, return_attention_mask=True, **kwargs)
                    assert isinstance(batch_output, tuple), "Sequence mode _embed must return (last_hidden_state, attention_mask)."
                    assert len(batch_output) == 2, "Sequence mode _embed must return exactly two values."
                    residue_embeddings, attention_mask = batch_output
                    assert isinstance(attention_mask, torch.Tensor), "Sequence mode _embed must return attention_mask as a torch.Tensor."
                    yield seqs, residue_embeddings, attention_mask

        if sql:
            conn = sqlite3.connect(sql_db_path)
            self._ensure_embeddings_table(conn)
            c = conn.cursor()
            already_embedded = self._read_sequences_from_db(sql_db_path)
            to_embed = [seq for seq in sequences if seq not in already_embedded]
            print(f"Found {len(already_embedded)} already embedded sequences in {sql_db_path}")
            print(f"Embedding {len(to_embed)} new sequences")
            if len(to_embed) > 0:
                with torch.no_grad():
                    for i, (seqs, residue_embeddings, attention_mask) in enumerate(iter_batches(to_embed)):
                        embeddings = get_embeddings(residue_embeddings, attention_mask).to(embed_dtype)
                        for seq, emb, mask in zip(seqs, embeddings, attention_mask):
                            if full_embeddings:
                                # Keep only the real (unmasked) residue rows.
                                emb = emb[mask.bool()].reshape(-1, hidden_size)
                            emb_np = emb.cpu().numpy()
                            emb_shape = ",".join([str(dim) for dim in emb_np.shape])
                            c.execute(
                                "INSERT OR REPLACE INTO embeddings (sequence, embedding, shape, dtype) VALUES (?, ?, ?, ?)",
                                (seq, emb_np.tobytes(), emb_shape, str(emb_np.dtype)),
                            )
                        # Commit periodically so a crash loses at most 100 batches.
                        if (i + 1) % 100 == 0:
                            conn.commit()
            conn.commit()
            conn.close()
            return None

        embeddings_dict = {}
        if os.path.exists(save_path):
            embeddings_dict = self.load_embeddings_from_pth(save_path)
            to_embed = [seq for seq in sequences if seq not in embeddings_dict]
            print(f"Found {len(embeddings_dict)} already embedded sequences in {save_path}")
            print(f"Embedding {len(to_embed)} new sequences")
        else:
            to_embed = sequences
            print(f"Embedding {len(to_embed)} new sequences")

        if len(to_embed) > 0:
            with torch.no_grad():
                for seqs, residue_embeddings, attention_mask in iter_batches(to_embed):
                    embeddings = get_embeddings(residue_embeddings, attention_mask).to(embed_dtype)
                    for seq, emb, mask in zip(seqs, embeddings, attention_mask):
                        if full_embeddings:
                            emb = emb[mask.bool()].reshape(-1, hidden_size)
                        embeddings_dict[seq] = emb.cpu()

        if save:
            torch.save(embeddings_dict, save_path)

        return embeddings_dict


if __name__ == "__main__":
    # Smoke test with random inputs. The attention maps are passed through a
    # softmax so edge weights are non-negative, as PageRank expects.
    pooler = Pooler(pooling_types=['max', 'parti'])
    batch_size = 8
    seq_len = 64
    hidden_size = 128
    num_layers = 12
    emb = torch.randn(batch_size, seq_len, hidden_size)
    attentions = torch.randn(batch_size, num_layers, seq_len, seq_len).softmax(dim=-1)
    attention_mask = torch.ones(batch_size, seq_len)
    y = pooler(emb=emb, attention_mask=attention_mask, attentions=attentions)
    print(y.shape)  # (batch_size, 2 * hidden_size)
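
    # Usage sketch for EmbeddingMixin.embed_dataset (names are illustrative,
    # not part of this file; any model class that mixes in EmbeddingMixin,
    # exposes `config.hidden_size`, and implements `_embed` would work):
    #
    #   model = MyProteinModel.from_pretrained("some/checkpoint")  # hypothetical
    #   tokenizer = AutoTokenizer.from_pretrained("some/checkpoint")
    #   embeddings = model.embed_dataset(
    #       sequences=["MKTAYIAKQR", "MVLSPADKTN"],
    #       tokenizer=tokenizer,
    #       pooling_types=['mean', 'cls'],
    #       save=False,
    #   )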