import torch
import numpy as np
import networkx as nx
from typing import Optional, List


class Pooler:
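    """Pool token-level embeddings into sequence-level embeddings.

    Supports simple poolings (mean, max, norm, median, std, var, cls) plus
    'parti', a PageRank-based pooling over the model's attention graph.
    Multiple pooling types can be combined; their outputs are concatenated
    along the feature dimension.
    """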
    def __init__(self, pooling_types: List[str]):
        self.pooling_types = pooling_types
        self.pooling_options = {
            'mean': self.mean_pooling,
            'max': self.max_pooling,
            'norm': self.norm_pooling,
            'median': self.median_pooling,
            'std': self.std_pooling,
            'var': self.var_pooling,
            'cls': self.cls_pooling,
            'parti': self._pool_parti,
        }
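        # Fail fast on unknown pooling types instead of raising a KeyError
        # later inside __call__.
        unknown = set(pooling_types) - set(self.pooling_options)
        if unknown:
            raise ValueError(f"Unknown pooling types: {sorted(unknown)}")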

    def _create_pooled_matrices_across_layers(self, attentions: torch.Tensor) -> torch.Tensor:
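        """Collapse the layer dimension with an element-wise max:
        (batch, layers, seq, seq) -> (batch, seq, seq).
        """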
        return attentions.max(dim=1).values

    def _page_rank(self, attention_matrix, personalization=None, nstart=None):
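        """Run PageRank on the attention graph; returns a dict mapping node index to importance."""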
        G = self._convert_to_graph(attention_matrix)
        if G.number_of_nodes() != attention_matrix.shape[0]:
            raise ValueError(
                f"The number of nodes in the graph should equal the number of tokens in the sequence! "
                f"You have {G.number_of_nodes()} nodes for {attention_matrix.shape[0]} tokens."
            )
        if G.number_of_edges() == 0:
            raise ValueError("You don't seem to have any attention edges left in the graph.")
        return nx.pagerank(G, alpha=0.85, tol=1e-06, weight='weight',
                           personalization=personalization, nstart=nstart, max_iter=100)

    def _convert_to_graph(self, matrix):
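        """Build a directed graph whose weighted edges are the attention scores."""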
        return nx.from_numpy_array(matrix, create_using=nx.DiGraph)

    def _calculate_importance_weights(self, dict_importance, attention_mask: Optional[torch.Tensor] = None):
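        """Normalize importance scores into weights that sum to 1, dropping padded positions."""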
        if attention_mask is not None:
            # Keep only real tokens; avoid mutating the caller's dict.
            dict_importance = {k: v for k, v in dict_importance.items() if attention_mask[k] != 0}
        total = sum(dict_importance.values())
        # dict_importance is keyed by node index in insertion order (0..n-1),
        # so the resulting weight array stays aligned with token positions.
        return np.array([v / total for v in dict_importance.values()])

    def _pool_parti(self, emb: torch.Tensor, attentions: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
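        """PageRank-weighted mean pooling: token importances come from PageRank
        over the layer-maxed attention graph.
        """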
        maxed_attentions = self._create_pooled_matrices_across_layers(attentions).detach().cpu().numpy()
        if attention_mask is None:
            # Without a mask, treat every position as a real token.
            attention_mask = torch.ones(emb.shape[:2], device=emb.device)
        emb_pooled = []
        for e, a, mask in zip(emb, maxed_attentions, attention_mask):
            dict_importance = self._page_rank(a)
            importance_weights = self._calculate_importance_weights(dict_importance, mask)
            # Assumes right padding: the first num_tokens positions are the real tokens.
            num_tokens = int(mask.sum().item())
            emb_pooled.append(np.average(e[:num_tokens].detach().cpu().numpy(), weights=importance_weights, axis=0))
        return torch.tensor(np.array(emb_pooled), dtype=emb.dtype, device=emb.device)

    def mean_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
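        """Mean over the sequence dimension; with a mask, average only the real tokens."""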
        if attention_mask is None:
            return emb.mean(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)

    def max_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
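        """Element-wise max over the sequence dimension, excluding padded positions."""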
        if attention_mask is None:
            return emb.max(dim=1).values
        else:
            # Fill padded positions with -inf so they can never win the max;
            # multiplying by 0 would wrongly beat negative values.
            masked = emb.masked_fill(attention_mask.unsqueeze(-1) == 0, float('-inf'))
            return masked.max(dim=1).values

    def norm_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
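        """L2 norm over the sequence dimension; padding is zeroed first, which leaves the norm unchanged."""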
        if attention_mask is None:
            return emb.norm(dim=1, p=2)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).norm(dim=1, p=2)

    def median_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
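        """Element-wise median over the sequence dimension, excluding padded positions."""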
        if attention_mask is None:
            return emb.median(dim=1).values
        else:
            # Fill padded positions with NaN and use nanmedian so padding zeros
            # cannot skew the median.
            masked = emb.masked_fill(attention_mask.unsqueeze(-1) == 0, float('nan'))
            return masked.nanmedian(dim=1).values

    def std_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
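        """Standard deviation over the sequence dimension (mask-aware when a mask is given)."""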
        if attention_mask is None:
            return emb.std(dim=1)
        else:
            # Square root of the (biased) masked variance.
            var = self.var_pooling(emb, attention_mask, **kwargs)
            return torch.sqrt(var)

    def var_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
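        """Variance over the sequence dimension, computed only over unmasked tokens."""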
        if attention_mask is None:
            return emb.var(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            # Masked mean over the real tokens.
            mean = (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
            mean = mean.unsqueeze(1)
            # Squared deviation from the masked mean.
            squared_diff = (emb - mean) ** 2
            # Biased (population) variance over the unmasked positions.
            var = (squared_diff * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
            return var

    def cls_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs):
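        """Return the first ([CLS]) token embedding; the attention mask is ignored."""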
        return emb[:, 0, :]

    def __call__(
        self,
        emb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        attentions: Optional[torch.Tensor] = None
    ):
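        """Apply each configured pooling and concatenate results along the feature dim.

        `attentions` is only needed when 'parti' pooling is configured.
        """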
        final_emb = []
        for pooling_type in self.pooling_types:
            final_emb.append(self.pooling_options[pooling_type](emb=emb, attention_mask=attention_mask, attentions=attentions))
        return torch.cat(final_emb, dim=-1)


if __name__ == "__main__":
    pooler = Pooler(pooling_types=['max', 'parti'])

    batch_size = 8
    seq_len = 64
    hidden_size = 128
    num_layers = 12
    emb = torch.randn(batch_size, seq_len, hidden_size)
    # Use softmax so each attention row is a valid distribution; raw randn
    # values can be negative, which PageRank edge weights must not be.
    attentions = torch.softmax(torch.randn(batch_size, num_layers, seq_len, seq_len), dim=-1)
    attention_mask = torch.ones(batch_size, seq_len)
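    # Hypothetical integration sketch (assumes the Hugging Face transformers API):
    # a model called with output_attentions=True returns a tuple of per-layer
    # tensors of shape (batch, heads, seq, seq); maxing over heads and stacking
    # layers would yield the (batch, layers, seq, seq) tensor used above.
    # attentions = torch.stack([a.max(dim=1).values for a in outputs.attentions], dim=1)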

    y = pooler(emb=emb, attention_mask=attention_mask, attentions=attentions)
    print(y.shape)  # torch.Size([8, 256]): 'max' (128) + 'parti' (128) concatenated