import torch
import torch.nn as nn
class HashingTrickEmbedding(nn.Module):
    def __init__(self, vocab_size, hidden_size, num_hashes=2, num_buckets=8192, device='cpu'):
        super().__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hashes = num_hashes
        self.num_buckets = num_buckets
        self.device = device
        # Use nn.Embedding instead of EmbeddingBag: each token maps to exactly
        # one hash bucket, so no pooling over multiple indices is needed.
        self.hash_embedding = nn.Embedding(
            num_buckets,
            hidden_size
        ).to(device)
        # Random hyperplanes used by SimHash; fixed, not trained.
        self.proj_matrix = nn.Parameter(
            torch.randn(num_hashes, hidden_size),
            requires_grad=False
        )
        # Fixed random vector per vocabulary entry; these are what get hashed.
        self.random_vectors = nn.Parameter(
            torch.randn(vocab_size, hidden_size),
            requires_grad=False
        )
    def forward(self, input_ids):
        batch_size, seq_len = input_ids.size()
        input_ids_flat = input_ids.view(-1)
        # Hash each token using SimHash
        hashed_ids = self.simhash(input_ids_flat)   # shape: [batch_size * seq_len]
        output = self.hash_embedding(hashed_ids)    # shape: [batch_size * seq_len, hidden_size]
        return output.view(batch_size, seq_len, self.hidden_size)
    def simhash(self, input_ids):
        device = input_ids.device
        token_vectors = self.random_vectors.to(device)[input_ids]                     # [N, hidden_size]
        dots = torch.einsum('bd,hd->bh', token_vectors, self.proj_matrix.to(device))  # [N, num_hashes]
        signs = (dots > 0).to(torch.int64)                                            # one sign bit per hyperplane
        # Pack the sign bits into a single integer code: bucket = sum_i sign_i * 2**i
        bit_weights = 2 ** torch.arange(self.num_hashes, device=device)
        hashed = (signs * bit_weights).sum(dim=1)
        return hashed % self.num_buckets
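
# Worked example of the SimHash bucketing above (values are illustrative):
# with num_hashes = 3, sign bits [1, 0, 1] pack to 1*1 + 0*2 + 1*4 = 5.
# Note that the hash can produce at most 2**num_hashes distinct codes, so a
# num_buckets above that ceiling (e.g. the default 8192 with num_hashes=2)
# leaves buckets unused; choosing 2**num_hashes >= num_buckets covers them all.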
def get_peft_embedding(vocab_size, hidden_size, num_hashes, num_buckets, device='cpu'):
    return HashingTrickEmbedding(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_hashes=num_hashes,
        num_buckets=num_buckets,
        device=device
    )
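
# Minimal usage sketch; the sizes below are illustrative assumptions, not
# values taken from the original module. num_hashes=13 gives 2**13 = 8192
# possible codes, matching num_buckets exactly.
if __name__ == "__main__":
    emb = get_peft_embedding(vocab_size=32000, hidden_size=768,
                             num_hashes=13, num_buckets=8192)
    input_ids = torch.randint(0, 32000, (2, 16))  # [batch_size, seq_len]
    out = emb(input_ids)
    print(out.shape)  # expected: torch.Size([2, 16, 768])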