import torch
import torch.nn as nn


class HashingTrickEmbedding(nn.Module):
    """Embedding layer that maps token ids into a fixed set of hash buckets via SimHash."""

    def __init__(self, vocab_size, hidden_size, num_hashes=2, num_buckets=8192, device='cpu'):
        super().__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hashes = num_hashes
        self.num_buckets = num_buckets
        self.device = device

        # ✅ Use nn.Embedding instead of EmbeddingBag
        self.hash_embedding = nn.Embedding(
            num_buckets,
            hidden_size
        ).to(device)

        # Fixed (non-trainable) random hyperplanes used to compute the SimHash sign bits.
        self.proj_matrix = nn.Parameter(
            torch.randn(num_hashes, hidden_size),
            requires_grad=False
        )
        # Fixed random vector per vocabulary id; these are what get hashed.
        self.random_vectors = nn.Parameter(
            torch.randn(vocab_size, hidden_size),
            requires_grad=False
        )

    def forward(self, input_ids):
        batch_size, seq_len = input_ids.size()
        input_ids_flat = input_ids.view(-1)
        # Hash each token using SimHash
        hashed_ids = self.simhash(input_ids_flat)   # shape: [batch_size * seq_len]
        output = self.hash_embedding(hashed_ids)    # shape: [batch_size * seq_len, hidden_size]
        return output.view(batch_size, seq_len, self.hidden_size)

    def simhash(self, input_ids):
        device = input_ids.device
        # Look up the fixed random vector for each token id.
        token_vectors = self.random_vectors.to(device)[input_ids]                    # [N, hidden_size]
        # Project onto the random hyperplanes and keep only the signs.
        dots = torch.einsum('bd,hd->bh', token_vectors, self.proj_matrix.to(device)) # [N, num_hashes]
        signs = (dots > 0).to(torch.int64)                                            # sign bits in {0, 1}
        # Pack the sign bits into an integer hash (bit i contributes 2**i), then map it to a bucket.
        powers = 2 ** torch.arange(self.num_hashes, device=device)
        hashed = (signs * powers).sum(dim=1)
        return hashed % self.num_buckets


def get_peft_embedding(vocab_size, hidden_size, num_hashes, num_buckets, device='cpu'):
    return HashingTrickEmbedding(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_hashes=num_hashes,
        num_buckets=num_buckets,
        device=device
    )
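

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): build the hashed
    # embedding via the factory above and embed a dummy batch. The sizes below are arbitrary
    # placeholder values chosen only for the demo.
    emb = get_peft_embedding(vocab_size=32000, hidden_size=64, num_hashes=8, num_buckets=8192)
    input_ids = torch.randint(0, 32000, (2, 16))  # [batch_size=2, seq_len=16]
    out = emb(input_ids)
    print(out.shape)                              # expected: torch.Size([2, 16, 64])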