File size: 1,806 Bytes
545c4d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
import math

# Toggle between a local/offline environment ("JZ" — presumably the Jean Zay
# cluster, where data and models live on disk — TODO confirm) and the public
# Hugging Face Hub.
ON_JZ = False
DATASET_NAME = (
    "./data_dir/nomic_embed_supervised" if ON_JZ else "jxm/nomic_embed_supervised"
)
MODEL_NAME = "./models/modernbert-embed-base" if ON_JZ else "intfloat/e5-base-v2"


if ON_JZ:
    # Offline: full train split from the local dataset directory.
    dataset = load_dataset(DATASET_NAME, split="train")
else:
    # Online: download a single shard and keep only the first 2000 rows to
    # stay small; checksum verification is skipped since we load one
    # explicit parquet file.
    dataset = load_dataset(
        DATASET_NAME,
        split="train[:2000]",
        data_files=["data/train-00000-of-00116.parquet"],
        verification_mode="no_checks",
    )

# Embedding model used for both queries and documents.
# model = SentenceTransformer('nomic-ai/modernbert-embed-base')
model = SentenceTransformer(MODEL_NAME)

# Encode the query and document columns into new embedding columns.
def map_to_embedding(batch):
    """Add `query_embedding` and `document_embedding` columns to *batch*.

    Invoked through `dataset.map(..., batched=True)`, so every value in
    `batch` is a list of strings; `model.encode` accepts the list and
    returns one embedding per entry.
    """
    batch["query_embedding"] = model.encode(batch["query"])
    batch["document_embedding"] = model.encode(batch["document"])
    return batch


# NOTE(review): dropping columns before .map apparently defeated the
# datasets cache, so the removal is left commented out — verify before
# re-enabling.
# dataset = dataset.remove_columns(["negative", "dataset"])
# Encode all rows in batches of 128; batched=True hands lists of strings
# to map_to_embedding.
dataset = dataset.map(map_to_embedding, batched=True, batch_size=128)
# TODO: drop the unused `negative` and `dataset` columns before saving.
print(dataset)
print(dataset[0])

from cde_benchmark.utils.faiss_clustering import paired_kmeans_faiss

# Stack the embeddings into float32 tensors for paired k-means.
# torch.tensor is the recommended constructor (torch.Tensor is the legacy
# class constructor); the explicit dtype keeps the previous float32 behavior.
q = torch.tensor(dataset["query_embedding"], dtype=torch.float32)
X = torch.tensor(dataset["document_embedding"], dtype=torch.float32)

# Aim for roughly `cluster_size` documents per cluster.
cluster_size = 1024
k = math.ceil(len(X) / cluster_size)
print(k)
max_iters = 100

centroids, assignments = paired_kmeans_faiss(q=q, X=X, k=k, max_iters=max_iters)

# Flatten to native Python ints: `list(tensor.flatten())` would produce
# 0-dim tensors (or numpy scalars), which Arrow may fail to store;
# .tolist() works for both torch tensors and numpy arrays.
assignments = assignments.flatten().tolist()
print(assignments)

# Attach one cluster id per row.
dataset = dataset.add_column("cluster_assignment", assignments)

print(dataset)

# Persist the clustered dataset to disk.
dataset.save_to_disk("./data_dir/nomic_embed_supervised_clustered")