import math

import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

# ON_JZ switches to local dataset/model paths (e.g. for an offline cluster)
# instead of Hugging Face Hub identifiers.
ON_JZ = False
DATASET_NAME = (
    "./data_dir/nomic_embed_supervised" if ON_JZ else "jxm/nomic_embed_supervised"
)
MODEL_NAME = "./models/modernbert-embed-base" if ON_JZ else "intfloat/e5-base-v2"

if ON_JZ:
    dataset = load_dataset(DATASET_NAME, split="train")
else:
    # Local debug mode: read a single parquet shard, keep the first 2,000
    # rows, and skip checksum verification since only one of the 116 shards
    # is used.
    dataset = load_dataset(
        DATASET_NAME,
        split="train[:2000]",
        data_files=["data/train-00000-of-00116.parquet"],
        verification_mode="no_checks",
    )

model = SentenceTransformer(MODEL_NAME)


def map_to_embedding(batch):
    # Called with batched=True below, so batch["query"] and batch["document"]
    # are lists of strings; model.encode returns one embedding per string.
    batch["query_embedding"] = model.encode(batch["query"])
    batch["document_embedding"] = model.encode(batch["document"])
    return batch
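

# Note: both checkpoints are usually run with task prefixes (e5-base-v2
# expects "query: " / "passage: ", modernbert-embed-base uses
# "search_query: " / "search_document: "); no prefix is applied here.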

# Embed every pair; encode() sees batches of 128 texts at a time.
dataset = dataset.map(map_to_embedding, batched=True, batch_size=128)

print(dataset)
print(dataset[0])

from cde_benchmark.utils.faiss_clustering import paired_kmeans_faiss

q = torch.Tensor(dataset["query_embedding"])
X = torch.Tensor(dataset["document_embedding"])
cluster_size = 1024
# Pick k so each cluster holds roughly cluster_size pairs on average.
k = math.ceil(len(X) / cluster_size)
print(k)
max_iters = 100
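
# paired_kmeans_faiss comes from the project's cde_benchmark package; judging
# by its call signature, it clusters the query/document pairs jointly and
# returns k centroids plus one cluster assignment per pair.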
centroids, assignments = paired_kmeans_faiss(q=q, X=X, k=k, max_iters=max_iters)

# Flatten to a plain Python list of ints so it can be stored as a column.
assignments = assignments.flatten().tolist()
print(assignments)

dataset = dataset.add_column("cluster_assignment", assignments)

print(dataset)
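
# Optional sanity check: clusters should hold roughly cluster_size pairs each,
# though k-means gives no guarantee of balanced sizes.
from collections import Counter

sizes = Counter(dataset["cluster_assignment"])
print(f"{len(sizes)} clusters; sizes range {min(sizes.values())}-{max(sizes.values())}")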

dataset.save_to_disk("./data_dir/nomic_embed_supervised_clustered")