import math

import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

from cde_benchmark.utils.faiss_clustering import paired_kmeans_faiss
# ON_JZ toggles between local copies of the dataset/model (for offline
# cluster runs) and loading them from the Hugging Face Hub.
ON_JZ = False
DATASET_NAME = (
    "./data_dir/nomic_embed_supervised" if ON_JZ else "jxm/nomic_embed_supervised"
)
MODEL_NAME = "./models/modernbert-embed-base" if ON_JZ else "intfloat/e5-base-v2"
if ON_JZ:
    dataset = load_dataset(DATASET_NAME, split="train")
else:
    # Small local run: only the first shard, first 2000 examples.
    dataset = load_dataset(
        DATASET_NAME,
        split="train[:2000]",
        data_files=["data/train-00000-of-00116.parquet"],
        verification_mode="no_checks",
    )
# Embed the query and document columns.
# model = SentenceTransformer('nomic-ai/modernbert-embed-base')
model = SentenceTransformer(MODEL_NAME)
def map_to_embedding(batch):
    # Batched map: batch["query"] / batch["document"] are lists of strings,
    # and model.encode returns one embedding row per input.
    # Note: intfloat/e5-base-v2 is trained with "query: " / "passage: "
    # input prefixes (per its model card); see the variant sketched below.
    batch["query_embedding"] = model.encode(batch["query"])
    batch["document_embedding"] = model.encode(batch["document"])
    return batch
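# Prefix-aware variant (a sketch, not part of the original script): the
# "query: " / "passage: " prefixes follow the e5-base-v2 model card
# convention, which matters for retrieval quality with e5 models.
def map_to_embedding_e5(batch):
    batch["query_embedding"] = model.encode(["query: " + q for q in batch["query"]])
    batch["document_embedding"] = model.encode(["passage: " + d for d in batch["document"]])
    return batch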
# Removing the unused negative/dataset columns first apparently prevents the
# mapped dataset from being cached, so it is left commented out.
# dataset = dataset.remove_columns(["negative", "dataset"])
dataset = dataset.map(map_to_embedding, batched=True, batch_size=128)
print(dataset)
print(dataset[0])
q = torch.tensor(dataset["query_embedding"])
X = torch.tensor(dataset["document_embedding"])
# Pick k so that each cluster holds roughly cluster_size documents.
cluster_size = 1024
k = math.ceil(len(X) / cluster_size)
print(k)
max_iters = 100
centroids, assignments = paired_kmeans_faiss(q=q, X=X, k=k, max_iters=max_iters)
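# Quick shape check (an assumption: paired_kmeans_faiss is taken here to
# return centroids of shape (k, dim) plus one assignment per document,
# which matches how the results are used below but is not documented here).
print("centroids:", tuple(centroids.shape))
print("assignments:", tuple(assignments.shape))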
# Flatten to one plain-int label per example (tolist() works for both torch
# tensors and numpy arrays, and add_column needs Python scalars).
assignments = assignments.flatten().tolist()
print(assignments[:20])
# add these assignments to the dataset
dataset = dataset.add_column("cluster_assignment", assignments)
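# Optional sanity check (not in the original script): inspect the cluster
# size distribution to see how balanced clusters are around cluster_size.
from collections import Counter

sizes = Counter(dataset["cluster_assignment"])
print(f"{len(sizes)} clusters, min={min(sizes.values())}, max={max(sizes.values())}")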
print(dataset)
# save dataset
dataset.save_to_disk("./data_dir/nomic_embed_supervised_clustered")
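# Optional round-trip check (not in the original script): reload with the
# standard datasets.load_from_disk API and confirm the new column survived.
from datasets import load_from_disk

reloaded = load_from_disk("./data_dir/nomic_embed_supervised_clustered")
print(reloaded.column_names)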