Datasets:

Modalities:
Tabular
Text
Formats:
arrow
Languages:
English
Libraries:
Datasets
License:
openaccess_embeddings / embeddings.py
brettrenfer's picture
Embeddings refactor
f815992 verified
raw
history blame
3.09 kB
import datasets
from datasets import Features, Value, Array1D
from transformers import CLIPProcessor, CLIPModel
import torch
from PIL import Image
from tqdm import tqdm
# 1) Load the Met Museum Open Access dataset (full download; see note below for streaming)
dataset = datasets.load_dataset("metmuseum/openaccess", split="train", streaming=False)
# If the dataset is huge for your machine, consider streaming=True and writing out shards.

# 2) Initialize CLIP model + matching processor (processor handles resize/normalize)
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)

# 3) Pick the best available device: CUDA GPU, then Apple-silicon MPS, then CPU.
#    (Original only checked MPS; this adds CUDA without changing MPS/CPU behavior.)
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
model.to(device)
model.eval()  # inference only — disables dropout etc.
# 4) Helper: scale vectors to unit L2 norm
def l2_normalize(x, dim=-1, eps=1e-12):
    """Return *x* divided by its L2 norm along *dim*.

    A small *eps* is added to the norm so all-zero vectors do not
    cause a division by zero.
    """
    norms = x.norm(p=2, dim=dim, keepdim=True)
    return x / (norms + eps)
# 5) Batched iteration state
BATCH_SIZE = 32  # images encoded per forward pass — tune for your machine
# Pending batch (filled by the loop, drained by flush_batch)
object_ids_batch = []
images_batch = []
# Final accumulators for the output dataset
all_object_ids = []
all_embeddings = []
def flush_batch():
    """Encode the pending image batch with CLIP and drain it.

    Runs the processor + model on ``images_batch``, L2-normalizes the
    resulting features, and appends (id, float32 vector) pairs to the
    global ``all_object_ids`` / ``all_embeddings`` accumulators. Both
    pending-batch lists are emptied afterwards. No-op when the batch
    is empty.
    """
    if not images_batch:
        return
    # The processor accepts a list of PIL images directly.
    batch_inputs = processor(images=images_batch, return_tensors="pt")
    # Only the pixel tensor goes to the device, not the whole dict.
    pixels = batch_inputs["pixel_values"].to(device)
    with torch.no_grad():
        embeddings = model.get_image_features(pixel_values=pixels)  # (B, D)
        embeddings = l2_normalize(embeddings, dim=-1).cpu()  # CPU for HF datasets
    for oid, emb in zip(object_ids_batch, embeddings):
        all_object_ids.append(int(oid))
        all_embeddings.append(emb.numpy().astype("float32"))  # (D,)
    # Reset the pending batch in place (the loop keeps references to these lists).
    object_ids_batch.clear()
    images_batch.clear()
# Stream records, accumulate (id, image) pairs, and encode one full batch at a time.
for record in tqdm(dataset):
    # Column names follow the schema used here ('Object ID' and 'jpg');
    # adjust if the dataset exposes different ones (e.g. 'image').
    oid = record.get("Object ID")
    raw_image = record.get("jpg")
    if oid is None or raw_image is None:
        continue
    # CLIP expects 3-channel input, so force RGB.
    if isinstance(raw_image, Image.Image):
        rgb = raw_image.convert("RGB")
    else:
        # Array/bytes payloads: best-effort conversion to PIL, otherwise skip.
        try:
            rgb = Image.fromarray(raw_image).convert("RGB")
        except Exception:
            continue
    object_ids_batch.append(oid)
    images_batch.append(rgb)
    if len(images_batch) >= BATCH_SIZE:
        flush_batch()
# Encode whatever is left over after the loop.
flush_batch()
# 6) Build a proper HF dataset with explicit features
if not all_embeddings:
    raise RuntimeError("No embeddings were produced. Check dataset columns and image availability.")
dim = len(all_embeddings[0])  # embedding dimensionality (512 for clip-vit-base-patch32)
features = Features({
    "Object ID": Value("int32"),
    # BUG FIX: Array1D takes its shape as a tuple, not a bare int —
    # Array1D(dim, ...) breaks the Arrow extension-type construction.
    "Embedding": Array1D(shape=(dim,), dtype="float32"),
})
embedding_dataset = datasets.Dataset.from_dict(
    {
        "Object ID": all_object_ids,
        "Embedding": all_embeddings,
    },
    features=features,
)
# 7) Save to disk (Arrow format, loadable with datasets.load_from_disk)
embedding_dataset.save_to_disk("metmuseum_embeddings")