|
|
import datasets |
|
|
from datasets import Features, Value, Array1D |
|
|
from transformers import CLIPProcessor, CLIPModel |
|
|
import torch |
|
|
from PIL import Image |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
# Load the full Met Museum Open Access train split up front
# (streaming=False materializes/caches it instead of iterating lazily).
dataset = datasets.load_dataset("metmuseum/openaccess", split="train", streaming=False)
|
|
|
|
|
|
|
|
|
|
|
# CLIP checkpoint used for image embeddings.
model_name = "openai/clip-vit-base-patch32"

# Prefer the Apple-GPU (MPS) backend when available, otherwise run on CPU.
if torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")

processor = CLIPProcessor.from_pretrained(model_name)
model = CLIPModel.from_pretrained(model_name).to(device)
model.eval()  # inference only — disable dropout etc.
|
|
|
|
|
|
|
|
def l2_normalize(x, dim=-1, eps=1e-12):
    """Scale *x* to unit L2 norm along *dim*.

    The small *eps* added to the norm guards against division by zero
    for all-zero vectors (which are returned unchanged as zeros).
    """
    norms = x.norm(p=2, dim=dim, keepdim=True)
    return x.div(norms.add(eps))
|
|
|
|
|
|
|
|
# Number of images sent through CLIP per forward pass.
BATCH_SIZE = 32

# Per-batch queues, drained and cleared by flush_batch().
object_ids_batch = []
images_batch = []

# Running outputs accumulated across the whole dataset.
all_object_ids = []
all_embeddings = []
|
|
|
|
|
def flush_batch():
    """Embed the queued images with CLIP, append (id, embedding) pairs to the
    global accumulators, then reset the per-batch queues.

    No-op when the image queue is empty. Only mutates (never rebinds) the
    module-level lists, so no ``global`` declaration is required.
    """
    if not images_batch:
        return

    # Preprocess the whole queue in a single processor call.
    batch_inputs = processor(images=images_batch, return_tensors="pt")
    pixels = batch_inputs["pixel_values"].to(device)

    with torch.no_grad():
        embeddings = model.get_image_features(pixel_values=pixels)

    # Unit-normalize and move back to host memory for numpy conversion.
    embeddings = l2_normalize(embeddings, dim=-1)
    embeddings = embeddings.cpu()

    for oid, emb in zip(object_ids_batch, embeddings):
        all_object_ids.append(int(oid))
        all_embeddings.append(emb.numpy().astype("float32"))

    object_ids_batch.clear()
    images_batch.clear()
|
|
|
|
|
# Walk the dataset, queueing (id, RGB image) pairs and embedding them in
# BATCH_SIZE chunks.
for record in tqdm(dataset):
    oid = record.get("Object ID")
    raw_image = record.get("jpg")

    # Skip records missing either the id or the image payload.
    if oid is None or raw_image is None:
        continue

    if isinstance(raw_image, Image.Image):
        rgb = raw_image.convert("RGB")
    else:
        # Fall back to array-like payloads; silently skip anything
        # Pillow cannot convert.
        try:
            rgb = Image.fromarray(raw_image).convert("RGB")
        except Exception:
            continue

    object_ids_batch.append(oid)
    images_batch.append(rgb)

    if len(images_batch) >= BATCH_SIZE:
        flush_batch()

# Embed whatever remains queued after the loop.
flush_batch()
|
|
|
|
|
|
|
|
# Fail loudly if nothing was embedded — an empty Dataset here would only
# surface as a confusing error downstream.
if len(all_embeddings) == 0:
    raise RuntimeError("No embeddings were produced. Check dataset columns and image availability.")

# Embedding dimensionality, taken from the first vector (all CLIP outputs
# share the same width).
dim = len(all_embeddings[0])

features = Features({
    "Object ID": Value("int32"),
    # FIX: `datasets` exposes no Array1D feature (only Array2D–Array5D),
    # and the ArrayXD types take a tuple shape, not a bare int. The
    # canonical fixed-length 1-D encoding is a Sequence with `length`.
    "Embedding": datasets.Sequence(feature=Value("float32"), length=dim),
})
|
|
|
|
|
# Materialize the id/embedding columns as a Dataset with the declared
# schema, then persist it to disk in Arrow format.
columns = {
    "Object ID": all_object_ids,
    "Embedding": all_embeddings,
}
embedding_dataset = datasets.Dataset.from_dict(columns, features=features)

embedding_dataset.save_to_disk("metmuseum_embeddings")
|
|
|