# CLIP image-embedding extraction script (non-Python extraction header removed).
import datasets
from datasets import Features, Value, Array1D
from transformers import CLIPProcessor, CLIPModel
import torch
from PIL import Image
from tqdm import tqdm
# 1) Load the dataset.
# If the dataset is too large for your machine, consider streaming=True and
# writing the output in shards instead.
dataset = datasets.load_dataset("metmuseum/openaccess", split="train", streaming=False)

# 2) Initialize the CLIP model and its preprocessing pipeline.
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)

# 3) Prefer Apple's MPS backend when present, otherwise stay on CPU,
#    and put the model in inference mode.
if torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
model.to(device)
model.eval()
# 4) Helper to normalize (L2)
def l2_normalize(x, dim=-1, eps=1e-12):
    """Scale *x* to unit L2 norm along *dim*; *eps* guards against division by zero."""
    norms = x.norm(p=2, dim=dim, keepdim=True)
    return x / (norms + eps)
# 5) Iterate with batching
BATCH_SIZE = 32 # tune for your machine (larger batch = fewer forward passes, more memory)
# Per-batch staging buffers; flush_batch() drains them every BATCH_SIZE items.
object_ids_batch, images_batch = [], []
# Whole-run accumulators: parallel lists of int object IDs and float32 (D,) vectors.
all_object_ids, all_embeddings = [], []
def flush_batch():
    """Embed the pending image batch with CLIP, record L2-normalized vectors, reset buffers.

    Reads/writes the module-level staging lists (object_ids_batch, images_batch)
    and appends results to all_object_ids / all_embeddings.
    """
    if not images_batch:
        return
    # The processor accepts a list of PIL images; only the resulting pixel
    # tensor is moved to the device (the dict itself stays put).
    batch_inputs = processor(images=images_batch, return_tensors="pt")
    pixels = batch_inputs["pixel_values"].to(device)
    with torch.no_grad():
        embeddings = model.get_image_features(pixel_values=pixels)  # (B, D)
        embeddings = l2_normalize(embeddings, dim=-1)
    embeddings = embeddings.cpu()  # HF datasets wants CPU/numpy data
    for oid, vec in zip(object_ids_batch, embeddings):
        all_object_ids.append(int(oid))
        all_embeddings.append(vec.numpy().astype("float32"))  # (D,)
    # Reset the staging buffers in place so the module-level names stay bound.
    object_ids_batch.clear()
    images_batch.clear()
for item in tqdm(dataset):
    # Column names depend on the dataset schema; this expects 'Object ID' and
    # 'jpg' — adjust here if your copy uses e.g. 'image' instead.
    object_id = item.get("Object ID")
    raw_image = item.get("jpg")
    if object_id is None or raw_image is None:
        continue
    # Normalize everything to an RGB PIL image; skip records we cannot decode.
    if isinstance(raw_image, Image.Image):
        rgb = raw_image.convert("RGB")
    else:
        try:
            rgb = Image.fromarray(raw_image).convert("RGB")
        except Exception:
            continue
    object_ids_batch.append(object_id)
    images_batch.append(rgb)
    if len(images_batch) >= BATCH_SIZE:
        flush_batch()
# Drain whatever is left after the loop ends.
flush_batch()
# 6) Build a proper HF dataset with explicit features
if len(all_embeddings) == 0:
    raise RuntimeError("No embeddings were produced. Check dataset columns and image availability.")
dim = len(all_embeddings[0])
features = Features({
    "Object ID": Value("int32"),
    # BUG FIX: Array1D requires a shape *tuple*, not a bare int — its
    # __post_init__ calls tuple(shape), which raises TypeError on an int.
    "Embedding": Array1D(shape=(dim,), dtype="float32"),
})
embedding_dataset = datasets.Dataset.from_dict(
    {
        "Object ID": all_object_ids,
        "Embedding": all_embeddings,
    },
    features=features,
)
# 7) Save to disk
embedding_dataset.save_to_disk("metmuseum_embeddings")