Datasets:

Modalities:
Tabular
Text
Formats:
arrow
Languages:
English
Libraries:
Datasets
License:
Files changed (1) hide show
  1. embeddings.py +85 -41
embeddings.py CHANGED
@@ -1,53 +1,97 @@
1
  import datasets
 
2
  from transformers import CLIPProcessor, CLIPModel
3
  import torch
4
  from PIL import Image
 
5
 
6
- # Load the dataset
7
- dataset = datasets.load_dataset("metmuseum/openaccess")
 
8
 
9
- # Initialize the CLIP model and processor
10
- model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
11
- processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
12
 
13
- # Set the model to use the CPU or Apple's MPS if available
14
- # Change this if you have a fancier computer (:
15
  device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
16
  model.to(device)
 
17
 
18
- # Map to store embeddings with Object ID as key
19
- embeddings_map = {}
20
-
21
- # Function to create embeddings for a PIL Jpeg image
22
- def create_embedding(image_pil):
23
- try:
24
- inputs = processor(images=image_pil, return_tensors="pt", padding=True).to(device)
25
- with torch.no_grad():
26
- embeddings = model.get_image_features(**inputs)
27
- return embeddings
28
- except Exception as e:
29
- print(f"Error processing image: {e}")
30
- return None
31
-
32
- # Loop through the dataset and process the images, and add them to a map
33
- # Optionally, you could add more keys here
34
- # Or, just add the embeddings to the full dataset
35
- for item in dataset['train']:
36
- object_id = item['Object ID']
37
- image_pil = item['jpg']
38
- if image_pil:
39
- embedding = create_embedding(image_pil)
40
- if embedding is not None:
41
- embeddings_map[object_id] = embedding.cpu().numpy()
42
-
43
- # Convert embeddings map to a new dataset
44
- # Note: I changed it to [embeddings][0] because the examples seemed to like [embeddings] format better
45
- # than [[[embeddings]]]
46
- # ...perhaps that's incorrect?
47
- embedding_dataset = datasets.Dataset.from_dict({
48
- 'Object ID': list(embeddings_map.keys()),
49
- 'Embedding': [embedding.tolist() for embedding in embeddings_map.values()][0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  })
51
 
52
- # Save the new dataset to disk
53
- embedding_dataset.save_to_disk('metmuseum_embeddings')
 
 
 
 
 
 
 
 
 
1
  import datasets
2
+ from datasets import Features, Value, Array1D
3
  from transformers import CLIPProcessor, CLIPModel
4
  import torch
5
  from PIL import Image
6
+ from tqdm import tqdm
7
 
8
+ # 1) Load the dataset
9
+ dataset = datasets.load_dataset("metmuseum/openaccess", split="train", streaming=False)
10
+ # If the dataset is huge for your machine, consider streaming=True and writing out shards.
11
 
12
+ # 2) Initialize model/processor
13
+ model_name = "openai/clip-vit-base-patch32"
14
+ model = CLIPModel.from_pretrained(model_name)
15
+ processor = CLIPProcessor.from_pretrained(model_name)
16
 
17
+ # 3) Device + eval
 
18
  device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
19
  model.to(device)
20
+ model.eval()
21
 
22
+ # 4) Helper to normalize (L2)
23
+ def l2_normalize(x, dim=-1, eps=1e-12):
24
+ return x / (x.norm(p=2, dim=dim, keepdim=True) + eps)
25
+
26
+ # 5) Iterate with batching
27
+ BATCH_SIZE = 32 # tune for your machine
28
+ object_ids_batch, images_batch = [], []
29
+ all_object_ids, all_embeddings = [], []
30
+
31
+ def flush_batch():
32
+ if not images_batch:
33
+ return
34
+ # Processor expects PIL list
35
+ inputs = processor(images=images_batch, return_tensors="pt")
36
+ # Only move tensors to device, not the whole dict-of-PIL
37
+ pixel_values = inputs["pixel_values"].to(device)
38
+ with torch.no_grad():
39
+ feats = model.get_image_features(pixel_values=pixel_values) # (B, D)
40
+ feats = l2_normalize(feats, dim=-1) # normalize
41
+ feats = feats.cpu() # keep CPU for HF datasets
42
+ # Save
43
+ for oid, vec in zip(object_ids_batch, feats):
44
+ all_object_ids.append(int(oid))
45
+ all_embeddings.append(vec.numpy().astype("float32")) # (D,)
46
+ # clear
47
+ object_ids_batch.clear()
48
+ images_batch.clear()
49
+
50
+ for item in tqdm(dataset):
51
+ # Depending on the dataset schema, column names may differ.
52
+ # Using 'Object ID' and 'jpg' from your example; adjust if needed (e.g., 'image').
53
+ object_id = item.get("Object ID")
54
+ image_pil = item.get("jpg")
55
+
56
+ if object_id is None or image_pil is None:
57
+ continue
58
+
59
+ # Ensure RGB
60
+ if isinstance(image_pil, Image.Image):
61
+ img = image_pil.convert("RGB")
62
+ else:
63
+ # If it’s an array/bytes, try to convert to PIL; otherwise skip
64
+ try:
65
+ img = Image.fromarray(image_pil).convert("RGB")
66
+ except Exception:
67
+ continue
68
+
69
+ object_ids_batch.append(object_id)
70
+ images_batch.append(img)
71
+
72
+ if len(images_batch) >= BATCH_SIZE:
73
+ flush_batch()
74
+
75
+ # flush any remainder
76
+ flush_batch()
77
+
78
+ # 6) Build a proper HF dataset with explicit features
79
+ if len(all_embeddings) == 0:
80
+ raise RuntimeError("No embeddings were produced. Check dataset columns and image availability.")
81
+
82
+ dim = len(all_embeddings[0])
83
+ features = Features({
84
+ "Object ID": Value("int32"),
85
+ "Embedding": Array1D(dim, dtype="float32"),
86
  })
87
 
88
+ embedding_dataset = datasets.Dataset.from_dict(
89
+ {
90
+ "Object ID": all_object_ids,
91
+ "Embedding": all_embeddings,
92
+ },
93
+ features=features,
94
+ )
95
+
96
+ # 7) Save to disk
97
+ embedding_dataset.save_to_disk("metmuseum_embeddings")