Dataset card metadata:

Modalities: Tabular, Text
Format: Arrow
Languages: English
Libraries: Datasets
License: (unspecified)
File size: 3,798 bytes
Revision: 569f72e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os, math, glob
import datasets
from datasets import Features, Value, Array1D
from transformers import CLIPProcessor, CLIPModel
import torch
from PIL import Image
from tqdm import tqdm
import numpy as np

# Optional (recommended for reproducibility)
torch.manual_seed(0)

# ---------- Config ----------
MODEL_NAME   = "openai/clip-vit-base-patch32"  # CLIP checkpoint producing the embeddings
BATCH_SIZE   = 32         # images per forward pass; tune for your machine
SHARD_SIZE   = 10_000     # write a parquet file every N rows
OUT_DIR      = "metmuseum_embeddings_streaming"  # will contain *.parquet
IMG_COL      = "jpg"      # adjust if column differs (sometimes 'image')
ID_COL       = "Object ID"  # dataset column holding the museum object id
# ----------------------------

# 1) Load streaming dataset (lazy: rows are fetched as the loop consumes them,
#    so the full dataset is never downloaded up front)
ds_stream = datasets.load_dataset(
    "metmuseum/openaccess", split="train", streaming=True
)

# 2) Model / processor / device
# Device preference: CUDA if present, else Apple-Silicon MPS, else CPU.
# (The original only checked MPS, silently running on CPU on CUDA machines.)
model = CLIPModel.from_pretrained(MODEL_NAME)
processor = CLIPProcessor.from_pretrained(MODEL_NAME)
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
model.to(device).eval()  # inference only: disable dropout etc.

# 3) L2 normalize helper
def l2_normalize(x, dim=-1, eps=1e-12):
    """Scale *x* to unit L2 norm along *dim*; *eps* keeps zero vectors finite."""
    norms = x.norm(p=2, dim=dim, keepdim=True)
    return x.div(norms.add(eps))

# 4) Sharded writer (Parquet via datasets.Dataset)
# Module-level mutable state shared by flush_shard()/flush_batch() below.
os.makedirs(OUT_DIR, exist_ok=True)
shard_idx = 0       # index of the next parquet shard to write
rows_in_shard = 0   # rows accumulated toward the current shard
buffer_ids = []     # object ids awaiting write
buffer_vecs = []    # float32 embedding rows awaiting write
emb_dim = None  # embedding width; set after the first batch is embedded

def flush_shard():
    """Persist the buffered rows as one parquet shard, then reset the buffers.

    No-op when the buffer is empty. Uses the module-level shard state above.
    """
    global shard_idx, rows_in_shard, buffer_ids, buffer_vecs, emb_dim
    if not buffer_ids:
        return

    # Learn the embedding width from the buffer if not already known.
    if emb_dim is None:
        emb_dim = len(buffer_vecs[0])

    # Explicit schema so every shard is typed identically.
    schema = Features({
        ID_COL: Value("int32"),
        "Embedding": Array1D(emb_dim, dtype="float32"),
    })
    table = {ID_COL: buffer_ids, "Embedding": buffer_vecs}
    shard_path = os.path.join(OUT_DIR, f"part-{shard_idx:05d}.parquet")
    datasets.Dataset.from_dict(table, features=schema).to_parquet(shard_path)

    # Advance the shard counter and start fresh buffers.
    shard_idx += 1
    rows_in_shard = 0
    buffer_ids, buffer_vecs = [], []

# 5) Batch inference loop
# Per-batch accumulators: ids and PIL images collected until BATCH_SIZE is hit.
obj_ids_batch, images_batch = [], []

def flush_batch():
    """Embed the pending image batch with CLIP and append to the shard buffer.

    No-op when no images are pending. Clears the batch accumulators on exit.
    """
    global emb_dim, rows_in_shard, buffer_ids, buffer_vecs
    if not images_batch:
        return

    batch = processor(images=images_batch, return_tensors="pt")
    pixels = batch["pixel_values"].to(device)

    # Inference only — no gradients needed.
    with torch.no_grad():
        embeddings = model.get_image_features(pixel_values=pixels)  # (B, D)
        embeddings = l2_normalize(embeddings, dim=-1).cpu().numpy().astype("float32")

    if emb_dim is None:
        emb_dim = embeddings.shape[1]

    # Move results into the shard-level buffers.
    buffer_ids.extend(int(oid) for oid in obj_ids_batch)
    buffer_vecs.extend(list(embeddings))
    rows_in_shard += embeddings.shape[0]

    # Reset the per-batch accumulators in place.
    obj_ids_batch.clear()
    images_batch.clear()

# Iterate stream: collect (id, image) pairs, embed a batch at a time, and
# spill embeddings to parquet shards as the buffers fill.
for item in tqdm(ds_stream, desc="Embedding (streaming)"):
    oid = item.get(ID_COL)
    img = item.get(IMG_COL)

    # Skip rows missing either the identifier or the image payload.
    if oid is None or img is None:
        continue

    # Ensure PIL RGB
    if isinstance(img, Image.Image):
        pil_img = img.convert("RGB")
    else:
        # Best-effort conversion of non-PIL payloads (e.g. numpy arrays);
        # anything Image.fromarray cannot handle is deliberately skipped.
        try:
            pil_img = Image.fromarray(img).convert("RGB")
        except Exception:
            continue

    obj_ids_batch.append(oid)
    images_batch.append(pil_img)

    # Run the model once a full batch has accumulated.
    if len(images_batch) >= BATCH_SIZE:
        flush_batch()

    # Write a shard once enough rows are buffered. rows_in_shard only grows
    # inside flush_batch(), so a shard may exceed SHARD_SIZE by up to one
    # batch before this check fires.
    if rows_in_shard >= SHARD_SIZE:
        flush_shard()

# Flush remainder: embed any partially-filled batch, then write the final
# (possibly short) shard.
flush_batch()
flush_shard()

print(f"Wrote {shard_idx} shard(s) to {OUT_DIR}")