from itertools import islice

import gradio as gr
import numpy as np
import torch
import torch.nn.functional as F
from datasets import load_dataset
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# ---------- utils ----------
def flux_to_gray(flux_array):
    """Convert a raw flux array into an 8-bit grayscale PIL image."""
    a = np.array(flux_array, dtype=np.float32)
    a = np.squeeze(a)
    if a.ndim == 3:
        # treat the smallest axis as the channel/band axis and average over it
        axis = int(np.argmin(a.shape))
        a = np.nanmean(a, axis=axis)
    a = np.nan_to_num(a, nan=0.0, posinf=0.0, neginf=0.0)
    # robust contrast stretch between the 1st and 99th percentiles,
    # falling back to min/max if the percentiles are degenerate
    lo = np.nanpercentile(a, 1)
    hi = np.nanpercentile(a, 99)
    if not np.isfinite(lo) or not np.isfinite(hi) or hi <= lo:
        lo, hi = float(np.nanmin(a)), float(np.nanmax(a))
    norm = np.clip((a - lo) / (hi - lo + 1e-9), 0, 1)
    arr = (norm * 255).astype(np.uint8)
    return Image.fromarray(arr, mode="L")
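# e.g. flux_to_gray(np.random.rand(256, 256)) -> a 256x256 mode-"L" PIL image,
# contrast-stretched between the array's 1st and 99th percentiles.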

# ---------- model ----------
model_id = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_id)
processor = CLIPProcessor.from_pretrained(model_id)
model.eval()

# ---------- in-memory index ----------
INDEX = {
    "feats": None,   # torch.Tensor [N, 512]
    "ids": [],       # list[str]
    "thumbs": [],    # list[PIL.Image]
    "bands": []      # list[str]
}

def build_index(n=200):
    """Stream the first n JWST records and embed each image with CLIP."""
    # streaming=True iterates records lazily instead of downloading the full dataset
    ds = load_dataset("MultimodalUniverse/jwst", split="train", streaming=True)
    feats, ids, thumbs, bands = [], [], [], []
    for rec in islice(ds, int(n)):
        pil = flux_to_gray(rec["image"]["flux"]).convert("RGB")  # CLIP expects 3 channels
        t = pil.copy()
        t.thumbnail((128, 128))  # small copy for the results gallery
        with torch.no_grad():
            inp = processor(images=pil, return_tensors="pt")
            f = model.get_image_features(**inp)           # [1, 512]
            f = F.normalize(f, p=2, dim=-1)[0]            # [512] unit vector
        feats.append(f)
        ids.append(str(rec.get("object_id")))
        bands.append(str(rec["image"].get("band")))
        thumbs.append(t)

    if not feats:
        return "No records indexed."

    INDEX["feats"] = torch.stack(feats)                   # [N, 512]
    INDEX["ids"] = ids
    INDEX["thumbs"] = thumbs
    INDEX["bands"] = bands
    return f"Index built: {len(ids)} images."

def search(text_query, image_query, k=5):
    if INDEX["feats"] is None:
        return [], "Build the index first."

    with torch.no_grad():
        # a non-empty text query takes priority over an uploaded image
        if text_query and text_query.strip():
            inputs = processor(text=[text_query.strip()], return_tensors="pt")
            q = model.get_text_features(**inputs)           # [1, 512]
        elif image_query is not None:
            pil = image_query.convert("RGB")
            inputs = processor(images=pil, return_tensors="pt")
            q = model.get_image_features(**inputs)          # [1, 512]
        else:
            return [], "Enter text or upload an image."

        q = F.normalize(q, p=2, dim=-1)[0]                  # [512]
        # both sides are unit-normalized, so the dot product is cosine similarity
        sims = (INDEX["feats"] @ q).cpu()                   # [N]
        topk = torch.topk(sims, k=min(int(k), sims.shape[0]))

        items = []
        for idx in topk.indices.tolist():
            cap = f"id: {INDEX['ids'][idx]}  score: {float(sims[idx]):.3f}  band: {INDEX['bands'][idx]}"
            items.append((INDEX["thumbs"][idx], cap))
        return items, f"Returned {len(items)} results."

# ---------- UI ----------
with gr.Blocks() as demo:
    gr.Markdown("JWST multimodal search — build the index, then search by text or image")

    n = gr.Slider(50, 1000, value=200, step=10, label="How many images to index")
    build_btn = gr.Button("Build index")
    status = gr.Textbox(label="Status", lines=2)

    build_btn.click(build_index, inputs=n, outputs=status)
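
    # Hedged sketch: wire the search() defined above into the UI. The component
    # names below (text_query, image_query, k, gallery) are illustrative choices,
    # not part of the original script.
    with gr.Row():
        text_query = gr.Textbox(label="Text query")
        image_query = gr.Image(type="pil", label="Image query (optional)")
    k = gr.Slider(1, 20, value=5, step=1, label="Top-k results")
    search_btn = gr.Button("Search")
    gallery = gr.Gallery(label="Results")

    # search() returns (list of (image, caption) pairs, status message)
    search_btn.click(search, inputs=[text_query, image_query, k], outputs=[gallery, status])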

demo.launch()
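# launch() serves the app locally; by default Gradio binds to http://127.0.0.1:7860.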