Forrest Wargo committed
Commit 7c2acc2 · Parent: b5a68f4

adding init

Files changed (3):
  1. README.md +3 -0
  2. handler.py +275 -0
  3. requirements.txt +7 -0
README.md ADDED
@@ -0,0 +1,3 @@
+ # Moondream3 Preview Endpoint
+
+ Loads upstream weights via MODEL_ID.
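
The request body follows the OpenAI-style contract documented in handler.py below. A minimal client sketch, assuming the endpoint is deployed and that ENDPOINT_URL and HF_TOKEN are set in the environment (both names are placeholders, and `requests` is a client-side dependency, not part of this repo's requirements.txt):

```python
# Hedged client sketch: ENDPOINT_URL and HF_TOKEN are assumed env vars.
import base64
import os

import requests

with open("example.jpg", "rb") as f:
    b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "inputs": {
        "messages": [{
            "role": "user",
            "content": [
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{b64}"}},
                {"type": "text", "text": "the red mug"},
            ],
        }],
        "task": "point",  # or "detect" / "query"
    }
}

resp = requests.post(
    os.environ["ENDPOINT_URL"],
    headers={"Authorization": f"Bearer {os.environ['HF_TOKEN']}"},
    json=payload,
)
print(resp.json())  # e.g. {"points": [...], "width": ..., "height": ..., "task": "point"}
```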
handler.py ADDED
@@ -0,0 +1,275 @@
+ import base64
+ import io
+ import json
+ import os
+ from typing import Any, Dict, List, Optional
+
+ from PIL import Image
+
+ import torch
+ from transformers import AutoModelForCausalLM
+
+
+ def _b64_to_pil(data_url: str) -> Image.Image:
+     if not isinstance(data_url, str) or not data_url.startswith("data:"):
+         raise ValueError("Expected a data URL starting with 'data:'")
+     header, b64data = data_url.split(",", 1)
+     raw = base64.b64decode(b64data)
+     img = Image.open(io.BytesIO(raw))
+     img.load()
+     return img
+
+
+ class EndpointHandler:
+     """HF Inference Endpoint handler for Moondream3 Preview.
+
+     Input contract (OpenAI-style):
+     {
+       "messages": [
+         {
+           "role": "user",
+           "content": [
+             { "type": "image_url", "image_url": { "url": "data:<mime>;base64,<...>" } },
+             { "type": "text", "text": "<object or question>" }
+           ]
+         }
+       ],
+       "task": "point" | "detect" | "query",  // optional, default "point"
+       "max_objects": <int>,                  // optional, for detect
+       "reasoning": <bool>                    // optional, for query
+     }
+
+     Output:
+     - task=="point": { points: [{x, y}], width, height }
+     - task=="detect": { objects: [{x_min, y_min, x_max, y_max}], width, height }
+     - task=="query": { answer: "...", width?, height? }
+     Coordinates are normalized (0-1). width/height echo the source image dims for convenience.
+     """
+
+     def __init__(self, path: str = "") -> None:
+         model_id = os.environ.get("MODEL_ID", ".")
+
+         os.environ.setdefault("OMP_NUM_THREADS", "1")
+         os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
+
+         # Load the local repo (or a remote one if MODEL_ID points to a hub id)
+         self.model = AutoModelForCausalLM.from_pretrained(
+             model_id,
+             trust_remote_code=True,
+             torch_dtype=torch.bfloat16,
+             device_map="auto",
+         )
+
+         # Optional compilation for speed, if exposed by the remote code
+         try:
+             compile_fn = getattr(self.model, "compile", None)
+             if callable(compile_fn):
+                 compile_fn()
+         except Exception:
+             pass
+
+     def __call__(self, data: Dict[str, Any]) -> Any:
+         # Accept HF toolkit shapes: { "inputs": {...} } or a JSON string
+         if isinstance(data, dict) and "inputs" in data:
+             inputs_val = data.get("inputs")
+             if isinstance(inputs_val, dict):
+                 data = inputs_val
+             elif isinstance(inputs_val, (str, bytes, bytearray)):
+                 try:
+                     if isinstance(inputs_val, (bytes, bytearray)):
+                         inputs_val = inputs_val.decode("utf-8")
+                     parsed = json.loads(inputs_val)
+                     if isinstance(parsed, dict):
+                         data = parsed
+                 except Exception:
+                     pass
+
+         messages = data.get("messages")
+         task = str(data.get("task", "point")).lower()
+         reasoning = bool(data.get("reasoning", True))
+         max_objects = data.get("max_objects")
+         prioritize_accuracy = bool(data.get("prioritize_accuracy", True))
+
+         if not messages:
+             return {"error": "Provide 'messages' with a user image and text"}
+
+         # Extract the first user image and text
+         image_data_url: Optional[str] = None
+         text_piece: Optional[str] = None
+         for msg in messages:
+             if msg.get("role") != "user":
+                 return {"error": "Only user messages are supported."}
+             for part in msg.get("content", []):
+                 if part.get("type") == "image_url" and image_data_url is None:
+                     image_data_url = part.get("image_url", {}).get("url")
+                 elif part.get("type") == "text" and text_piece is None:
+                     text_piece = part.get("text")
+             if image_data_url and text_piece:
+                 break
+
+         if not image_data_url or not isinstance(image_data_url, str) or not image_data_url.startswith("data:"):
+             return {"error": "image_url.url must be a data URL (data:...)"}
+         if not text_piece:
+             return {"error": "Content must include text."}
+
+         # Decode for dimensions and pass the PIL image to the model
+         try:
+             pil = _b64_to_pil(image_data_url)
+         except Exception as e:
+             return {"error": f"Failed to decode image data URL: {e}"}
+
+         width = getattr(pil, "width", None)
+         height = getattr(pil, "height", None)
+         if width and height:
+             try:
+                 print(f"[moondream-endpoint] Received image size: {width}x{height}")
+             except Exception:
+                 pass
+
+         # Run the selected skill
+         try:
+             if task == "point":
+                 if prioritize_accuracy:
+                     # Test-time augmentation: also run on a mirrored image and merge
+                     flipped = pil.transpose(Image.FLIP_LEFT_RIGHT)
+                     res_orig = self.model.point(pil, text_piece)
+                     res_flip = self.model.point(flipped, text_piece)
+                     points = self._tta_points(res_orig.get("points", []), res_flip.get("points", []))
+                     out: Dict[str, Any] = {"points": points}
+                 else:
+                     result = self.model.point(pil, text_piece)
+                     out = {"points": result.get("points", [])}
+             elif task == "detect":
+                 settings = {"max_objects": int(max_objects)} if max_objects else None
+                 if prioritize_accuracy:
+                     flipped = pil.transpose(Image.FLIP_LEFT_RIGHT)
+                     res_orig = self.model.detect(pil, text_piece, settings=settings)
+                     res_flip = self.model.detect(flipped, text_piece, settings=settings)
+                     objects = self._tta_boxes(res_orig.get("objects", []), res_flip.get("objects", []))
+                     out = {"objects": objects}
+                 else:
+                     result = self.model.detect(pil, text_piece, settings=settings)
+                     out = {"objects": result.get("objects", [])}
+             elif task == "query":
+                 result = self.model.query(pil, question=text_piece, reasoning=reasoning, stream=False)
+                 out = {"answer": result.get("answer", "")}
+             else:
+                 return {"error": f"Unsupported task '{task}'. Use 'point', 'detect', or 'query'."}
+         except Exception as e:
+             return {"error": f"Model inference failed: {e}"}
+
+         if width and height:
+             out.update({"width": width, "height": height})
+         out.update({"task": task})
+         return out
+
+     @staticmethod
+     def _flip_point(p: Dict[str, Any]) -> Dict[str, float]:
+         # Mirror a normalized point across the vertical axis and clamp to [0, 1]
+         x = float(p.get("x", 0.0))
+         y = float(p.get("y", 0.0))
+         x = 1.0 - x
+         return {"x": max(0.0, min(1.0, x)), "y": max(0.0, min(1.0, y))}
+
+     @classmethod
+     def _deduplicate_and_average_points(cls, points: List[Dict[str, Any]], tol: float = 0.03) -> List[Dict[str, float]]:
+         # Greedy clustering: a point within `tol` of a running cluster mean is averaged in
+         clusters: List[Dict[str, float]] = []
+         counts: List[int] = []
+         for p in points:
+             px = float(p.get("x", 0.0))
+             py = float(p.get("y", 0.0))
+             matched = False
+             for i, c in enumerate(clusters):
+                 dx = px - c["x"]
+                 dy = py - c["y"]
+                 if dx * dx + dy * dy <= tol * tol:
+                     n = counts[i]
+                     c["x"] = (c["x"] * n + px) / (n + 1)
+                     c["y"] = (c["y"] * n + py) / (n + 1)
+                     counts[i] = n + 1
+                     matched = True
+                     break
+             if not matched:
+                 clusters.append({"x": px, "y": py})
+                 counts.append(1)
+         return clusters
+
+     @classmethod
+     def _tta_points(cls, points_a: List[Dict[str, Any]], points_b_flipped: List[Dict[str, Any]]) -> List[Dict[str, float]]:
+         # Convert flipped predictions back to the original frame, then merge
+         unflipped_b = [cls._flip_point(p) for p in points_b_flipped]
+         merged = list(points_a) + unflipped_b
+         return cls._deduplicate_and_average_points(merged)
+
+     @staticmethod
+     def _flip_box(b: Dict[str, Any]) -> Dict[str, float]:
+         # Mirror a normalized box across the vertical axis, clamp, and reorder if needed
+         xmin = float(b.get("x_min", 0.0))
+         xmax = float(b.get("x_max", 0.0))
+         ymin = float(b.get("y_min", 0.0))
+         ymax = float(b.get("y_max", 0.0))
+         nxmin = 1.0 - xmax
+         nxmax = 1.0 - xmin
+         nxmin, nxmax = max(0.0, min(1.0, nxmin)), max(0.0, min(1.0, nxmax))
+         ymin, ymax = max(0.0, min(1.0, ymin)), max(0.0, min(1.0, ymax))
+         if nxmin > nxmax:
+             nxmin, nxmax = nxmax, nxmin
+         return {"x_min": nxmin, "y_min": ymin, "x_max": nxmax, "y_max": ymax}
+
+     @staticmethod
+     def _iou(b1: Dict[str, float], b2: Dict[str, float]) -> float:
+         # Intersection-over-union of two normalized boxes
+         x1 = max(b1["x_min"], b2["x_min"])
+         y1 = max(b1["y_min"], b2["y_min"])
+         x2 = min(b1["x_max"], b2["x_max"])
+         y2 = min(b1["y_max"], b2["y_max"])
+         inter_w = max(0.0, x2 - x1)
+         inter_h = max(0.0, y2 - y1)
+         inter = inter_w * inter_h
+         a1 = max(0.0, b1["x_max"] - b1["x_min"]) * max(0.0, b1["y_max"] - b1["y_min"])
+         a2 = max(0.0, b2["x_max"] - b2["x_min"]) * max(0.0, b2["y_max"] - b2["y_min"])
+         denom = a1 + a2 - inter
+         return inter / denom if denom > 0 else 0.0
+
+     @classmethod
+     def _merge_boxes_with_nms(cls, boxes: List[Dict[str, float]], iou_threshold: float = 0.5) -> List[Dict[str, float]]:
+         # Greedy NMS-style merge: boxes above the IoU threshold are averaged together
+         merged: List[Dict[str, float]] = []
+         used = [False] * len(boxes)
+         for i in range(len(boxes)):
+             if used[i]:
+                 continue
+             cluster = [boxes[i]]
+             used[i] = True
+             for j in range(i + 1, len(boxes)):
+                 if used[j]:
+                     continue
+                 if cls._iou(boxes[i], boxes[j]) >= iou_threshold:
+                     used[j] = True
+                     cluster.append(boxes[j])
+             # Average the cluster
+             n = float(len(cluster))
+             avg = {
+                 "x_min": sum(b["x_min"] for b in cluster) / n,
+                 "y_min": sum(b["y_min"] for b in cluster) / n,
+                 "x_max": sum(b["x_max"] for b in cluster) / n,
+                 "y_max": sum(b["y_max"] for b in cluster) / n,
+             }
+             # Clamp to [0, 1]
+             avg["x_min"] = max(0.0, min(1.0, avg["x_min"]))
+             avg["y_min"] = max(0.0, min(1.0, avg["y_min"]))
+             avg["x_max"] = max(0.0, min(1.0, avg["x_max"]))
+             avg["y_max"] = max(0.0, min(1.0, avg["y_max"]))
+             merged.append(avg)
+         return merged
+
+     @classmethod
+     def _tta_boxes(cls, boxes_a: List[Dict[str, Any]], boxes_b_flipped: List[Dict[str, Any]]) -> List[Dict[str, float]]:
+         unflipped_b = [cls._flip_box(b) for b in boxes_b_flipped]
+         combined = [
+             {
+                 "x_min": float(b.get("x_min", 0.0)),
+                 "y_min": float(b.get("y_min", 0.0)),
+                 "x_max": float(b.get("x_max", 0.0)),
+                 "y_max": float(b.get("y_max", 0.0)),
+             }
+             for b in (list(boxes_a) + unflipped_b)
+         ]
+         return cls._merge_boxes_with_nms(combined, iou_threshold=0.5)
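
For the point and detect tasks, the handler applies a light test-time augmentation: it runs the model on the original and a horizontally mirrored image, maps the mirrored predictions back into the original frame, and merges the two sets (cluster-averaging for points, IoU-gated averaging for boxes). The merge helpers are class/static methods, so they can be exercised without loading any weights; a minimal sketch (importing handler still pulls in torch and transformers):

```python
# Demonstrates the TTA merge helpers in isolation; no model is instantiated.
from handler import EndpointHandler

# One point from the original image, one from the mirrored image
# (x = 0.61 in the mirrored frame un-flips to 1 - 0.61 = 0.39).
points_orig = [{"x": 0.40, "y": 0.50}]
points_flip = [{"x": 0.61, "y": 0.50}]
print(EndpointHandler._tta_points(points_orig, points_flip))
# -> one averaged cluster near {'x': 0.395, 'y': 0.5}, since 0.40 and 0.39
#    are within tol=0.03 of each other

# Same idea for boxes: the mirrored box un-flips to x in [0.08, 0.28],
# overlaps the original with IoU ~0.82 >= 0.5, so the pair is averaged.
boxes_orig = [{"x_min": 0.10, "y_min": 0.10, "x_max": 0.30, "y_max": 0.30}]
boxes_flip = [{"x_min": 0.72, "y_min": 0.10, "x_max": 0.92, "y_max": 0.30}]
print(EndpointHandler._tta_boxes(boxes_orig, boxes_flip))
# -> [{'x_min': 0.09, 'y_min': 0.10, 'x_max': 0.29, 'y_max': 0.30}] (up to float rounding)
```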
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ torch
+ Pillow
+ transformers>=4.41
+ accelerate
+ tokenizers
+ numpy
+
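
With the dependencies installed (pip install -r requirements.txt), the handler can be smoke-tested locally before deployment. A hedged sketch: the MODEL_ID value below is an assumption, and instantiating EndpointHandler downloads and loads the full model, so a GPU with sufficient memory is needed in practice:

```python
# Local smoke test; substitute your own MODEL_ID if the assumed hub id differs.
import base64
import io
import os

from PIL import Image

os.environ.setdefault("MODEL_ID", "moondream/moondream3-preview")  # assumed hub id

from handler import EndpointHandler

handler = EndpointHandler()  # loads weights via transformers with trust_remote_code

# Build a tiny in-memory test image and wrap it as a data URL
img = Image.new("RGB", (64, 64), "red")
buf = io.BytesIO()
img.save(buf, format="PNG")
data_url = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode("utf-8")

out = handler({
    "messages": [{
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": data_url}},
            {"type": "text", "text": "What color is this image?"},
        ],
    }],
    "task": "query",
})
print(out)  # expected shape: {"answer": "...", "width": 64, "height": 64, "task": "query"}
```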