harelcain commited on
Commit
77a1faf
·
verified ·
1 Parent(s): f0fed41

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +20 -0
  2. README.md +14 -5
  3. app.py +553 -0
  4. requirements.txt +7 -0
Dockerfile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.11-slim

WORKDIR /app

# Native runtime libraries commonly required by OpenCV / scikit-image
# (libgl1 and libglib2.0-0 for importing cv2; the X libs for image I/O).
# The apt cache is removed in the same layer to keep the image small.
RUN apt-get update && apt-get install -y \
    libgl1 \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender1 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies before copying the app so this layer stays
# cached until requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY app.py .

# Port uvicorn binds to in app.py (the Hugging Face Spaces convention).
EXPOSE 7860

CMD ["python", "app.py"]
README.md CHANGED
@@ -1,10 +1,19 @@
1
  ---
2
- title: Kf Api
3
- emoji: 🌍
4
- colorFrom: gray
5
- colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Image Aligner API
3
+ emoji: 🎯
4
+ colorFrom: purple
5
+ colorTo: green
6
  sdk: docker
7
  pinned: false
8
  ---
9
 
10
+ # Image Aligner API
11
+
12
+ Geometric alignment with background-aware color matching.
13
+
14
+ ## API Endpoints
15
+
16
+ - `POST /api/align` - Returns aligned image as PNG
17
+ - `POST /api/align/base64` - Returns aligned image as base64 JSON
18
+
19
+ Dedicated with love and devotion to Alon Y., Daniel B., Denis Z., Tal S. and the rest of the Animation Taskforce 2026.
app.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Image Aligner - FastAPI Web Interface with API
4
+ Dedicated with love and devotion to Alon Y., Daniel B., Denis Z., Tal S.
5
+ and the rest of the Animation Taskforce 2026
6
+ """
7
+
8
+ import io
9
+ import base64
10
+ import warnings
11
+ import cv2
12
+ import numpy as np
13
+ from fastapi import FastAPI, File, UploadFile, HTTPException
14
+ from fastapi.responses import HTMLResponse, Response
15
+ from fastapi.middleware.cors import CORSMiddleware
16
+ from pydantic import BaseModel
17
+ from scipy.linalg import sqrtm, inv
18
+ from skimage import exposure
19
+ import uvicorn
20
+
21
+
22
+ # ============== Image Alignment Core ==============
23
+
24
def extract_features(img: np.ndarray) -> tuple:
    """Detect SIFT keypoints and descriptors on the grayscale version of *img*.

    Returns the ``(keypoints, descriptors)`` pair from OpenCV; descriptors
    may be ``None`` when nothing is detected.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.SIFT_create(nfeatures=10000, contrastThreshold=0.02, edgeThreshold=15)
    return detector.detectAndCompute(grayscale, None)
29
+
30
+
31
def match_features(desc1: np.ndarray, desc2: np.ndarray, ratio_thresh: float = 0.85) -> list:
    """FLANN-based 2-NN matching followed by Lowe's ratio test.

    Returns the surviving ``cv2.DMatch`` objects; an empty list when either
    descriptor set is missing or matching fails.
    """
    if desc1 is None or desc2 is None:
        return []
    FLANN_INDEX_KDTREE = 1
    matcher = cv2.FlannBasedMatcher(
        dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
        dict(checks=150),
    )
    try:
        candidate_pairs = matcher.knnMatch(desc1, desc2, k=2)
    except cv2.error:
        return []
    survivors = []
    for pair in candidate_pairs:
        if len(pair) != 2:
            continue
        best, runner_up = pair
        # Lowe's ratio test: keep the best match only when it clearly beats
        # the second-best candidate.
        if best.distance < ratio_thresh * runner_up.distance:
            survivors.append(best)
    return survivors
49
+
50
+
51
def compute_homography(kp1, kp2, matches, ransac_reproj_thresh=8.0, confidence=0.9999):
    """Robustly estimate a homography from matched keypoints (USAC-MAGSAC).

    Returns ``(H, inlier_mask)`` from ``cv2.findHomography``, or
    ``(None, None)`` when fewer than the four correspondences required for a
    homography are available.
    """
    if len(matches) < 4:
        return None, None
    query_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    train_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    homography, inliers = cv2.findHomography(
        query_pts,
        train_pts,
        method=cv2.USAC_MAGSAC,
        ransacReprojThreshold=ransac_reproj_thresh,
        maxIters=10000,
        confidence=confidence,
    )
    return homography, inliers
64
+
65
+
66
def create_inlier_mask(keypoints, matches, inlier_mask, image_shape, radius=50):
    """Mark target-image regions near geometric inliers.

    Paints a filled disk of ``radius`` pixels around the target keypoint of
    every inlier match, then grows the painted area with two elliptical
    dilations. Returns an HxW boolean mask.
    """
    height, width = image_shape[:2]
    coverage = np.zeros((height, width), dtype=bool)
    for idx, match in enumerate(matches):
        if not inlier_mask[idx]:
            continue
        center = keypoints[match.trainIdx].pt
        cx, cy = int(center[0]), int(center[1])
        # Clip the disk's bounding box to the image so the slice stays valid.
        top, bottom = max(0, cy - radius), min(height, cy + radius + 1)
        left, right = max(0, cx - radius), min(width, cx + radius + 1)
        rows, cols = np.ogrid[top:bottom, left:right]
        inside_disk = (cols - cx) ** 2 + (rows - cy) ** 2 <= radius ** 2
        coverage[top:bottom, left:right] |= inside_disk
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (radius, radius))
    return cv2.dilate(coverage.astype(np.uint8), ellipse, iterations=2).astype(bool)
81
+
82
+
83
+ def _build_histogram_lookup(src_channel, tgt_channel, n_bins=256):
84
+ src_hist, _ = np.histogram(src_channel.flatten(), bins=n_bins, range=(0, 256))
85
+ tgt_hist, _ = np.histogram(tgt_channel.flatten(), bins=n_bins, range=(0, 256))
86
+ src_cdf = np.cumsum(src_hist).astype(np.float64)
87
+ src_cdf = src_cdf / (src_cdf[-1] + 1e-10)
88
+ tgt_cdf = np.cumsum(tgt_hist).astype(np.float64)
89
+ tgt_cdf = tgt_cdf / (tgt_cdf[-1] + 1e-10)
90
+ lookup = np.zeros(n_bins, dtype=np.uint8)
91
+ tgt_idx = 0
92
+ for src_idx in range(n_bins):
93
+ while tgt_idx < n_bins - 1 and tgt_cdf[tgt_idx] < src_cdf[src_idx]:
94
+ tgt_idx += 1
95
+ lookup[src_idx] = tgt_idx
96
+ return lookup
97
+
98
+
99
+ def _build_histogram_lookup_float(src_channel, tgt_channel, n_bins=256):
100
+ src_hist, _ = np.histogram(src_channel.flatten(), bins=n_bins, range=(0, 256))
101
+ tgt_hist, _ = np.histogram(tgt_channel.flatten(), bins=n_bins, range=(0, 256))
102
+ src_cdf = np.cumsum(src_hist).astype(np.float64)
103
+ src_cdf = src_cdf / (src_cdf[-1] + 1e-10)
104
+ tgt_cdf = np.cumsum(tgt_hist).astype(np.float64)
105
+ tgt_cdf = tgt_cdf / (tgt_cdf[-1] + 1e-10)
106
+ lookup = np.zeros(n_bins, dtype=np.float32)
107
+ tgt_idx = 0
108
+ for src_idx in range(n_bins):
109
+ while tgt_idx < n_bins - 1 and tgt_cdf[tgt_idx] < src_cdf[src_idx]:
110
+ tgt_idx += 1
111
+ lookup[src_idx] = tgt_idx
112
+ return lookup
113
+
114
+
115
def histogram_matching_lab(source, target, mask=None):
    """Histogram-match *source* to *target* in LAB space, channel by channel.

    With no mask, each channel is matched with
    ``skimage.exposure.match_histograms``. With a boolean HxW mask, the
    lookup tables are built from masked pixels only but applied to the whole
    image, with linear interpolation between integer LAB levels.

    Returns a uint8 BGR image the same shape as ``source``.
    """
    # LAB separates luminance from chroma, so per-channel matching shifts
    # color balance without distorting structure.
    source_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32)
    target_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32)
    if mask is None:
        matched_lab = np.zeros_like(source_lab)
        for i in range(3):
            matched_lab[:, :, i] = exposure.match_histograms(source_lab[:, :, i], target_lab[:, :, i])
    else:
        matched_lab = np.zeros_like(source_lab)
        for i in range(3):
            # Statistics come only from masked pixels; the mapping is then
            # applied everywhere.
            src_masked = source_lab[:, :, i][mask]
            tgt_masked = target_lab[:, :, i][mask]
            lookup = _build_histogram_lookup_float(src_masked, tgt_masked)
            src_channel = source_lab[:, :, i]
            # Interpolate the LUT between adjacent integer levels because the
            # LAB channels are float-valued here.
            src_floor = np.floor(src_channel).astype(np.int32)
            src_ceil = np.minimum(src_floor + 1, 255)
            src_frac = src_channel - src_floor
            src_floor = np.clip(src_floor, 0, 255)
            matched_lab[:, :, i] = (1 - src_frac) * lookup[src_floor] + src_frac * lookup[src_ceil]
    matched_lab = np.clip(matched_lab, 0, 255).astype(np.uint8)
    return cv2.cvtColor(matched_lab, cv2.COLOR_LAB2BGR)
136
+
137
+
138
def histogram_matching_rgb(source, target, mask=None):
    """Per-channel histogram matching directly in BGR space.

    Without a mask each channel goes through
    ``skimage.exposure.match_histograms``; with a boolean HxW mask the LUT is
    built from masked pixels only and applied to the entire channel.
    """
    matched = np.zeros_like(source)
    if mask is None:
        for channel in range(3):
            matched[:, :, channel] = exposure.match_histograms(
                source[:, :, channel], target[:, :, channel]
            )
        return matched
    for channel in range(3):
        # Build the mapping from masked pixels, then remap every pixel.
        lut = _build_histogram_lookup(
            source[:, :, channel][mask], target[:, :, channel][mask]
        )
        matched[:, :, channel] = lut[source[:, :, channel]]
    return matched
151
+
152
+
153
def piecewise_linear_histogram_transfer(source, target, n_bins=256, mask=None):
    """Match source colors to target via per-channel CDF mapping with linear
    interpolation between lookup-table bins.

    Args:
        source: HxWx3 uint8 image whose colors are remapped.
        target: HxWx3 uint8 image supplying the reference distribution.
        n_bins: number of histogram bins over the [0, 256) intensity range.
        mask: optional HxW boolean mask; when given, histograms are computed
            only from masked pixels while the mapping is applied everywhere.

    Returns:
        uint8 image with the same shape as ``source``.
    """
    result = np.zeros_like(source, dtype=np.float32)
    for c in range(3):
        if mask is not None:
            src_channel = source[:, :, c][mask].astype(np.float32)
            tgt_channel = target[:, :, c][mask].astype(np.float32)
        else:
            src_channel = source[:, :, c].flatten().astype(np.float32)
            tgt_channel = target[:, :, c].flatten().astype(np.float32)
        src_hist, _ = np.histogram(src_channel, bins=n_bins, range=(0, 256))
        tgt_hist, _ = np.histogram(tgt_channel, bins=n_bins, range=(0, 256))
        # Normalized CDFs; epsilon avoids 0/0 when the (masked) input is empty.
        src_cdf = np.cumsum(src_hist).astype(np.float64)
        src_cdf = src_cdf / (src_cdf[-1] + 1e-10)
        tgt_cdf = np.cumsum(tgt_hist).astype(np.float64)
        tgt_cdf = tgt_cdf / (tgt_cdf[-1] + 1e-10)
        # Vectorized CDF matching: first target bin whose CDF reaches each
        # source bin's CDF (replaces the original O(n_bins) Python scan).
        lookup = np.minimum(
            np.searchsorted(tgt_cdf, src_cdf, side="left"), n_bins - 1
        ).astype(np.float32)
        # Interpolate the LUT between adjacent integer levels so the mapping
        # stays smooth for non-integer source values.
        src_img = source[:, :, c].astype(np.float32)
        src_floor = np.floor(src_img).astype(np.int32)
        src_ceil = np.minimum(src_floor + 1, n_bins - 1)
        src_frac = src_img - src_floor
        src_floor = np.clip(src_floor, 0, n_bins - 1)
        result[:, :, c] = (1 - src_frac) * lookup[src_floor] + src_frac * lookup[src_ceil]
    return np.clip(result, 0, 255).astype(np.uint8)
181
+
182
+
183
def full_histogram_matching(source, target, mask=None):
    """Blend three color-matching strategies into one result.

    Weights: 0.5 LAB-space matching, 0.3 piecewise-linear CDF transfer,
    0.2 direct RGB matching. Returns a uint8 image.
    """
    blended = (
        0.5 * histogram_matching_lab(source, target, mask).astype(np.float32)
        + 0.3 * piecewise_linear_histogram_transfer(source, target, mask=mask).astype(np.float32)
        + 0.2 * histogram_matching_rgb(source, target, mask).astype(np.float32)
    )
    return np.clip(blended, 0, 255).astype(np.uint8)
191
+
192
+
193
def align_image(source_img, target_img):
    """Warp *source_img* onto *target_img* and color-match the result.

    Pipeline: resize source to the target size, match SIFT features,
    estimate a homography, warp, then apply blended histogram-based color
    matching. When a homography is found, color statistics are restricted to
    regions near the geometric inliers (background-aware matching).

    Args:
        source_img: BGR image to be aligned.
        target_img: BGR reference image; also defines the output size.

    Returns:
        BGR uint8 image the same size as ``target_img``.
    """
    target_h, target_w = target_img.shape[:2]
    target_size = (target_w, target_h)
    # Work at the target resolution so keypoint coordinates line up.
    source_resized = cv2.resize(source_img, target_size, interpolation=cv2.INTER_LANCZOS4)

    kp_src, desc_src = extract_features(source_resized)
    kp_tgt, desc_tgt = extract_features(target_img)
    matches = match_features(desc_src, desc_tgt)

    color_mask = None
    # A homography needs at least 4 correspondences.
    if len(matches) >= 4:
        H, mask = compute_homography(kp_src, kp_tgt, matches)
        if H is not None and mask is not None:
            inlier_mask = mask.ravel()
            aligned = cv2.warpPerspective(source_resized, H, target_size,
                                          flags=cv2.INTER_LANCZOS4,
                                          borderMode=cv2.BORDER_REPLICATE)
            # Restrict color statistics to neighborhoods of inlier matches.
            color_mask = create_inlier_mask(kp_tgt, matches, inlier_mask,
                                            target_img.shape, radius=50)
        else:
            # Homography estimation failed: fall back to the plain resize.
            aligned = source_resized
    else:
        # Too few matches for geometric alignment: color-match only.
        aligned = source_resized

    result = full_histogram_matching(aligned, target_img, mask=color_mask)
    return result
219
+
220
+
221
# ============== FastAPI App ==============

app = FastAPI(title="Image Aligner API")

# Wide-open CORS so the API can be called from any front-end origin
# (e.g. when the Space UI is embedded elsewhere).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
232
+
233
+
234
def decode_image(data: bytes) -> np.ndarray:
    """Decode raw upload bytes into a BGR image array.

    Returns ``None`` when OpenCV cannot decode the bytes.
    """
    return cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
238
+
239
+
240
def encode_image_png(img: np.ndarray) -> bytes:
    """Serialize a BGR image to PNG-encoded bytes."""
    _, encoded = cv2.imencode('.png', img)
    return encoded.tobytes()
243
+
244
+
245
@app.post("/api/align")
async def align_api(
    source: UploadFile = File(..., description="Source image to align"),
    target: UploadFile = File(..., description="Target reference image")
):
    """
    Align source image to target image.
    Returns the aligned image as PNG.

    Responds 400 when either upload cannot be decoded as an image, and 500
    on unexpected processing errors.
    """
    try:
        source_data = await source.read()
        target_data = await target.read()

        source_img = decode_image(source_data)
        target_img = decode_image(target_data)

        if source_img is None or target_img is None:
            raise HTTPException(status_code=400, detail="Failed to decode images")

        aligned = align_image(source_img, target_img)
        png_bytes = encode_image_png(aligned)

        return Response(content=png_bytes, media_type="image/png")

    except HTTPException:
        # Bug fix: let deliberate HTTP errors (the 400 above) propagate
        # instead of being rewrapped as a 500 by the handler below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
271
+
272
+
273
@app.post("/api/align/base64")
async def align_base64_api(
    source: UploadFile = File(...),
    target: UploadFile = File(...)
):
    """
    Align source image to target image.
    Returns the aligned image as base64-encoded PNG in a JSON body
    (``{"image": "data:image/png;base64,..."}``).

    Responds 400 when either upload cannot be decoded as an image, and 500
    on unexpected processing errors.
    """
    try:
        source_data = await source.read()
        target_data = await target.read()

        source_img = decode_image(source_data)
        target_img = decode_image(target_data)

        if source_img is None or target_img is None:
            raise HTTPException(status_code=400, detail="Failed to decode images")

        aligned = align_image(source_img, target_img)
        png_bytes = encode_image_png(aligned)
        b64 = base64.b64encode(png_bytes).decode('utf-8')

        return {"image": f"data:image/png;base64,{b64}"}

    except HTTPException:
        # Bug fix: let deliberate HTTP errors (the 400 above) propagate
        # instead of being rewrapped as a 500 by the handler below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
300
+
301
+
302
# Single-page browser UI served at "/": lets a user upload source/target
# images, calls POST /api/align, and previews/downloads the result. Kept
# inline so the whole app ships as a single file.
HTML_CONTENT = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Image Aligner</title>
    <style>
        * { margin: 0; padding: 0; box-sizing: border-box; }
        body {
            font-family: 'Segoe UI', system-ui, sans-serif;
            background: linear-gradient(135deg, #1a1a2e 0%, #16213e 50%, #0f3460 100%);
            min-height: 100vh;
            color: #e8e8e8;
            padding: 2rem;
        }
        .dedication {
            text-align: center;
            padding: 2rem;
            background: linear-gradient(135deg, rgba(255, 121, 198, 0.15), rgba(139, 233, 253, 0.15));
            border-radius: 12px;
            margin-bottom: 2rem;
        }
        .dedication h2 { font-size: 1.2rem; font-weight: 300; margin-bottom: 0.5rem; }
        .dedication .names {
            font-size: 1.5rem;
            font-weight: 700;
            background: linear-gradient(90deg, #ff79c6, #ffb86c, #8be9fd, #50fa7b);
            -webkit-background-clip: text;
            -webkit-text-fill-color: transparent;
        }
        .dedication .team { font-size: 1.1rem; color: #8be9fd; margin-top: 0.5rem; }
        .container { max-width: 1200px; margin: 0 auto; }
        h1 { text-align: center; margin-bottom: 0.5rem; font-weight: 300; font-size: 2.5rem; }
        .subtitle { text-align: center; color: #888; margin-bottom: 2rem; }
        .upload-grid { display: grid; grid-template-columns: 1fr 1fr; gap: 2rem; margin-bottom: 2rem; }
        .upload-box {
            background: rgba(255,255,255,0.03);
            border: 2px dashed rgba(255,255,255,0.2);
            border-radius: 12px;
            padding: 2rem;
            text-align: center;
            cursor: pointer;
            transition: all 0.3s;
            min-height: 250px;
            display: flex;
            flex-direction: column;
            align-items: center;
            justify-content: center;
        }
        .upload-box:hover { border-color: rgba(255,255,255,0.4); background: rgba(255,255,255,0.05); }
        .upload-box.has-image { padding: 1rem; }
        .upload-box img { max-width: 100%; max-height: 200px; border-radius: 8px; }
        .upload-box input { display: none; }
        .upload-box h3 { margin-bottom: 0.5rem; }
        .upload-box.source h3 { color: #8be9fd; }
        .upload-box.target h3 { color: #ffb86c; }
        .btn {
            display: block;
            width: 100%;
            max-width: 300px;
            margin: 0 auto 2rem;
            padding: 1rem 2rem;
            font-size: 1.1rem;
            font-weight: 600;
            border: none;
            border-radius: 8px;
            background: linear-gradient(135deg, #50fa7b, #00d9ff);
            color: #1a1a2e;
            cursor: pointer;
            transition: all 0.3s;
        }
        .btn:hover:not(:disabled) { transform: translateY(-2px); box-shadow: 0 10px 30px rgba(80,250,123,0.3); }
        .btn:disabled { opacity: 0.5; cursor: not-allowed; }
        .result { text-align: center; display: none; }
        .result.show { display: block; }
        .result img { max-width: 100%; border-radius: 8px; margin: 1rem 0; }
        .result a {
            display: inline-block;
            padding: 0.8rem 2rem;
            background: rgba(255,255,255,0.1);
            color: #fff;
            text-decoration: none;
            border-radius: 8px;
            margin-top: 1rem;
        }
        .loading { display: none; text-align: center; padding: 2rem; }
        .loading.show { display: block; }
        .spinner {
            width: 50px; height: 50px;
            border: 3px solid rgba(255,255,255,0.1);
            border-top-color: #50fa7b;
            border-radius: 50%;
            animation: spin 1s linear infinite;
            margin: 0 auto 1rem;
        }
        @keyframes spin { to { transform: rotate(360deg); } }
        .api-docs {
            background: rgba(255,255,255,0.03);
            border-radius: 12px;
            padding: 2rem;
            margin-top: 3rem;
        }
        .api-docs h2 { margin-bottom: 1rem; color: #50fa7b; }
        .api-docs pre {
            background: rgba(0,0,0,0.3);
            padding: 1rem;
            border-radius: 8px;
            overflow-x: auto;
            font-size: 0.9rem;
        }
        .api-docs code { color: #8be9fd; }
        @media (max-width: 768px) { .upload-grid { grid-template-columns: 1fr; } }
    </style>
</head>
<body>
    <div class="container">
        <div class="dedication">
            <h2>Dedicated with ♥ love and devotion to</h2>
            <div class="names">Alon Y., Daniel B., Denis Z., Tal S.</div>
            <div class="team">and the rest of the Animation Taskforce 2026</div>
        </div>

        <h1>🎯 Image Aligner</h1>
        <p class="subtitle">Geometric alignment with background-aware color matching</p>

        <div class="upload-grid">
            <div class="upload-box source" onclick="document.getElementById('sourceInput').click()">
                <input type="file" id="sourceInput" accept="image/*">
                <h3>📷 Source Image</h3>
                <p>Click to upload</p>
            </div>
            <div class="upload-box target" onclick="document.getElementById('targetInput').click()">
                <input type="file" id="targetInput" accept="image/*">
                <h3>🎯 Target Reference</h3>
                <p>Click to upload</p>
            </div>
        </div>

        <button class="btn" id="alignBtn" disabled onclick="alignImages()">✨ Align Images</button>

        <div class="loading" id="loading">
            <div class="spinner"></div>
            <p>Aligning images...</p>
        </div>

        <div class="result" id="result">
            <h2>✨ Aligned Result</h2>
            <img id="resultImg" src="">
            <br>
            <a id="downloadLink" download="aligned.png">Download Aligned Image</a>
        </div>

        <div class="api-docs">
            <h2>📡 API Usage</h2>
            <p>POST to <code>/api/align</code> with multipart form data:</p>
            <pre><code>// JavaScript (fetch)
const formData = new FormData();
formData.append('source', sourceFile);
formData.append('target', targetFile);

const response = await fetch('/api/align', {
    method: 'POST',
    body: formData
});
const blob = await response.blob();
const url = URL.createObjectURL(blob);

// Or use /api/align/base64 to get base64 response:
const response = await fetch('/api/align/base64', {
    method: 'POST',
    body: formData
});
const data = await response.json();
console.log(data.image); // data:image/png;base64,...</code></pre>
        </div>
    </div>

    <script>
        let sourceFile = null;
        let targetFile = null;

        document.getElementById('sourceInput').onchange = (e) => {
            sourceFile = e.target.files[0];
            showPreview('source', sourceFile);
            updateButton();
        };

        document.getElementById('targetInput').onchange = (e) => {
            targetFile = e.target.files[0];
            showPreview('target', targetFile);
            updateButton();
        };

        function showPreview(type, file) {
            const box = document.querySelector(`.upload-box.${type}`);
            const reader = new FileReader();
            reader.onload = (e) => {
                box.innerHTML = `<img src="${e.target.result}">`;
                box.classList.add('has-image');
            };
            reader.readAsDataURL(file);
        }

        function updateButton() {
            document.getElementById('alignBtn').disabled = !(sourceFile && targetFile);
        }

        async function alignImages() {
            const loading = document.getElementById('loading');
            const result = document.getElementById('result');

            loading.classList.add('show');
            result.classList.remove('show');

            try {
                const formData = new FormData();
                formData.append('source', sourceFile);
                formData.append('target', targetFile);

                const response = await fetch('/api/align', {
                    method: 'POST',
                    body: formData
                });

                if (!response.ok) throw new Error('Alignment failed');

                const blob = await response.blob();
                const url = URL.createObjectURL(blob);

                document.getElementById('resultImg').src = url;
                document.getElementById('downloadLink').href = url;
                result.classList.add('show');
            } catch (err) {
                alert('Error: ' + err.message);
            } finally {
                loading.classList.remove('show');
            }
        }
    </script>
</body>
</html>
"""
545
+
546
+
547
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the inline single-page upload UI."""
    return HTML_CONTENT
550
+
551
+
552
if __name__ == "__main__":
    # Bind on all interfaces; 7860 matches the Dockerfile's EXPOSE and the
    # Hugging Face Spaces convention.
    uvicorn.run(app, host="0.0.0.0", port=7860)
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
# Web framework and ASGI server
fastapi
uvicorn[standard]
# Needed by FastAPI for multipart/form-data file uploads
python-multipart
# Image-processing stack (headless OpenCV: no GUI libraries required)
opencv-python-headless>=4.8.0
numpy>=1.24.0
scipy>=1.11.0
scikit-image>=0.21.0