Nekochu commited on
Commit
6ecf224
·
verified ·
1 Parent(s): 251e13e

Initial commit

Browse files
README.md CHANGED
@@ -1,12 +1,21 @@
1
  ---
2
  title: DeepMosaics
3
- emoji:
4
- colorFrom: pink
5
- colorTo: red
6
  sdk: gradio
7
  sdk_version: 6.1.0
8
  app_file: app.py
9
  pinned: false
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
1
  ---
2
  title: DeepMosaics
3
+ emoji: 🔲👁️
4
+ colorFrom: blue
5
+ colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 6.1.0
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
+ short_description: Add or remove mosaics from images and videos using AI segmentation.
12
  ---
13
 
14
+ onnx_models:
15
+ - add_face.onnx: Add mosaic to face
16
+ - mosaic_position.onnx: Detect mosaics
17
+ - clean_face_HD.onnx (int8): Remove mosaics from faces
18
+ - clean_youknow_img.onnx: Body/General (images)
19
+ - clean_youknow_video.onnx: Body/General (videos)
20
+
21
+ Credit: https://github.com/HypoX64/DeepMosaics
app.py ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepMosaics - Add/remove mosaics from images/videos using AI.
3
+ https://github.com/HypoX64/DeepMosaics
4
+ """
5
+ import os
6
+ import numpy as np
7
+ import cv2
8
+ import onnxruntime as ort
9
+
10
+ ONNX_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "onnx_models")
11
+ VIDEO_EXTS = ['.mp4', '.avi', '.mov', '.mkv', '.webm', '.gif']
12
+ sessions = {}
13
+
14
def get_session(name):
    """Return a cached ONNX Runtime CPU session for *name*, loading it on first use.

    Raises FileNotFoundError when ``onnx_models/<name>.onnx`` is missing.
    """
    sess = sessions.get(name)
    if sess is None:
        model_path = os.path.join(ONNX_DIR, f"{name}.onnx")
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model not found: {model_path}")
        sess = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])
        sessions[name] = sess
    return sess
21
+
22
+ # ============ Segmentation ============
23
+
24
def run_segment(img, model, size=360):
    """Run segmentation model *model* on BGR image *img*.

    Returns a uint8 mask of shape (size, size) in 0..255.
    """
    session = get_session(model)
    # Normalise to [0, 1] float and reshape to NCHW for ONNX.
    scaled = cv2.resize(img, (size, size)).astype(np.float32) / 255.0
    batch = np.transpose(scaled, (2, 0, 1))[np.newaxis]
    pred = session.run(None, {'input': batch})[0].squeeze()
    return (pred * 255).clip(0, 255).astype(np.uint8)
30
+
31
def get_all_regions(img, model, threshold=127, ex_mul=1.5, all_areas=False):
    """Get detected mosaic regions with repo-style detection.

    Args:
        img: BGR image, shape (h, w, 3).
        model: segmentation model name passed to run_segment.
        threshold: binarisation threshold applied to the raw mask.
        ex_mul: expansion multiplier applied to each region's size.
        all_areas: when False, keep only the largest detected contour.

    Returns:
        (regions, mask) where regions is a list of (cx, cy, halfsize) in
        original-image coordinates and mask is the processed 360x360 mask.
    """
    h, w = img.shape[:2]
    mask_raw = run_segment(img, model)  # (360, 360) mask of the whole image

    # Repo-style mask processing: threshold, dilate-ish blur, re-threshold.
    ex_mun = max(1, int(min(h, w) / 20))
    mask = cv2.threshold(mask_raw, threshold, 255, cv2.THRESH_BINARY)[1]
    mask = cv2.blur(mask, (ex_mun, ex_mun))
    mask = cv2.threshold(mask, int(threshold / 5), 255, cv2.THRESH_BINARY)[1]

    # Find most likely ROI (largest contour) - like repo's find_mostlikely_ROI
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not all_areas and contours:
        areas = [cv2.contourArea(c) for c in contours]
        largest_idx = areas.index(max(areas))
        mask = np.zeros_like(mask)
        cv2.fillPoly(mask, [contours[largest_idx]], 255)
        contours = [contours[largest_idx]]

    regions = []
    # Bug fix: the mask is a (360, 360) resize of the FULL image, so x and y must
    # be scaled back independently (w/360 and h/360). The previous single factor
    # min(h, w)/360 misplaced regions on non-square images.
    rat_x = w / float(mask.shape[1])
    rat_y = h / float(mask.shape[0])

    for c in contours:
        if cv2.contourArea(c) < 50:  # ignore tiny speckles
            continue
        x, y, bw, bh = cv2.boundingRect(c)

        # Scale centre to original coordinates and apply ex_mul expansion.
        cx = int((x + bw // 2) * rat_x)
        cy = int((y + bh // 2) * rat_y)
        halfsize = int(max(bw * rat_x, bh * rat_y) * ex_mul / 2)

        # Clamp to image bounds (keeps the square crop fully inside the frame).
        halfsize = max(15, min(halfsize, min(h, w) // 2 - 1))
        cx = max(halfsize, min(cx, w - halfsize))
        cy = max(halfsize, min(cy, h - halfsize))
        regions.append((cx, cy, halfsize))
    return regions, mask
74
+
75
def get_region(img, model):
    """Return (cx, cy, halfsize) of the single best detection, or (0, 0, 0)."""
    # add_youknow has weaker detection, so use a lower threshold.
    thr = 20 if model == "add_youknow" else 127
    regions, _ = get_all_regions(img, model, threshold=thr)
    if not regions:
        return (0, 0, 0)
    return max(regions, key=lambda r: r[2])
80
+
81
+ # ============ Cleaning ============
82
+
83
def run_clean(crop, model, size):
    """Run a single-image restoration model on *crop*.

    BGR uint8 in, BGR uint8 out, shape (size, size, 3).
    """
    sess = get_session(model)
    rgb = cv2.resize(crop, (size, size))[:, :, ::-1]  # BGR to RGB (model expects RGB)
    # Scale to [-1, 1] and reshape to NCHW.
    tensor = np.transpose(rgb.astype(np.float32) / 255.0 * 2 - 1, (2, 0, 1))[np.newaxis]
    pred = sess.run(None, {'input': tensor})[0].squeeze()
    pred = np.transpose(pred, (1, 2, 0))
    pred = ((pred + 1) / 2 * 255).clip(0, 255).astype(np.uint8)
    return pred[:, :, ::-1]  # RGB to BGR
93
+
94
def run_clean_video(crops, prev_frame):
    """Run video model (5-frame input for temporal consistency).

    Args:
        crops: iterable of 5 BGR crops (the frame being cleaned plus its
            temporal neighbours).
        prev_frame: previous BGR output for temporal feedback, or None for
            the first frame (a zero tensor is fed instead).

    Returns:
        Restored centre frame as a (256, 256, 3) BGR uint8 array.
    """
    sess = get_session("clean_youknow_video")
    size = 256
    frames = []
    for crop in crops:
        img = cv2.resize(crop, (size, size))[:, :, ::-1]  # BGR to RGB
        # Scale to [-1, 1] as the GAN expects.
        img = img.astype(np.float32) / 255.0 * 2 - 1
        frames.append(np.transpose(img, (2, 0, 1)))
    # Stack along a new time axis between channel and spatial dims.
    stream = np.stack(frames, axis=1)[np.newaxis]  # [1, 3, 5, 256, 256]

    if prev_frame is None:
        # First frame: no previous output, feed zeros.
        prev = np.zeros((1, 3, size, size), dtype=np.float32)
    else:
        p = cv2.resize(prev_frame, (size, size))[:, :, ::-1]
        p = p.astype(np.float32) / 255.0 * 2 - 1
        prev = np.transpose(p, (2, 0, 1))[np.newaxis]

    out = sess.run(None, {'input': stream, 'prev_frame': prev})[0].squeeze()
    out = np.transpose(out, (1, 2, 0))
    # Back from [-1, 1] to uint8.
    out = ((out + 1) / 2 * 255).clip(0, 255).astype(np.uint8)
    return out[:, :, ::-1]  # RGB to BGR
116
+
117
def blend(img, fake, x, y, size, seg_mask=None):
    """Blend *fake* into *img* around (x, y) with a feathered mask (repo-style).

    Mutates *img* in place and returns it; returns it unchanged when the
    2*size square would leave the frame.
    """
    h, w = img.shape[:2]
    fake = cv2.resize(fake, (size * 2, size * 2), interpolation=cv2.INTER_CUBIC)
    top, bottom = y - size, y + size
    left, right = x - size, x + size
    if top < 0 or left < 0 or bottom > h or right > w:
        return img

    # Segmentation mask if available, otherwise a solid box.
    if seg_mask is None:
        alpha = np.ones((size * 2, size * 2), dtype=np.uint8) * 255
    else:
        alpha = cv2.resize(seg_mask, (w, h))[top:bottom, left:right]

    # Feathering ("eclosion" in the repo) softens the seam.
    k = int(size / 10) + 2
    alpha = cv2.blur(alpha, (k, k))
    alpha = alpha.astype(np.float32) / 255.0
    alpha = np.stack([alpha] * 3, axis=-1)

    base = img[top:bottom, left:right].astype(np.float32)
    mixed = base * (1 - alpha) + fake.astype(np.float32) * alpha
    img[top:bottom, left:right] = np.clip(mixed, 0, 255).astype(np.uint8)
    return img
142
+
143
def addmosaic_base(img, mask, n, model='squa_avg', feather=0):
    """Apply a block-average ('squa_avg') mosaic where *mask* is non-zero.

    Args:
        img: BGR uint8 image.
        mask: binary mask; resized to img's shape when heights differ.
        n: mosaic block size in pixels (coerced to int >= 1).
        feather: edge smoothing; 0 uses n as blur size, negative disables.

    Returns a new uint8 image; *img* is not modified.
    """
    n = int(max(1, n))
    h, w = img.shape[:2]
    if mask.shape[0] != h:
        mask = cv2.resize(mask, (w, h))
    mosaic = img.copy()

    mid = n // 2
    # squa_avg: replace each masked block with its mean colour.
    for bi in range(h // n):
        for bj in range(w // n):
            # Decide per block by sampling the mask at the block centre.
            if mask[min(bi * n + mid, h - 1), min(bj * n + mid, w - 1)] > 0:
                rows = slice(bi * n, (bi + 1) * n)
                cols = slice(bj * n, (bj + 1) * n)
                patch = img[rows, cols, :]
                if patch.size > 0:
                    mosaic[rows, cols, :] = patch.mean(axis=(0, 1))

    # Feathering blends mosaic and original along the mask edge.
    if feather >= 0:
        blur_size = feather if feather != 0 else n
        soft = cv2.blur(mask.astype(np.float32), (blur_size, blur_size)) / 255.0
        for ch in range(3):
            mosaic[:, :, ch] = img[:, :, ch] * (1 - soft) + mosaic[:, :, ch] * soft
        mosaic = mosaic.astype(np.uint8)

    return mosaic
173
+
174
def get_mosaic_autosize(img, mask):
    """Pick a mosaic block size from the masked area, scaled to image size (repo-style)."""
    h, w = img.shape[:2]
    size = min(h, w)
    alpha = size / 512

    # Mask area normalised to a 512x512 reference frame.
    area = np.sum(cv2.resize(mask, (size, size)) > 127)
    area = area / (alpha * alpha)

    # Piecewise-linear mapping from area to block size (repo constants).
    if area > 50000:
        mosaic_size = alpha * ((area - 50000) / 50000 + 12)
    elif area > 20000:
        mosaic_size = alpha * ((area - 20000) / 30000 + 8)
    elif area > 5000:
        mosaic_size = alpha * ((area - 5000) / 20000 + 7)
    elif area >= 0:
        mosaic_size = alpha * (area / 5000 + 6)
    else:
        mosaic_size = 7
    return max(3, mosaic_size)
196
+
197
def add_mosaic_mask(img, model, threshold=20):
    """Add an auto-sized mosaic over the segmented area (body/general mode)."""
    h, w = img.shape[:2]
    raw = run_segment(img, model)
    binary = cv2.threshold(raw, threshold, 255, cv2.THRESH_BINARY)[1]
    full_mask = cv2.resize(binary, (w, h))

    block = get_mosaic_autosize(img, full_mask)
    return addmosaic_base(img, full_mask, block, model='squa_avg', feather=0)
206
+
207
def pixelate(img, x, y, size, block=7):
    """Pixelate the 2*size square centred at (x, y); mutates and returns *img*.

    Returns *img* unchanged when the square would leave the frame.
    """
    top, bottom = y - size, y + size
    left, right = x - size, x + size
    if top < 0 or left < 0 or bottom > img.shape[0] or right > img.shape[1]:
        return img
    region = img[top:bottom, left:right]
    rh, rw = region.shape[:2]
    if rh <= 0 or rw <= 0:
        return img
    # Shrink then re-expand with nearest-neighbour to create hard blocks.
    shrunk = cv2.resize(region, (max(1, rw // block), max(1, rh // block)), interpolation=cv2.INTER_LINEAR)
    img[top:bottom, left:right] = cv2.resize(shrunk, (rw, rh), interpolation=cv2.INTER_NEAREST)
    return img
218
+
219
+ # ============ Processing ============
220
+
221
def process_image(img_bgr, action, mode="face"):
    """Add or remove mosaics on a single BGR image.

    Args:
        img_bgr: BGR uint8 image.
        action: "add" to apply a mosaic, anything else removes mosaics.
        mode: "face" or "body" — selects detector/restorer models.

    Returns a new BGR image; the input is not modified.
    """
    result = img_bgr.copy()
    if action == "add":
        if mode == "face":
            x, y, size = get_region(img_bgr, "add_face")
            # Ignore tiny/no detections.
            if size >= 10:
                result = pixelate(result, x, y, size)
        else:
            # Body mode: use mask-based mosaic (like repo)
            result = add_mosaic_mask(img_bgr, "add_youknow")
    else:
        # Face mode uses larger expansion for better coverage
        ex_mul = 2.0 if mode == "face" else 1.5
        regions, seg_mask = get_all_regions(img_bgr, "mosaic_position", ex_mul=ex_mul)
        for x, y, size in regions:
            if size < 10:
                continue
            # Regions are clamped inside the frame, so this slice is non-negative.
            crop = result[y-size:y+size, x-size:x+size]
            if crop.size == 0:
                continue
            if mode == "face":
                fake = run_clean(crop, "clean_face_HD", 512)
            else:
                # Use video model for body/general (better quality than img model)
                # — the same crop is repeated 5x to fake a temporal window.
                crops = [crop] * 5
                fake = run_clean_video(crops, None)
            result = blend(result, fake, x, y, size, seg_mask)
    return result
249
+
250
def process_video(video_path, action, mode="face"):
    """Add or remove mosaics on a video, writing an mp4 to a temp file.

    Args:
        video_path: input video path (mp4/avi/mov/mkv/webm/gif).
        action: "add" or "remove".
        mode: "face" or "body".

    Returns the output file path, or None when the input can't be opened.
    """
    import tempfile
    if not video_path:
        return None
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return None
    # Fall back to 30 fps when the container reports 0 (e.g. some GIFs).
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

    # For body/general video removal, use video model with 5-frame input
    if action == "remove" and mode == "body":
        # First pass: load all frames and detect regions per frame.
        # NOTE(review): this buffers the whole video in memory — fine for short
        # clips, may be a problem for long videos.
        frames, regions = [], []
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frames.append(frame)
            regs, _ = get_all_regions(frame, "mosaic_position")
            regions.append(regs)

        prev_output = None
        for i, frame in enumerate(frames):
            result = frame.copy()
            for x, y, size in regions[i]:
                if size < 10:
                    continue
                # Get 5 crops centered on frame i
                crops = []
                for j in range(i-2, i+3):
                    # Clamp neighbour index at the clip boundaries.
                    idx = max(0, min(j, len(frames)-1))
                    # NOTE(review): this always takes the FIRST region of the
                    # neighbour frame regardless of which current region (x, y,
                    # size) is being cleaned — with multiple regions per frame
                    # the temporal crops may not correspond. Confirm intent.
                    rx, ry, rs = (regions[idx][0] if regions[idx] else (x, y, size))
                    crop = frames[idx][ry-rs:ry+rs, rx-rs:rx+rs]
                    if crop.size == 0:
                        crop = np.zeros((size*2, size*2, 3), dtype=np.uint8)
                    crops.append(crop)
                fake = run_clean_video(crops, prev_output)
                prev_output = fake
                result = blend(result, fake, x, y, size)
            out.write(result)
    else:
        # Frame-by-frame for face or add
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            out.write(process_image(frame, action, mode))

    cap.release()
    out.release()
    return out_path
304
+
305
+ # ============ Gradio ============
306
+
307
def is_video(file_path):
    """True when *file_path* has a known video/GIF extension."""
    if not file_path:
        return False
    return os.path.splitext(str(file_path))[1].lower() in VIDEO_EXTS
312
+
313
def to_bgr(pil_img):
    """Convert a PIL image (or array) to a 3-channel BGR ndarray."""
    arr = np.array(pil_img)
    if arr.ndim == 2:
        # Grayscale: replicate into three channels.
        return cv2.cvtColor(arr, cv2.COLOR_GRAY2BGR)
    if arr.shape[2] == 4:
        # RGBA: drop alpha while converting.
        return cv2.cvtColor(arr, cv2.COLOR_RGBA2BGR)
    # RGB: just reverse the channel axis.
    return arr[:, :, ::-1]
323
+
324
def add_mosaic_img(file, target):
    """Gradio handler: add a face mosaic to an image; returns an RGB array."""
    if file is None:
        return None
    return process_image(to_bgr(file), "add", "face")[:, :, ::-1]
329
+
330
def remove_mosaic_img(file, target):
    """Gradio handler: remove mosaics from an image; returns an RGB array."""
    if file is None:
        return None
    mode = "body" if ("Body" in target or "General" in target) else "face"
    return process_image(to_bgr(file), "remove", mode)[:, :, ::-1]
336
+
337
def add_mosaic_vid(file, target):
    """Gradio handler: add a face mosaic to a video; returns the output path."""
    if file is None:
        return None
    return process_video(file, "add", "face")
341
+
342
def remove_mosaic_vid(file, target):
    """Gradio handler: remove mosaics from a video; returns the output path."""
    if file is None:
        return None
    mode = "body" if ("Body" in target or "General" in target) else "face"
    return process_video(file, "remove", mode)
347
+
348
if __name__ == "__main__":
    import sys

    # CLI mode: python app.py <add|remove> <input> <output> [face|body]
    if len(sys.argv) >= 4:
        from PIL import Image
        import shutil
        action, inp, out = sys.argv[1], sys.argv[2], sys.argv[3]
        mode = sys.argv[4] if len(sys.argv) > 4 else "face"
        ext = os.path.splitext(inp)[1].lower()
        if ext in VIDEO_EXTS:
            result_path = process_video(inp, action, mode)
            if result_path:
                # process_video writes to a temp file; move it to the target.
                shutil.move(result_path, out)
                print(f"Saved: {out}")
        else:
            img = Image.open(inp)
            # NOTE(review): assumes a 3- or 4-channel image; a grayscale input
            # (2-D array) would fail here — consider to_bgr() instead. Confirm.
            img_bgr = np.array(img)[:, :, :3][:, :, ::-1]
            result = process_image(img_bgr, action, mode)
            Image.fromarray(result[:, :, ::-1]).save(out)
            print(f"Saved: {out}")

    # UI mode: no extra args launches the Gradio interface.
    elif len(sys.argv) == 1:
        import gradio as gr
        from PIL import Image as PILImage

        def remove_mosaic_for_example(input_img, target):
            """Process for examples - returns output image"""
            if input_img is None:
                return None
            mode = "body" if "Body" in target or "General" in target else "face"
            img = to_bgr(input_img)
            result = process_image(img, "remove", mode)
            return PILImage.fromarray(result[:, :, ::-1])

        def add_mosaic_for_example(input_img, target):
            """Process for examples - returns output image"""
            if input_img is None:
                return None
            img = to_bgr(input_img)
            result = process_image(img, "add", "face")
            return PILImage.fromarray(result[:, :, ::-1])

        css = ".compact { max-width: 900px; margin: auto; }"

        def process_any(file, target, action):
            """Process image or video - auto-detect by extension"""
            if file is None:
                return gr.update(visible=True, value=None), gr.update(visible=False, value=None)

            # NOTE(review): both branches of this conditional return `file`
            # unchanged — presumably the else-branch was meant to be
            # `file.name` for file-object inputs. Confirm against the gr.File
            # return type in this Gradio version.
            path = file if isinstance(file, str) else file
            ext = os.path.splitext(path)[1].lower()
            mode = "body" if "Body" in target or "General" in target else "face"

            if ext in VIDEO_EXTS:
                # Video/GIF - show video output, hide image
                result = process_video(path, action, mode)
                return gr.update(visible=False, value=None), gr.update(visible=True, value=result)
            else:
                # Image - show image output, hide video
                img = to_bgr(PILImage.open(path))
                result = process_image(img, action, mode)
                return gr.update(visible=True, value=PILImage.fromarray(result[:, :, ::-1])), gr.update(visible=False, value=None)

        def update_preview(file):
            """Update preview based on file type"""
            if file is None:
                return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
            # NOTE(review): same no-op conditional as in process_any — confirm.
            path = file if isinstance(file, str) else file
            ext = os.path.splitext(path)[1].lower()
            if ext in VIDEO_EXTS:
                return gr.update(visible=False, value=None), gr.update(visible=True, value=path)
            else:
                return gr.update(visible=True, value=path), gr.update(visible=False, value=None)

        with gr.Blocks(title="DeepMosaics") as demo:
            with gr.Column(elem_classes="compact"):
                gr.Markdown("## DeepMosaics")

                target = gr.Radio(["Face", "Body/General"], value="Face", label="Target", scale=0)

                with gr.Row():
                    # Input with preview
                    with gr.Column():
                        input_file = gr.File(
                            label="Input (Image or Video)",
                            file_types=[".jpg", ".jpeg", ".png", ".webp", ".bmp", ".gif", ".mp4", ".avi", ".mov", ".mkv", ".webm"]
                        )
                        preview_img = gr.Image(label="Preview", height=250, visible=True, interactive=False)
                        preview_vid = gr.Video(label="Preview", height=250, visible=False, interactive=False)

                    # Output
                    with gr.Column():
                        output_img = gr.Image(label="Output", height=300, visible=True)
                        output_vid = gr.Video(label="Output", height=300, visible=False)

                with gr.Row():
                    btn_add = gr.Button("Add Mosaic")
                    btn_remove = gr.Button("Remove Mosaic", variant="primary")

                # Examples with cached outputs
                def example_remove(filepath, target):
                    mode = "body" if "Body" in target or "General" in target else "face"
                    img = to_bgr(PILImage.open(filepath))
                    result = process_image(img, "remove", mode)
                    return PILImage.fromarray(result[:, :, ::-1])

                gr.Examples(
                    examples=[
                        ["examples/mosaic.jpg", "Face"],
                        ["examples/face_clean.jpg", "Face"],
                        ["examples/youknow_mosaic.png", "Body/General"],
                    ],
                    inputs=[input_file, target],
                    outputs=output_img,
                    fn=example_remove,
                    cache_examples=True,
                    cache_mode="lazy",
                )

                # Update preview when file uploaded
                input_file.change(fn=update_preview, inputs=[input_file], outputs=[preview_img, preview_vid])

                btn_add.click(
                    fn=lambda f, t: process_any(f, t, "add"),
                    inputs=[input_file, target],
                    outputs=[output_img, output_vid]
                )
                btn_remove.click(
                    fn=lambda f, t: process_any(f, t, "remove"),
                    inputs=[input_file, target],
                    outputs=[output_img, output_vid]
                )

        # NOTE(review): in current Gradio, `css` is usually a gr.Blocks(...)
        # constructor argument rather than a launch() argument — confirm this
        # is accepted by the pinned gradio==6.1.0.
        demo.launch(css=css)
    else:
        print("Usage:")
        print(" python app.py # Gradio UI")
        print(" python app.py add input.jpg out.jpg # Add mosaic")
        print(" python app.py remove input.jpg out.jpg body # Remove body mosaic")
examples/face_clean.jpg ADDED
examples/mosaic.jpg ADDED
examples/youknow_mosaic.png ADDED
onnx_models/add_face.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a526c88d66ced19cca50aa58e110c6044c134baa909d9a26b68cd38df46b87e0
3
+ size 47541675
onnx_models/add_youknow.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:471979245fdc8c6a63249c8d82d7ca4635f8a46c3db2b9eded5d07fda77e850b
3
+ size 47541556
onnx_models/clean_face_HD.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96a988f61d308176b426879442c28274ebf64b8f0b2c927512fa6940bdae0b35
3
+ size 201649548
onnx_models/clean_youknow_img.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c982e49fa859a7f60b8734ad4b363ae7bb21e7598fe1e117380d17913dccbf4
3
+ size 45589827
onnx_models/clean_youknow_video.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c1c876717210400323d4b0193b82fd8a4219c70072fc02d2899a1b65ea1efa8
3
+ size 213449701
onnx_models/mosaic_position.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:359b3675294bf715212cd9077303c6a38974ff9f13ddd491c978ef5ac4461423
3
+ size 47541675
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio==6.1.0
2
+ onnxruntime
3
+ opencv-python-headless
4
+ numpy