InsightWhispersAI commited on
Commit
0e89395
·
verified ·
1 Parent(s): 41fb8bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -601
app.py CHANGED
@@ -1,609 +1,77 @@
1
- import gradio as gr
2
  import insightface
3
- from insightface.app import FaceAnalysis
4
- import cv2
5
  import numpy as np
6
- import logging
7
- from PIL import Image
8
-
9
- assert insightface.__version__ >= '0.7'
10
-
11
- # Set up logging
12
- logging.basicConfig(level=logging.INFO)
13
- logger = logging.getLogger(__name__)
14
-
15
- def prepare_app():
16
- app = FaceAnalysis(name='buffalo_l')
17
- app.prepare(ctx_id=0, det_size=(384, 384))
18
- swapper = insightface.model_zoo.get_model('inswapper_128.onnx', download=True, download_zip=True)
19
- return app, swapper
20
-
21
- def sort_faces(faces):
22
- return sorted(faces, key=lambda x: x.bbox[0])
23
-
24
- def get_face(faces, face_id):
25
- if len(faces) < face_id or face_id < 1:
26
- raise gr.Error(f"Only {len(faces)} faces found. You asked for face {face_id}.")
27
- return faces[face_id - 1]
28
-
29
- def align_face(image, face, target_shape):
30
- src_pts = np.array(face.kps, dtype=np.float32)[:5]
31
- h, w = target_shape[:2]
32
- dst_pts = np.array([
33
- [w * 0.3, h * 0.3], [w * 0.7, h * 0.3], [w * 0.5, h * 0.5],
34
- [w * 0.35, h * 0.7], [w * 0.65, h * 0.7]
35
- ], dtype=np.float32)
36
- M, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts)
37
- if M is None:
38
- raise gr.Error("Face alignment failed.")
39
- return cv2.warpAffine(image, M, (w, h))
40
-
41
- def create_feathered_mask(shape, hull, feather=3):
42
- mask = np.zeros(shape, dtype=np.uint8)
43
- cv2.fillConvexPoly(mask, hull, (255, 255, 255))
44
- return cv2.GaussianBlur(mask, (feather * 2 + 1, feather * 2 + 1), 0)
45
-
46
- def skin_tone_match(source, target, face_region):
47
- source_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB)
48
- target_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB)
49
- mask = face_region.astype(np.uint8)
50
- mean_source = cv2.mean(source_lab, mask=mask)[:3]
51
- mean_target = cv2.mean(target_lab, mask=mask)[:3]
52
- diff = np.array(mean_target) - np.array(mean_source)
53
- source_lab = source_lab.astype(np.float32)
54
- for i in range(3):
55
- source_lab[:, :, i] = np.clip(source_lab[:, :, i] + diff[i], 0, 255)
56
- return cv2.cvtColor(source_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
57
-
58
- def enhance_face(image):
59
- lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
60
- l, a, b = cv2.split(lab)
61
- l = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8)).apply(l)
62
- enhanced = cv2.merge((l, a, b))
63
- sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
64
- return cv2.filter2D(cv2.cvtColor(enhanced, cv2.COLOR_LAB2BGR), -1, sharpen)
65
-
66
- app, swapper = prepare_app()
67
-
68
- def swap_faces(sourceImage, sourceFaceIndex, destinationImage, destinationFaceIndex):
69
- if sourceImage is None or destinationImage is None:
70
- raise gr.Error("Both source and destination images are required.")
71
-
72
- source = cv2.cvtColor(np.array(sourceImage), cv2.COLOR_RGB2BGR)
73
- destination = cv2.cvtColor(np.array(destinationImage), cv2.COLOR_RGB2BGR)
74
-
75
- for image in (source, destination):
76
- h, w = image.shape[:2]
77
- if h < 128 or w < 128:
78
- raise gr.Error("Images must be at least 128px in size.")
79
- if max(h, w) > 512:
80
- scale = 512 / max(h, w)
81
- image = cv2.resize(image, (int(w * scale), int(h * scale)))
82
-
83
- source_faces = sort_faces(app.get(source))
84
- dest_faces = sort_faces(app.get(destination))
85
-
86
- source_face = get_face(source_faces, sourceFaceIndex)
87
- dest_face = get_face(dest_faces, destinationFaceIndex)
88
 
89
- source_aligned = align_face(source, source_face, destination.shape)
 
90
 
91
- swapped = swapper.get(destination.copy(), dest_face, source_face, paste_back=False)
92
- if swapped is None or np.any(np.isnan(swapped)):
93
- swapped = swapper.get(destination.copy(), dest_face, source_face, paste_back=True)
94
- if swapped is None:
95
- raise gr.Error("Face swap failed.")
96
 
97
- pts = np.array(dest_face.kps).astype(np.int32)
98
- hull = cv2.convexHull(pts)
99
- mask = create_feathered_mask(swapped.shape, hull)
 
 
100
 
101
- face_region = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) > 0
102
- swapped = skin_tone_match(swapped, destination, face_region)
103
- swapped = enhance_face(swapped)
 
104
 
105
- center = tuple(np.mean(hull[:, 0, :], axis=0).astype(np.int32))
106
  try:
107
- blended = cv2.seamlessClone(swapped, destination, mask, center, cv2.NORMAL_CLONE)
108
- except:
109
- blended = swapper.get(destination.copy(), dest_face, source_face, paste_back=True)
110
- if blended is None:
111
- raise gr.Error("Blending failed.")
112
- return cv2.cvtColor(blended, cv2.COLOR_BGR2RGB)
113
-
114
- # Updated CSS with further shrunk heading and darker tagline
115
- custom_css = """
116
- /* Import futuristic fonts */
117
- @import url('https://fonts.googleapis.com/css2?family=Fira+Code:wght@400;500;600&family=Neuropol:wght@400;700&display=swap');
118
-
119
- /* Global styles */
120
- body, .gradio-container {
121
- background: linear-gradient(135deg, #0A0A1F 0%, #1C2526 100%); /* Dark AI gradient */
122
- color: #E0E7FF !important;
123
- font-family: 'Fira Code', monospace !important;
124
- overflow-x: hidden;
125
- position: relative;
126
- margin: 0;
127
- }
128
-
129
- /* AI-themed circuit background with animated nodes */
130
- body::before {
131
- content: '';
132
- position: fixed;
133
- top: 0;
134
- left: 0;
135
- width: 100%;
136
- height: 100%;
137
- background: repeating-linear-gradient(
138
- 45deg,
139
- rgba(0, 209, 255, 0.05),
140
- rgba(0, 209, 255, 0.05) 2px,
141
- transparent 2px,
142
- transparent 50px
143
- );
144
- opacity: 0.3;
145
- pointer-events: none;
146
- animation: circuitFlow 20s linear infinite;
147
- }
148
-
149
- body::after {
150
- content: '';
151
- position: fixed;
152
- top: 0;
153
- left: 0;
154
- width: 100%;
155
- height: 100%;
156
- background: radial-gradient(
157
- circle at 20% 30%,
158
- rgba(0, 209, 255, 0.2) 0%,
159
- transparent 10%
160
- ),
161
- radial-gradient(
162
- circle at 80% 70%,
163
- rgba(161, 0, 255, 0.2) 0%,
164
- transparent 10%
165
- );
166
- opacity: 0.5;
167
- animation: nodePulse 5s ease-in-out infinite;
168
- }
169
-
170
- @keyframes circuitFlow {
171
- 0% { background-position: 0 0; }
172
- 100% { background-position: 50px 50px; }
173
- }
174
-
175
- @keyframes nodePulse {
176
- 0% { opacity: 0.5; transform: scale(1); }
177
- 50% { opacity: 0.8; transform: scale(1.1); }
178
- 100% { opacity: 0.5; transform: scale(1); }
179
- }
180
-
181
- /* Header */
182
- .header {
183
- text-align: center;
184
- margin: 60px 0 40px;
185
- position: relative;
186
- z-index: 10;
187
- }
188
-
189
- h1 {
190
- font-size: 2.5rem; /* Further reduced from 3rem */
191
- font-family: 'Neuropol', sans-serif;
192
- background: linear-gradient(90deg, #00D1FF, #A100FF); /* AI-themed gradient */
193
- -webkit-background-clip: text;
194
- -webkit-text-fill-color: transparent;
195
- margin: 10px 0 5px; /* Reduced margin to minimize vertical space */
196
- letter-spacing: 4px;
197
- text-transform: uppercase;
198
- opacity: 1 !important;
199
- animation: enhancedGlowPulse 3s ease-in-out infinite;
200
- }
201
-
202
- @keyframes enhancedGlowPulse {
203
- 0% {
204
- text-shadow: 0 0 8px rgba(0, 209, 255, 0.5), 0 0 8px rgba(161, 0, 255, 0.5);
205
- transform: scale(1);
206
- }
207
- 50% {
208
- text-shadow: 0 0 20px rgba(0, 209, 255, 0.9), 0 0 20px rgba(161, 0, 255, 0.9), 0 0 30px rgba(0, 209, 255, 0.7);
209
- transform: scale(1.02); /* Further reduced from 1.03 */
210
- }
211
- 100% {
212
- text-shadow: 0 0 8px rgba(0, 209, 255, 0.5), 0 0 8px rgba(161, 0, 255, 0.5);
213
- transform: scale(1);
214
- }
215
- }
216
-
217
- /* Tagline - Darkened for better contrast */
218
- .tagline {
219
- font-size: 1.2rem;
220
- color: #B0B8D1 !important; /* Light grayish-blue, darker than #ffffff */
221
- text-shadow: 0 0 4px rgba(176, 184, 209, 0.3); /* Adjusted shadow to match new color */
222
- font-family: 'Fira Code', monospace;
223
- font-weight: 400;
224
- opacity: 1 !important;
225
- margin-top: 20px; /* Spacing from header */
226
- }
227
-
228
- /* Main container - Circuit texture and glowing edges */
229
- .container {
230
- background: rgba(10, 10, 31, 0.9);
231
- border: 3px solid transparent;
232
- border-image: linear-gradient(45deg, #00D1FF, #A100FF) 1;
233
- border-radius: 20px;
234
- padding: 40px;
235
- margin: 30px auto;
236
- max-width: 1100px;
237
- box-shadow: 0 0 30px rgba(0, 209, 255, 0.3), 0 0 30px rgba(161, 0, 255, 0.3);
238
- position: relative;
239
- transform: translateY(0);
240
- transition: transform 0.4s ease, box-shadow 0.4s ease;
241
- background-image: linear-gradient(
242
- rgba(0, 209, 255, 0.05),
243
- rgba(0, 209, 255, 0.05)
244
- ),
245
- repeating-linear-gradient(
246
- 45deg,
247
- rgba(161, 0, 255, 0.1),
248
- rgba(161, 0, 255, 0.1) 2px,
249
- transparent 2px,
250
- transparent 20px
251
- );
252
- }
253
-
254
- .container:hover {
255
- transform: translateY(-5px);
256
- box-shadow: 0 0 40px rgba(0, 209, 255, 0.5), 0 0 40px rgba(161, 0, 255, 0.5);
257
- }
258
-
259
- /* Responsive adjustments */
260
- @media (max-width: 768px) {
261
- .container {
262
- margin: 15px;
263
- padding: 20px;
264
- }
265
- h1 {
266
- font-size: 1.5rem; /* Further reduced from 2rem */
267
- }
268
- .tagline {
269
- font-size: 1rem;
270
- margin-top: 15px;
271
- }
272
- }
273
-
274
- /* Image inputs - Holographic effect */
275
- .gr-image img {
276
- border: 3px solid transparent;
277
- border-image: linear-gradient(45deg, #00D1FF, #A100FF) 1;
278
- border-radius: 15px;
279
- box-shadow: 0 0 15px rgba(0, 209, 255, 0.3);
280
- transition: all 0.4s ease;
281
- max-width: 100%;
282
- height: auto;
283
- }
284
-
285
- .gr-image img:hover {
286
- box-shadow: 0 0 25px rgba(0, 209, 255, 0.5);
287
- transform: scale(1.05);
288
- }
289
-
290
- /* Output image block - Distinct styling */
291
- .output-image-block {
292
- background: rgba(20, 20, 40, 0.95);
293
- border: 3px solid transparent;
294
- border-image: linear-gradient(45deg, #00D1FF, #A100FF) 1;
295
- border-radius: 15px;
296
- padding: 20px;
297
- margin: 20px 0;
298
- box-shadow: 0 0 20px rgba(0, 209, 255, 0.3), 0 0 20px rgba(161, 0, 255, 0.3);
299
- }
300
-
301
- /* Labels - Fixed for clarity */
302
- label, .gr-number label, .gr-image label, .gr-textbox label {
303
- color: #00D1FF !important; /* Neon blue for AI theme */
304
- background: transparent !important;
305
- box-shadow: none !important;
306
- opacity: 1 !important;
307
- text-shadow: 0 0 3px rgba(0, 0, 0, 0.5); /* Dark shadow for contrast on white */
308
- font-family: 'Neuropol', sans-serif;
309
- font-weight: 400;
310
- font-size: 1.3rem;
311
- margin-bottom: 12px;
312
- }
313
-
314
- /* Fix placeholder text ("Drop IMAGE Here") */
315
- .gr-image .prose,
316
- .gr-image input::placeholder,
317
- .gr-image input::-webkit-input-placeholder,
318
- .gr-image input::-moz-placeholder,
319
- .gr-image input:-ms-input-placeholder,
320
- .gr-image input:-moz-placeholder {
321
- color: #00D1FF !important; /* Neon blue for visibility */
322
- opacity: 1 !important;
323
- text-shadow: 0 0 3px rgba(0, 0, 0, 0.5); /* Dark shadow for contrast */
324
- font-family: 'Fira Code', monospace;
325
- font-size: 1rem;
326
- }
327
-
328
- /* Number inputs */
329
- .gr-number {
330
- background: transparent !important;
331
- border: 2px solid transparent;
332
- border-image: linear-gradient(45deg, #00D1FF, #A100FF) 1;
333
- border-radius: 10px;
334
- padding: 15px;
335
- box-shadow: none !important;
336
- opacity: 1 !important;
337
- transition: border-image 0.4s ease;
338
- }
339
-
340
- .gr-number:hover {
341
- border-image: linear-gradient(45deg, #A100FF, #00D1FF) 1;
342
- }
343
-
344
- .gr-number input {
345
- background: #1A1A3B;
346
- color: #E0E7FF !important;
347
- border: 1px solid #00D1FF;
348
- border-radius: 8px;
349
- padding: 10px;
350
- font-family: 'Fira Code', monospace;
351
- font-size: 1rem;
352
- text-shadow: none;
353
- box-shadow: none;
354
- opacity: 1 !important;
355
- transition: border-color 0.4s ease;
356
- }
357
-
358
- .gr-number input:focus {
359
- border-color: #A100FF;
360
- outline: none;
361
- }
362
-
363
- .gr-number .prose {
364
- color: #ffffff !important; /* White for clarity */
365
- background: transparent !important;
366
- box-shadow: none !important;
367
- opacity: 1 !important;
368
- text-shadow: 0 0 2px rgba(255, 255, 255, 0.2);
369
- font-size: 0.9rem;
370
- font-family: 'Fira Code', monospace;
371
- }
372
-
373
- /* Button - AI-themed with data stream effect */
374
- .neon-button {
375
- background: linear-gradient(45deg, #00D1FF, #A100FF);
376
- color: #ffffff !important; /* White text for clarity */
377
- border: none;
378
- border-radius: 12px;
379
- padding: 15px 35px;
380
- font-size: 1.2rem;
381
- font-family: 'Neuropol', sans-serif;
382
- font-weight: 400;
383
- text-transform: uppercase;
384
- box-shadow: 0 0 20px rgba(0, 209, 255, 0.5), 0 0 20px rgba(161, 0, 255, 0.5);
385
- transition: all 0.4s ease;
386
- letter-spacing: 3px;
387
- margin: 25px 0;
388
- position: relative;
389
- overflow: hidden;
390
- opacity: 1 !important;
391
- text-shadow: 0 0 3px rgba(255, 255, 255, 0.3);
392
- animation: buttonGlow 2s ease-in-out infinite;
393
- }
394
-
395
- .neon-button::before {
396
- content: '';
397
- position: absolute;
398
- top: 0;
399
- left: -100%;
400
- width: 50%;
401
- height: 100%;
402
- background: linear-gradient(
403
- 90deg,
404
- transparent,
405
- rgba(255, 255, 255, 0.4),
406
- transparent
407
- );
408
- animation: dataStream 3s linear infinite;
409
- }
410
-
411
- .neon-button::after {
412
- content: '';
413
- position: absolute;
414
- top: 0;
415
- left: -100%;
416
- width: 100%;
417
- height: 100%;
418
- background: rgba(255, 255, 255, 0.3);
419
- transition: left 0.4s ease;
420
- }
421
-
422
- .neon-button:hover::after {
423
- left: 100%;
424
- }
425
-
426
- .neon-button:hover {
427
- box-shadow: 0 0 30px rgba(0, 209, 255, 0.7), 0 0 30px rgba(161, 0, 255, 0.7);
428
- transform: translateY(-3px);
429
- }
430
-
431
- @keyframes buttonGlow {
432
- 0% { box-shadow: 0 0 20px rgba(0, 209, 255, 0.5), 0 0 20px rgba(161, 0, 255, 0.5); }
433
- 50% { box-shadow: 0 0 30px rgba(0, 209, 255, 0.7), 0 0 30px rgba(161, 0, 255, 0.7); }
434
- 100% { box-shadow: 0 0 20px rgba(0, 209, 255, 0.5), 0 0 20px rgba(161, 0, 255, 0.5); }
435
- }
436
-
437
- @keyframes dataStream {
438
- 0% { left: -100%; }
439
- 100% { left: 100%; }
440
- }
441
-
442
- /* Spinner */
443
- .spinner {
444
- color: #ffffff !important; /* White for clarity */
445
- text-shadow: 0 0 3px rgba(255, 255, 255, 0.3);
446
- font-family: 'Fira Code', monospace;
447
- font-size: 1rem;
448
- margin: 20px 0;
449
- opacity: 1 !important;
450
- }
451
-
452
- .spinner::before {
453
- content: '🤖 ';
454
- vertical-align: middle;
455
- }
456
-
457
- /* Error messages */
458
- .gr-textbox {
459
- background: #1A1A3B;
460
- color: #FF6B6B !important; /* Bright red for visibility */
461
- border: 2px solid #FF6B6B;
462
- border-radius: 10px;
463
- box-shadow: 0 0 10px rgba(255, 107, 107, 0.4);
464
- font-family: 'Fira Code', monospace;
465
- padding: 12px;
466
- font-size: 0.95rem;
467
- opacity: 1 !important;
468
- text-shadow: none;
469
- }
470
-
471
- /* Placeholder text in error textbox */
472
- .gr-textbox::placeholder {
473
- color: #E0E7FF !important;
474
- opacity: 1 !important;
475
- text-shadow: none;
476
- }
477
-
478
- /* Footer - Refactored for clarity with AI accents */
479
- .footer {
480
- color: #FFFFFF !important;
481
- font-size: 1.1rem !important;
482
- font-family: 'Fira Code', monospace !important;
483
- background: rgba(20, 20, 40, 0.95);
484
- border: 2px solid transparent;
485
- border-image: linear-gradient(45deg, #00D1FF, #A100FF) 1;
486
- border-radius: 20px;
487
- padding: 25px;
488
- margin: 50px 0 30px;
489
- text-align: center;
490
- line-height: 1.75;
491
- text-shadow: 0 0 6px rgba(255, 255, 255, 0.4);
492
- box-shadow: 0 0 25px rgba(0, 209, 255, 0.4), 0 0 25px rgba(161, 0, 255, 0.4);
493
- opacity: 1 !important;
494
- }
495
-
496
- .footer a {
497
- color: #A100FF !important;
498
- font-weight: bold;
499
- text-decoration: underline;
500
- text-shadow: 0 0 4px rgba(161, 0, 255, 0.6);
501
- opacity: 1 !important;
502
- }
503
-
504
- .footer a:hover {
505
- color: #00D1FF !important;
506
- text-shadow: 0 0 6px rgba(0, 209, 255, 0.7);
507
- }
508
-
509
- /* Light mode adjustments for Hugging Face Spaces */
510
- @media (prefers-color-scheme: light) {
511
- body, .gradio-container {
512
- background: linear-gradient(135deg, #E0E7FF 0%, #C3D7E8 100%); /* Light gradient */
513
- }
514
- body::before {
515
- background: repeating-linear-gradient(
516
- 45deg,
517
- rgba(0, 209, 255, 0.15),
518
- rgba(0, 209, 255, 0.15) 2px,
519
- transparent 2px,
520
- transparent 50px
521
- );
522
- }
523
- body::after {
524
- background: radial-gradient(
525
- circle at 20% 30%,
526
- rgba(0, 209, 255, 0.3) 0%,
527
- transparent 10%
528
- ),
529
- radial-gradient(
530
- circle at 80% 70%,
531
- rgba(161, 0, 255, 0.3) 0%,
532
- transparent 10%
533
- );
534
- }
535
- .container {
536
- background: rgba(255, 255, 255, 0.9);
537
- box-shadow: 0 0 30px rgba(0, 209, 255, 0.2), 0 0 30px rgba(161, 0, 255, 0.2);
538
- }
539
- .output-image-block {
540
- background: rgba(240, 240, 255, 0.95);
541
- box-shadow: 0 0 20px rgba(0, 209, 255, 0.2), 0 0 20px rgba(161, 0, 255, 0.2);
542
- }
543
- .footer {
544
- background: rgba(240, 240, 255, 0.95);
545
- box-shadow: 0 0 25px rgba(0, 209, 255, 0.3), 0 0 25px rgba(161, 0, 255, 0.3);
546
- }
547
- .gr-number input {
548
- background: #E0E7FF;
549
- color: #1A1A3B !important;
550
- }
551
- .gr-textbox {
552
- background: #E0E7FF;
553
- color: #FF6B6B !important;
554
- }
555
- /* Ensure labels remain legible in light mode */
556
- label, .gr-number label, .gr-image label, .gr-textbox label {
557
- color: #00D1FF !important;
558
- text-shadow: 0 0 3px rgba(0, 0, 0, 0.5); /* Dark shadow for contrast */
559
- }
560
- .gr-image .prose,
561
- .gr-image input::placeholder,
562
- .gr-image input::-webkit-input-placeholder,
563
- .gr-image input::-moz-placeholder,
564
- .gr-image input:-ms-input-placeholder,
565
- .gr-image input:-moz-placeholder {
566
- color: #00D1FF !important;
567
- text-shadow: 0 0 3px rgba(0, 0, 0, 0.5);
568
- }
569
- /* Darken tagline in light mode */
570
- .tagline {
571
- color: #6B7280 !important; /* Darker gray for light mode */
572
- text-shadow: 0 0 4px rgba(107, 114, 128, 0.3); /* Adjusted shadow */
573
- }
574
- }
575
- """
576
-
577
- with gr.Blocks(css=custom_css) as demo:
578
- with gr.Column(elem_classes="container"):
579
- gr.HTML("""
580
- <div class='header'>
581
- <h1>InsightWhispersAI Face Swap</h1>
582
- <p class='tagline'>Dive into the Future with AI-Driven Face Swaps</p>
583
- </div>
584
- """)
585
- with gr.Row():
586
- with gr.Column():
587
- source_input = gr.Image(label="Source Image (Face to Swap)", type="pil", height=300)
588
- source_face_idx = gr.Number(precision=0, value=1, label='Source Face Position', info='Select face from left, starting at 1')
589
- with gr.Column():
590
- target_input = gr.Image(label="Target Image (Body to Swap Onto)", type="pil", height=300)
591
- target_face_idx = gr.Number(precision=0, value=1, label='Target Face Position', info='Select face to replace from left, starting at 1')
592
- run_btn = gr.Button("🚀 Launch Face Swap", elem_classes="neon-button")
593
- spinner = gr.Markdown("<span class='spinner'>🌌 Processing... (10-30 seconds)</span>", visible=False)
594
- with gr.Row(elem_classes="output-image-block"):
595
- result_image = gr.Image(label="Swapped Result", height=400)
596
- error_output = gr.Textbox(label="Status / Error Messages", interactive=False, placeholder="Status will appear here...")
597
- gr.HTML("""
598
- <div class='footer'>
599
- <p>Powered by <a href='https://insightwhispersai.com'>InsightWhispersAI</a> | Built with xAI Technology 🤖</p>
600
- <p><strong>Pro Tip:</strong> Use high-resolution, well-lit images for optimal results. Processing takes 10-30 seconds.</p>
601
- </div>
602
- """)
603
- run_btn.click(
604
- fn=lambda *args: (gr.update(visible=True), *swap_faces(*args)),
605
- inputs=[source_input, source_face_idx, target_input, target_face_idx],
606
- outputs=[spinner, result_image, error_output, spinner]
607
- )
608
-
609
- demo.launch()
 
 
1
  import insightface
 
 
2
  import numpy as np
3
+ import cv2
4
+ import gradio as gr
5
+ from insightface.app import FaceAnalysis
6
+ import onnxruntime
7
+ import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
# Module-level model setup: runs once at import time so the Gradio handler
# can reuse the loaded models across requests.

# InsightFace detection/embedding pipeline (buffalo_l model pack), CPU-only.
app = FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'])
# ctx_id=0 with CPUExecutionProvider stays on CPU; 640x640 detector input.
app.prepare(ctx_id=0, det_size=(640, 640))

# The swapper model is expected to be bundled locally — fail fast with a
# clear error instead of letting model_zoo raise something opaque later.
inswapper_path = "checkpoints/inswapper_128.onnx"
if not os.path.exists(inswapper_path):
    raise FileNotFoundError(f"Model not found at {inswapper_path}")
swapper = insightface.model_zoo.get_model(inswapper_path, providers=['CPUExecutionProvider'])
 
16
 
17
def preprocess_image(img):
    """Equalize luminance contrast of an image while preserving its color.

    The previous implementation converted the image to grayscale, equalized
    it, and replicated the single channel — which discarded all color and
    made the downstream face swap produce grayscale output. Here only the
    Y (luma) channel is equalized in YCrCb space, so contrast improves for
    face detection but chrominance is kept intact.

    Args:
        img: HxWx3 uint8 color image (assumed 3-channel — TODO confirm
            Gradio never delivers grayscale/RGBA here).

    Returns:
        HxWx3 uint8 color image with histogram-equalized luminance.
    """
    ycrcb = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2RGB)
22
 
23
def sharpen_image(img):
    """Return a sharpened copy of *img* using a 3x3 unsharp-style kernel.

    The kernel boosts the center pixel (weight 5) against its 4-neighbors
    (weight -1 each); weights sum to 1, so overall brightness is preserved.
    """
    sharpening_kernel = np.array(
        [
            [0, -1, 0],
            [-1, 5, -1],
            [0, -1, 0],
        ]
    )
    filtered = cv2.filter2D(img, -1, sharpening_kernel)
    # Clamp to valid 8-bit range before casting back to uint8.
    return np.clip(filtered, 0, 255).astype(np.uint8)
27
 
28
def swap_faces(src_img, dst_img, blur_strength=3, sharpen=False):
    """Swap the first detected face of *src_img* onto the first face of *dst_img*.

    Fixes over the previous version:
      * ``cv2.GaussianBlur`` requires an odd kernel size, but the UI slider
        allows 2 and 4, which crashed and silently returned a black image.
        ``blur_strength | 1`` rounds even values up to the next odd size
        while leaving previously working odd values unchanged.
      * The result was returned in BGR order, but Gradio displays RGB, so
        output channels were swapped on screen.
      * The swap itself ran on a grayscale-equalized copy, producing a
        grayscale result; it now runs on the full-color image.

    Args:
        src_img: RGB uint8 array from Gradio (face donor).
        dst_img: RGB uint8 array from Gradio (image whose face is replaced).
        blur_strength: 0 disables blur; otherwise Gaussian kernel of odd
            size derived from this value.
        sharpen: when True, apply a sharpening filter to the result.

    Returns:
        RGB uint8 array with the swapped face, or a black 640x640 image if
        detection/swapping fails (kept as best-effort for the UI).
    """
    try:
        # InsightFace models follow the OpenCV BGR convention.
        src_bgr = cv2.cvtColor(src_img, cv2.COLOR_RGB2BGR)
        dst_bgr = cv2.cvtColor(dst_img, cv2.COLOR_RGB2BGR)

        src_faces = app.get(src_bgr)
        dst_faces = app.get(dst_bgr)

        if not src_faces or not dst_faces:
            raise ValueError("No faces detected in one of the images.")

        src_face = src_faces[0]
        dst_face = dst_faces[0]

        # Swap on the original color image so the output keeps its colors.
        swapped_img = swapper.get(dst_bgr, dst_face, src_face, paste_back=True)

        if blur_strength > 0:
            k = int(blur_strength) | 1  # GaussianBlur needs an odd kernel
            swapped_img = cv2.GaussianBlur(swapped_img, (k, k), 0)

        if sharpen:
            swapped_img = sharpen_image(swapped_img)

        # Back to RGB for Gradio display.
        return cv2.cvtColor(swapped_img, cv2.COLOR_BGR2RGB)
    except Exception as e:
        # Best-effort UI behavior: log and show a blank frame rather than
        # surfacing a traceback to the user.
        print(f"Error: {str(e)}")
        return np.zeros((640, 640, 3), dtype=np.uint8)
55
+
56
# --- Gradio UI wiring -------------------------------------------------------

title = "🧠 Futuristic Face Swapper with inswapper_128"
description = (
    "Upload a source face and a target image. The AI swaps the face using inswapper_128.onnx "
    "for clean, smooth results. Adjust blur strength or enable sharpening for enhanced output."
)

# Input widgets, in the argument order expected by swap_faces.
ui_inputs = [
    gr.Image(label="Source Face", type="numpy"),
    gr.Image(label="Target Image", type="numpy"),
    gr.Slider(label="Blur Strength", minimum=0, maximum=5, step=1, value=3),
    gr.Checkbox(label="Enable Sharpening", value=False),
]
ui_output = gr.Image(label="Face Swapped Output")

demo = gr.Interface(
    fn=swap_faces,
    inputs=ui_inputs,
    outputs=ui_output,
    title=title,
    description=description,
    flagging_mode="never",
)

if __name__ == "__main__":
    # queue() serializes requests so the CPU-bound swap doesn't overlap.
    demo.queue().launch()