Spaces:
Runtime error
Runtime error
File size: 5,610 Bytes
279af2f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 |
# app.py
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import Response
from fastapi.middleware.cors import CORSMiddleware
import cv2
import numpy as np
import face_recognition
import os
import shutil
import logging
app = FastAPI()

# Enable CORS to allow Framer frontend to connect.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# maximally permissive — consider restricting origins before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Configure logging (module uses the root logger via logging.error below).
logging.basicConfig(level=logging.INFO)
def get_face_data(image):
    """Locate exactly one face in *image* and return its box and landmarks.

    Returns ``(face_location, landmarks)`` where ``face_location`` is a
    ``(top, right, bottom, left)`` tuple and ``landmarks`` is a dict of
    named feature points. Raises ValueError when zero or more than one
    face is found, or when landmark detection fails.
    """
    locations = face_recognition.face_locations(image)
    if not locations:
        raise ValueError("No face detected in the image.")
    if len(locations) > 1:
        raise ValueError("Multiple faces detected; only one face per image is supported.")

    box = locations[0]  # (top, right, bottom, left)
    landmark_sets = face_recognition.face_landmarks(image, face_locations=[box])
    if not landmark_sets:
        raise ValueError("Could not detect face landmarks.")
    return box, landmark_sets[0]
def get_face_size(face_location):
    """Return ``(width, height)`` of a ``(top, right, bottom, left)`` face box."""
    top, right, bottom, left = face_location
    return right - left, bottom - top
def resize_face_image(source_img, target_face_size, source_face_location):
    """Scale *source_img* so its face roughly matches *target_face_size*.

    The smaller of the width/height scale factors is chosen so the face
    keeps its aspect ratio (no distortion). Returns the resized image and
    the scale factor that was applied.
    """
    src_width, src_height = get_face_size(source_face_location)
    tgt_width, tgt_height = target_face_size

    # The smaller factor guarantees the scaled face fits the target box
    # in both dimensions without stretching.
    factor = min(tgt_width / src_width, tgt_height / src_height)

    new_size = (int(source_img.shape[1] * factor), int(source_img.shape[0] * factor))
    resized = cv2.resize(source_img, new_size, interpolation=cv2.INTER_AREA)
    return resized, factor
def swap_faces(source_img, target_img):
    """Perform face swapping with size preservation and seamless blending.

    Both arguments are BGR (OpenCV) images, each containing exactly one
    face. Returns a copy of *target_img* with the source face blended in
    via ``cv2.seamlessClone``. Raises ValueError when face detection,
    cropping, or blending fails.
    """
    # face_recognition expects RGB; cv2.imread produces BGR.
    source_rgb = cv2.cvtColor(source_img, cv2.COLOR_BGR2RGB)
    target_rgb = cv2.cvtColor(target_img, cv2.COLOR_BGR2RGB)

    # Detect exactly one face (and its landmarks) in each image.
    source_face_location, source_landmarks = get_face_data(source_rgb)
    target_face_location, target_landmarks = get_face_data(target_rgb)

    # Scale the source image so its face matches the target face size.
    target_face_size = get_face_size(target_face_location)
    resized_source, scale = resize_face_image(source_img, target_face_size, source_face_location)

    # Map the detected face box into the resized image's coordinates.
    source_top, source_right, source_bottom, source_left = source_face_location
    top = int(source_top * scale)
    right = int(source_right * scale)
    bottom = int(source_bottom * scale)
    left = int(source_left * scale)

    source_face = resized_source[top:bottom, left:right]
    if source_face.size == 0:
        # Degenerate crop (e.g. extreme scale factor) — fail with a clear
        # message instead of letting seamlessClone throw later.
        raise ValueError("Source face region is empty after resizing.")

    # seamlessClone requires the pasted patch to lie entirely inside the
    # destination image. Crop an oversized patch down to the target size...
    target_h, target_w = target_img.shape[:2]
    face_h, face_w = source_face.shape[:2]
    if face_h >= target_h or face_w >= target_w:
        source_face = source_face[:min(face_h, target_h - 1), :min(face_w, target_w - 1)]
        face_h, face_w = source_face.shape[:2]

    # ...and clamp the clone center so the patch stays in bounds even when
    # the target face sits near an image edge (the original code raised a
    # generic blending error in that case).
    target_top, target_right, target_bottom, target_left = target_face_location
    target_center_x = (target_left + target_right) // 2
    target_center_y = (target_top + target_bottom) // 2
    half_w, half_h = face_w // 2, face_h // 2
    target_center_x = max(half_w + 1, min(target_center_x, target_w - (face_w - half_w) - 1))
    target_center_y = max(half_h + 1, min(target_center_y, target_h - (face_h - half_h) - 1))

    # Full-white mask: blend the whole rectangular patch.
    mask = 255 * np.ones(source_face.shape, source_face.dtype)

    try:
        result = cv2.seamlessClone(
            source_face,
            target_img,
            mask,
            (target_center_x, target_center_y),
            cv2.NORMAL_CLONE
        )
    except Exception as e:
        logging.error(f"Seamless cloning failed: {str(e)}")
        raise ValueError("Failed to blend the faces seamlessly.")
    return result
@app.post("/swap-face/")
async def swap_face(
    source_file: UploadFile = File(...),
    target_file: UploadFile = File(...),
    doFaceEnhancer: bool = True
):
    """Swap the face from *source_file* onto *target_file*, returning a JPEG.

    ``doFaceEnhancer`` is accepted for API compatibility but is currently
    unused by this implementation. On any failure the endpoint responds
    with HTTP 500 and a descriptive detail message.
    """
    # basename() strips any client-supplied directory components from the
    # upload filename, preventing path traversal via e.g. "../../etc/x".
    source_path = f"temp_source_{os.path.basename(source_file.filename or 'source')}"
    target_path = f"temp_target_{os.path.basename(target_file.filename or 'target')}"
    output_path = "output.jpg"
    try:
        # Persist the uploads so cv2.imread can load them from disk.
        with open(source_path, "wb") as f:
            shutil.copyfileobj(source_file.file, f)
        with open(target_path, "wb") as f:
            shutil.copyfileobj(target_file.file, f)

        source_img = cv2.imread(source_path)
        target_img = cv2.imread(target_path)
        if source_img is None or target_img is None:
            raise ValueError("Failed to load one or both images.")

        # Perform the custom face swap and encode the result as JPEG.
        result_img = swap_faces(source_img, target_img)
        cv2.imwrite(output_path, result_img)
        with open(output_path, "rb") as f:
            image_data = f.read()

        return Response(content=image_data, media_type="image/jpeg")
    except Exception as e:
        logging.error(f"Error in face swap: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Face swap failed: {str(e)}")
    finally:
        # Clean up temp files on success AND failure — the original only
        # removed them on the success path, leaking files on any exception.
        for path in (source_path, target_path, output_path):
            if os.path.exists(path):
                os.remove(path)