# Face-swap demo (Gradio + InsightFace). Web-page scrape residue removed.
import gradio as gr
import numpy as np
import cv2
from PIL import Image
import insightface
from insightface.app import FaceAnalysis
import torch
from torch import nn
import torchvision
# Gender classification model: a ResNet-50 backbone (no pretrained weights)
# whose final fully-connected layer is replaced with a 2-class head
# (Female/Male, per the class_names in detect_gender). Trained weights are
# only loaded later, inside detect_gender(), from 'resnet50.pth'.
# NOTE(review): gender_model is built at import time but is never passed to
# detect_gender() anywhere in this file — confirm an external caller uses it.
gender_model = torchvision.models.resnet50(weights=None)
num_features = gender_model.fc.in_features
gender_model.fc = nn.Linear(num_features, 2)
def load_face_analysis_model():
    """Return a ready-to-use InsightFace ``FaceAnalysis`` detector.

    The detector is built and prepared only once, then cached on the
    function object.  The original code re-initialized the heavy model on
    every call — and ``swap_faces`` calls ``detect_faces`` twice per swap,
    so each swap paid the full model-initialization cost twice.

    Returns:
        insightface.app.FaceAnalysis: a prepared face detector.
    """
    app = getattr(load_face_analysis_model, "_cached_app", None)
    if app is None:
        app = FaceAnalysis(name='buffalo_l')
        # ctx_id=0 selects the first GPU (InsightFace falls back to CPU if
        # none is present); det_size is the detector input resolution.
        app.prepare(ctx_id=0, det_size=(640, 640))
        load_face_analysis_model._cached_app = app
    return app
def detect_faces(img):
    """Run InsightFace detection on *img* (BGR ndarray).

    Returns the list of detected face objects (may be empty).
    """
    analyzer = load_face_analysis_model()
    return analyzer.get(img)
def detect_gender(img_list,
                  model, model_file='resnet50.pth', device='cpu'):
    """Classify each image in *img_list* as 'Female' or 'Male'.

    Args:
        img_list: iterable of HxWxC uint8 ndarrays (anything accepted by
            ``PIL.Image.fromarray``).
        model: a torch module whose weights match *model_file* and whose
            output has 2 logits (index 0 = Female, 1 = Male).
        model_file: path to the state-dict checkpoint to load.
        device: torch device string for inference.

    Returns:
        list[str]: one label ('Female' or 'Male') per input image.
    """
    # Load the checkpoint into the caller-supplied model and freeze it.
    state = torch.load(model_file, map_location=device)
    model.load_state_dict(state)
    model.to(device)
    model.eval()

    # Standard ImageNet preprocessing (224x224, ImageNet mean/std).
    preprocess = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225]),
    ])
    class_names = ['Female', 'Male']

    labels = []
    with torch.no_grad():
        for arr in img_list:
            pil = Image.fromarray(arr).convert('RGB')
            batch = preprocess(pil).unsqueeze(0).to(device)
            logits = model(batch)
            pred = logits.argmax(dim=1).item()
            labels.append(class_names[pred])
    return labels
def swap_faces(target_image, source_image):
    """Swap the first face of *source_image* onto every face in *target_image*.

    Args:
        target_image: PIL image (or None) whose faces get replaced.
        source_image: PIL image (or None) supplying the replacement face.

    Returns:
        An RGB numpy array with swapped faces on success; the unchanged
        *target_image* (still a PIL image) when no face is found in either
        input; None when either input is missing.  NOTE(review): the mixed
        PIL/ndarray return types both work as Gradio image outputs, but
        callers outside Gradio should be aware of the inconsistency.
    """
    if target_image is None or source_image is None:
        return None

    # PIL (RGB) -> OpenCV (BGR), which is what InsightFace expects.
    target_img = cv2.cvtColor(np.array(target_image), cv2.COLOR_RGB2BGR)
    source_img = cv2.cvtColor(np.array(source_image), cv2.COLOR_RGB2BGR)
    res = target_img.copy()

    faces_target = detect_faces(target_img)
    faces_source = detect_faces(source_img)
    if len(faces_target) == 0 or len(faces_source) == 0:
        # No face detected in one of the pictures: return the original.
        return target_image

    # Load the swapper once and cache it on the function object; the
    # original code re-read 'inswapper_128.onnx' from disk on every click.
    swapper = getattr(swap_faces, "_cached_swapper", None)
    if swapper is None:
        swapper = insightface.model_zoo.get_model('inswapper_128.onnx',
                                                  download=False,
                                                  download_zip=False)
        swap_faces._cached_swapper = swapper

    # Paste the first detected source face onto every target face.
    source_face = faces_source[0]
    for face in faces_target:
        res = swapper.get(res, face, source_face, paste_back=True)

    # OpenCV (BGR) -> RGB for display in Gradio.
    res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
    return res
# Gradio user interface (on-screen labels are in Turkish:
# "Hedef Resim" = target image, "Kaynak Resim" = source image,
# "Face Swap Yap" = do face swap, "İşlem Sonucu" = operation result).
with gr.Blocks() as demo:
    gr.Markdown("### Face Swapper Uygulaması")
    with gr.Row():
        with gr.Column():
            # Both inputs arrive as PIL images (type="pil"), matching the
            # expectations of swap_faces().
            target_input = gr.Image(label="Hedef Resim", type="pil")
            source_input = gr.Image(label="Kaynak Resim", type="pil")
            process_button = gr.Button("Face Swap Yap", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="İşlem Sonucu")
    # Wire the button: swap_faces(target, source) -> output image.
    process_button.click(
        fn=swap_faces,
        inputs=[target_input, source_input],
        outputs=[output_image]
    )
# Start the Gradio server (blocking call).
demo.launch()