sk75 commited on
Commit
5ce2981
·
1 Parent(s): 879a63a

All files have been added.

Browse files
Files changed (4) hide show
  1. app.py +98 -0
  2. inswapper_128.onnx +3 -0
  3. requirements.txt +7 -0
  4. resnet50.pth +3 -0
app.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import cv2
4
+ from PIL import Image
5
+ import insightface
6
+ from insightface.app import FaceAnalysis
7
+ import torch
8
+ from torch import nn
9
+ import torchvision
10
+
11
+ # Gender model
12
+ gender_model = torchvision.models.resnet50(weights=None)
13
+ num_features = gender_model.fc.in_features
14
+ gender_model.fc = nn.Linear(num_features, 2)
15
+
16
+ def load_face_analysis_model():
17
+ app = FaceAnalysis(name='buffalo_l')
18
+ app.prepare(ctx_id=0, det_size=(640, 640))
19
+ return app
20
+
21
+ def detect_faces(img):
22
+ app = load_face_analysis_model()
23
+ faces = app.get(img)
24
+ return faces
25
+
26
+ def detect_gender(img_list,
27
+ model, model_file='resnet50.pth', device='cpu'):
28
+ model.load_state_dict(torch.load(model_file, map_location=device))
29
+ model.to(device)
30
+ model.eval()
31
+ results = []
32
+ transform = torchvision.transforms.Compose([
33
+ torchvision.transforms.Resize((224, 224)),
34
+ torchvision.transforms.ToTensor(),
35
+ torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
36
+ ])
37
+ class_names = ['Female', 'Male']
38
+
39
+ for img in img_list:
40
+ image = Image.fromarray(img).convert('RGB')
41
+ image = transform(image).unsqueeze(0).to(device)
42
+ with torch.no_grad():
43
+ outputs = model(image)
44
+ _, predicted = torch.max(outputs, 1)
45
+ res = class_names[predicted.item()]
46
+ results.append(res)
47
+ return results
48
+
49
+ def swap_faces(target_image, source_image):
50
+ if target_image is None or source_image is None:
51
+ return None
52
+
53
+ # Convert to OpenCV BGR
54
+ target_img = cv2.cvtColor(np.array(target_image), cv2.COLOR_RGB2BGR)
55
+ source_img = cv2.cvtColor(np.array(source_image), cv2.COLOR_RGB2BGR)
56
+ res = target_img.copy()
57
+
58
+ # Detect faces
59
+ faces_target = detect_faces(target_img)
60
+ faces_source = detect_faces(source_img)
61
+
62
+ if len(faces_target) == 0 or len(faces_source) == 0:
63
+ return target_image # yüz bulunmazsa orijinali döndür
64
+
65
+ # Load swapper
66
+ swapper = insightface.model_zoo.get_model('inswapper_128.onnx',
67
+ download=False,
68
+ download_zip=False)
69
+
70
+ # İlk source yüzünü alıyoruz
71
+ source_face = faces_source[0]
72
+
73
+ for face in faces_target:
74
+ res = swapper.get(res, face, source_face, paste_back=True)
75
+
76
+ res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
77
+ return res
78
+
79
+ # Gradio arayüzü
80
+ with gr.Blocks() as demo:
81
+ gr.Markdown("### Face Swapper Uygulaması")
82
+
83
+ with gr.Row():
84
+ with gr.Column():
85
+ target_input = gr.Image(label="Hedef Resim", type="pil")
86
+ source_input = gr.Image(label="Kaynak Resim", type="pil")
87
+ process_button = gr.Button("Face Swap Yap", variant="primary")
88
+
89
+ with gr.Column():
90
+ output_image = gr.Image(label="İşlem Sonucu")
91
+
92
+ process_button.click(
93
+ fn=swap_faces,
94
+ inputs=[target_input, source_input],
95
+ outputs=[output_image]
96
+ )
97
+
98
+ demo.launch()
inswapper_128.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4a3f08c753cb72d04e10aa0f7dbe3deebbf39567d4ead6dce08e98aa49e16af
3
+ size 554253681
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ onnxruntime
2
+ insightface
3
+ numpy
4
+ opencv-python
5
+ torch
6
+ torchvision
7
+ gradio
resnet50.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:372dcdf7c4650ce475025018dc663eae35da8dbcfa3a1b0b4ae85a49785dcc3d
3
+ size 94366454