|
|
import sys |
|
|
import os |
|
|
import dlib |
|
|
import numpy as np |
|
|
from skimage import io |
|
|
import cv2 |
|
|
from imutils import face_utils |
|
|
import argparse |
|
|
import shutil |
|
|
import random |
|
|
|
|
|
import subprocess |
|
|
|
|
|
|
|
|
class NoFaceFound(Exception):
    """Raised when no face can be detected in an input image."""
|
|
|
|
|
def calculate_margin_help(img1, img2):
    """Return geometry shared by the cropping helpers.

    Gives back a 6-element list: both images' shapes, half the absolute
    height/width difference (the margin to trim from the larger image),
    and the midpoint (average) height/width used as the slice end.
    """
    shape_a = img1.shape
    shape_b = img2.shape

    half_diff_rows = abs(shape_a[0] - shape_b[0]) // 2
    half_diff_cols = abs(shape_a[1] - shape_b[1]) // 2
    mid_rows = (shape_a[0] + shape_b[0]) // 2
    mid_cols = (shape_a[1] + shape_b[1]) // 2

    return [shape_a, shape_b, half_diff_rows, half_diff_cols, mid_rows, mid_cols]
|
|
|
|
|
def crop_image(img1,img2):
    """Bring two images to the same shape by scaling and/or cropping.

    When one image is smaller in both dimensions it is upscaled (keeping
    aspect ratio, using the larger of the two per-axis scale factors) and
    the residual mismatch is trimmed by crop_image_help. When the sizes
    differ in opposite directions, each image is center-cropped along its
    larger axis. Returns [img1', img2'] with equal shapes.
    """
    # size1/size2 are .shape tuples; diff0/diff1 are half the height/width
    # gap; avg0/avg1 are midpoint height/width used as slice end points.
    [size1,size2,diff0,diff1,avg0,avg1] = calculate_margin_help(img1,img2)

    # Shapes already match: nothing to do.
    if(size1[0] == size2[0] and size1[1] == size2[1]):
        return [img1,img2]

    # img1 fits inside img2: shrink img2 toward img1's size, then trim.
    elif(size1[0] <= size2[0] and size1[1] <= size2[1]):
        scale0 = size1[0]/size2[0]
        scale1 = size1[1]/size2[1]
        # Use the larger scale so neither dimension drops below img1's.
        if(scale0 > scale1):
            res = cv2.resize(img2,None,fx=scale0,fy=scale0,interpolation=cv2.INTER_AREA)
        else:
            res = cv2.resize(img2,None,fx=scale1,fy=scale1,interpolation=cv2.INTER_AREA)
        return crop_image_help(img1,res)

    # img2 fits inside img1: the symmetric case, resizing img1 instead.
    elif(size1[0] >= size2[0] and size1[1] >= size2[1]):
        scale0 = size2[0]/size1[0]
        scale1 = size2[1]/size1[1]
        if(scale0 > scale1):
            res = cv2.resize(img1,None,fx=scale0,fy=scale0,interpolation=cv2.INTER_AREA)
        else:
            res = cv2.resize(img1,None,fx=scale1,fy=scale1,interpolation=cv2.INTER_AREA)
        return crop_image_help(res,img2)

    # img1 taller, img2 wider: crop img1's height and img2's width.
    elif(size1[0] >= size2[0] and size1[1] <= size2[1]):
        return [img1[diff0:avg0,:],img2[:,-diff1:avg1]]

    # img1 wider, img2 taller: crop img1's width and img2's height.
    # NOTE(review): this branch slices img2 with a NEGATIVE start
    # (-diff0:avg0) while crop_image_help's mirror branch uses a positive
    # start (diff0:avg0) — confirm which offset is intended; for diff0 == 0
    # both read from row 0, but for diff0 > 0 they select different rows.
    else:
        return [img1[:,diff1:avg1],img2[-diff0:avg0,:]]
|
|
|
|
|
def crop_image_help(img1, img2):
    """Center-crop whichever image is larger so both end up the same shape.

    Assumes at most a small size mismatch (e.g. after the resize done by
    crop_image). Returns [img1', img2'] with equal heights and widths.
    """
    size1, size2, diff0, diff1, avg0, avg1 = calculate_margin_help(img1, img2)
    h1, w1 = size1[0], size1[1]
    h2, w2 = size2[0], size2[1]

    # Same shape already — pass both through untouched.
    if h1 == h2 and w1 == w2:
        return [img1, img2]

    # img2 is at least as large in both dimensions: trim img2 only.
    if h1 <= h2 and w1 <= w2:
        return [img1, img2[-diff0:avg0, -diff1:avg1]]

    # img1 is at least as large in both dimensions: trim img1 only.
    if h1 >= h2 and w1 >= w2:
        return [img1[diff0:avg0, diff1:avg1], img2]

    # Mixed case: img1 taller but narrower — trim img1's rows, img2's columns.
    if h1 >= h2 and w1 <= w2:
        return [img1[diff0:avg0, :], img2[:, -diff1:avg1]]

    # Remaining mixed case: img1 wider but shorter.
    return [img1[:, diff1:avg1], img2[diff0:avg0, :]]
|
|
|
|
|
def generate_face_correspondences(theImage1, theImage2):
    """Detect 68 facial landmarks in each image and build correspondence lists.

    The two images are first normalized to a common shape via crop_image.
    For each image, dlib's 68-point predictor produces landmark coordinates,
    which are appended to list1/list2 along with 8 fixed border/midpoint
    points. `narray` is the per-landmark average of the two images'
    coordinates (corresp accumulates both, then is halved) plus the same
    8 border points.

    Returns [size, cropped_img1, cropped_img2, list1, list2, narray].

    Requires ./models/shape_predictor_68_face_landmarks.dat on disk.
    """
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
    # Accumulator for the summed (x, y) of each of the 68 landmarks
    # across both images; divided by 2 below to get the average shape.
    corresp = np.zeros((68,2))

    imgList = crop_image(theImage1,theImage2)
    list1 = []
    list2 = []
    # j selects which output list the current image's points go into.
    j = 1

    for img in imgList:

        size = (img.shape[0],img.shape[1])
        if(j == 1):
            currList = list1
        else:
            currList = list2

        # Upsample once (second arg = 1) to help find smaller faces.
        dets = detector(img, 1)

        # NOTE(review): when no face is found this only prints a message
        # and continues — the landmark loop below then runs zero times, so
        # list1/list2 end up with different lengths and corresp/2 averages
        # a single face. Consider letting NoFaceFound propagate instead.
        try:
            if len(dets) == 0:
                raise NoFaceFound
        except NoFaceFound:
            print("Sorry, but I couldn't find a face in the image.")

        j=j+1

        # If multiple faces are detected, all contribute to corresp and
        # currList — presumably images contain exactly one face; verify.
        for k, rect in enumerate(dets):

            # Fit the 68-landmark model inside the detected rectangle.
            shape = predictor(img, rect)

            for i in range(0,68):
                x = shape.part(i).x
                y = shape.part(i).y
                currList.append((x, y))
                corresp[i][0] += x
                corresp[i][1] += y

        # Anchor points: image corners plus edge midpoints, so the
        # Delaunay triangulation covers the whole frame, not just the face.
        currList.append((1,1))
        currList.append((size[1]-1,1))
        currList.append(((size[1]-1)//2,1))
        currList.append((1,size[0]-1))
        currList.append((1,(size[0]-1)//2))
        currList.append(((size[1]-1)//2,size[0]-1))
        currList.append((size[1]-1,size[0]-1))
        currList.append(((size[1]-1),(size[0]-1)//2))

    # Average landmark positions of the two faces, then append the same
    # 8 border anchors (size refers to the last image processed; both
    # images share a shape after crop_image).
    narray = corresp/2
    narray = np.append(narray,[[1,1]],axis=0)
    narray = np.append(narray,[[size[1]-1,1]],axis=0)
    narray = np.append(narray,[[(size[1]-1)//2,1]],axis=0)
    narray = np.append(narray,[[1,size[0]-1]],axis=0)
    narray = np.append(narray,[[1,(size[0]-1)//2]],axis=0)
    narray = np.append(narray,[[(size[1]-1)//2,size[0]-1]],axis=0)
    narray = np.append(narray,[[size[1]-1,size[0]-1]],axis=0)
    narray = np.append(narray,[[(size[1]-1),(size[0]-1)//2]],axis=0)

    return [size,imgList[0],imgList[1],list1,list2,narray]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def rect_contains(rect, point):
    """Return True if point (x, y) lies within rect, borders inclusive.

    rect is (x_min, y_min, x_max, y_max).
    """
    x, y = point[0], point[1]
    return rect[0] <= x <= rect[2] and rect[1] <= y <= rect[3]
|
|
|
|
|
|
|
|
def draw_delaunay(f_w, f_h, subdiv, dictionary1):
    """Convert the subdivision's triangles into landmark-index triples.

    Args:
        f_w, f_h: frame width and height bounding valid vertices.
        subdiv: a cv2.Subdiv2D already populated with the landmark points.
        dictionary1: maps an (x, y) point to its landmark index.

    Returns:
        A list of (i, j, k) index triples, one per triangle whose three
        vertices all fall inside the frame (Subdiv2D adds far-outside
        helper vertices, which this filter discards).
    """
    triangles = []

    triangle_list = subdiv.getTriangleList()
    frame_rect = (0, 0, f_w, f_h)

    for t in triangle_list:
        # Each row is a flat (x1, y1, x2, y2, x3, y3) triangle.
        pt1 = (int(t[0]), int(t[1]))
        pt2 = (int(t[2]), int(t[3]))
        pt3 = (int(t[4]), int(t[5]))

        if rect_contains(frame_rect, pt1) and rect_contains(frame_rect, pt2) and rect_contains(frame_rect, pt3):
            triangles.append((dictionary1[pt1], dictionary1[pt2], dictionary1[pt3]))

    # Removed dead statement `dictionary1 = {}`: rebinding the local name
    # had no effect on the caller's dictionary and served no purpose.
    return triangles
|
|
|
|
|
def make_delaunay(f_w, f_h, theList, img1, img2):
    """Delaunay-triangulate the averaged landmark points.

    Args:
        f_w, f_h: frame width and height.
        theList: numpy array of (x, y) landmark points (the averaged
            shape from generate_face_correspondences).
        img1, img2: unused; kept for interface compatibility with callers.

    Returns:
        List of (i, j, k) landmark-index triples describing the triangles.
    """
    rect = (0, 0, f_w, f_h)

    # Incremental planar subdivision bounded by the frame rectangle.
    subdiv = cv2.Subdiv2D(rect)

    points = [(int(x[0]), int(x[1])) for x in theList.tolist()]

    # Map each point back to its index so triangles can be reported as
    # index triples. Was hard-coded as range(76) (68 landmarks + 8 border
    # anchors); enumerate keeps identical behavior for <= 76 points and
    # also handles any other point count correctly.
    dictionary = {p: i for i, p in enumerate(points)}

    for p in points:
        subdiv.insert(p)

    return draw_delaunay(f_w, f_h, subdiv, dictionary)
|
|
|
|
|
|
|
|
|
|
|
import numpy as np |
|
|
import cv2 |
|
|
import sys |
|
|
import os |
|
|
import math |
|
|
from subprocess import Popen, PIPE |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
|
|
|
def apply_affine_transform(src, srcTri, dstTri, size):
    """Warp src so that triangle srcTri maps onto dstTri.

    Returns a patch of size (size[0] wide, size[1] tall). Pixels sampled
    outside src are reflected (BORDER_REFLECT_101) to avoid dark seams.
    """
    # Affine matrix taking the three source vertices to the destination ones.
    warp_matrix = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))

    return cv2.warpAffine(
        src,
        warp_matrix,
        (size[0], size[1]),
        None,
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT_101,
    )
|
|
|
|
|
|
|
|
|
|
|
def morph_triangle(img1, img2, img, t1, t2, t, alpha) :
    """Blend one corresponding triangle from img1 and img2 into img.

    t1, t2 are the triangle's vertices in img1/img2; t is the interpolated
    triangle in the output frame. Both source triangles are warped onto t,
    cross-dissolved with weight alpha (0 = all img1, 1 = all img2), and
    composited into img in place via a triangle mask.
    """
    # Axis-aligned bounding rect (x, y, w, h) of each triangle; all work
    # below happens in these small patches rather than the full images.
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    r = cv2.boundingRect(np.float32([t]))

    # Triangle vertices re-expressed relative to their bounding rect origin.
    t1Rect = []
    t2Rect = []
    tRect = []

    for i in range(0, 3):
        tRect.append(((t[i][0] - r[0]),(t[i][1] - r[1])))
        t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))

    # Binary (float) mask of the output triangle within its bounding rect.
    mask = np.zeros((r[3], r[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0)

    # Source patches under each triangle's bounding rect.
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    img2Rect = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]

    size = (r[2], r[3])
    # Warp each source triangle patch onto the output triangle's geometry.
    warpImage1 = apply_affine_transform(img1Rect, t1Rect, tRect, size)
    warpImage2 = apply_affine_transform(img2Rect, t2Rect, tRect, size)

    # Cross-dissolve the two warped patches.
    imgRect = (1.0 - alpha) * warpImage1 + alpha * warpImage2

    # Composite only the triangle's pixels into the output frame (in place).
    img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] * ( 1 - mask ) + imgRect * mask
|
|
|
|
|
|
|
|
def generate_morph_sequence(duration, frame_rate, img1, img2, points1, points2, tri_list, size, output):
    """Render the morph from img1 to img2 and encode it to `output` via ffmpeg.

    Args:
        duration: clip length in seconds.
        frame_rate: frames per second; duration * frame_rate morph frames
            are generated.
        img1, img2: BGR images of identical shape.
        points1, points2: corresponding landmark lists for each image.
        tri_list: (i, j, k) landmark-index triples from make_delaunay.
        size: (height, width) of the frames.
        output: path of the encoded video file.

    Requires the `ffmpeg` binary on PATH; frames are piped to its stdin
    as JPEGs ('image2pipe') and encoded with libx264. The scale filter
    forces even dimensions, which the yuv420p pixel format requires.
    """
    num_images = int(duration * frame_rate)
    p = subprocess.Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-r', str(frame_rate), '-s', str(size[1])+'x'+str(size[0]), '-i', '-', '-c:v', 'libx264', '-crf', '25', '-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2', '-pix_fmt', 'yuv420p', output], stdin=subprocess.PIPE)

    # Hold the first image for 10 frames so the clip doesn't start abruptly.
    for _ in range(10):
        res = Image.fromarray(cv2.cvtColor(np.uint8(img1), cv2.COLOR_BGR2RGB))
        res.save(p.stdin, 'JPEG')

    for j in range(0, num_images):
        img1 = np.float32(img1)
        img2 = np.float32(img2)
        points = []
        # Blend weight runs 0 -> 1 across the sequence. Guard num_images == 1
        # (duration * frame_rate == 1), which previously divided by zero.
        alpha = j / (num_images - 1) if num_images > 1 else 0

        # Linearly interpolate every landmark between the two images.
        for i in range(0, len(points1)):
            x = (1 - alpha) * points1[i][0] + alpha * points2[i][0]
            y = (1 - alpha) * points1[i][1] + alpha * points2[i][1]
            points.append((x, y))

        morphed_frame = np.zeros(img1.shape, dtype=img1.dtype)

        # Warp and blend each Delaunay triangle into the frame.
        for i in range(len(tri_list)):
            x = int(tri_list[i][0])
            y = int(tri_list[i][1])
            z = int(tri_list[i][2])

            t1 = [points1[x], points1[y], points1[z]]
            t2 = [points2[x], points2[y], points2[z]]
            t = [points[x], points[y], points[z]]

            morph_triangle(img1, img2, morphed_frame, t1, t2, t, alpha)

        res = Image.fromarray(cv2.cvtColor(np.uint8(morphed_frame), cv2.COLOR_BGR2RGB))
        res.save(p.stdin, 'JPEG')

    # Close stdin to signal end-of-stream, then wait for ffmpeg to finish.
    p.stdin.close()
    p.wait()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def doMorphing(image_paths, duration, frame_rate, output):
    """Morph each consecutive image pair, concatenate the clips, and emit a GIF.

    For every adjacent pair in image_paths a `{output}_{i}.mp4` segment is
    rendered, the segments are concatenated into `{output}_combined.mp4`
    with ffmpeg's concat demuxer, and a palette-optimized `{output}.gif`
    is produced from the combined video.
    """
    segment_files = []
    for idx in range(len(image_paths) - 1):
        first = cv2.imread(image_paths[idx])
        second = cv2.imread(image_paths[idx + 1])
        # Align the pair, find landmarks, and triangulate the average shape.
        size, first, second, points1, points2, list3 = generate_face_correspondences(first, second)
        tri = make_delaunay(size[1], size[0], list3, first, second)
        segment = f"{output}_{idx}.mp4"
        generate_morph_sequence(duration, frame_rate, first, second, points1, points2, tri, size, segment)
        segment_files.append(segment)

    # Concatenate the per-pair segments without re-encoding ('-c copy').
    concat_command = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', 'files.txt', '-c', 'copy', f"{output}_combined.mp4"]
    with open('files.txt', 'w') as listing:
        listing.writelines(f"file '{segment}'\n" for segment in segment_files)
    subprocess.run(concat_command)
    os.remove('files.txt')

    # Two-pass GIF encode: first generate an optimized palette...
    palette_command = [
        'ffmpeg', '-y', '-i', f"{output}_combined.mp4", '-vf', 'fps=10,scale=600:-1:flags=lanczos,palettegen', f'{output}_palette.png'
    ]
    subprocess.run(palette_command)

    # ...then apply it while downscaling.
    apply_command = [
        'ffmpeg', '-y', '-i', f"{output}_combined.mp4", '-i', f'{output}_palette.png', '-filter_complex',
        'fps=10,scale=320:-1:flags=lanczos[x];[x][1:v]paletteuse', f'{output}.gif'
    ]
    subprocess.run(apply_command)
    os.remove(f'{output}_palette.png')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|