Update app.py
Browse files
app.py
CHANGED
|
@@ -1,52 +1,53 @@
|
|
|
|
|
|
|
|
| 1 |
from moviepy.editor import *
|
| 2 |
-
import face_recognition
|
| 3 |
from path import Path
|
| 4 |
from PIL import Image, ImageDraw
|
| 5 |
import numpy as np
|
| 6 |
|
| 7 |
import gradio
|
| 8 |
import os
|
| 9 |
-
|
|
|
|
| 10 |
|
|
|
|
| 11 |
|
| 12 |
def procss_video(video_str):
|
| 13 |
source_frames = []
|
| 14 |
-
|
| 15 |
clip = VideoFileClip(video_str)
|
| 16 |
for item in clip.iter_frames():
|
| 17 |
source_frames.append(item)
|
| 18 |
|
| 19 |
audioclip = clip.audio
|
| 20 |
-
im2 = Image.open("mask_output.png")
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
|
| 25 |
|
|
|
|
|
|
|
| 26 |
|
|
|
|
| 27 |
dealed_frames = []
|
| 28 |
-
batch_size = 8
|
| 29 |
-
for i in range(0, len(source_frames), batch_size):
|
| 30 |
-
frames = source_frames[i:i+batch_size]
|
| 31 |
-
# print('processing framese from {} to {}'.format(i, i+batch_size))
|
| 32 |
-
batch_of_face_locations = face_recognition.batch_face_locations(frames, number_of_times_to_upsample=2, batch_size = batch_size)
|
| 33 |
|
| 34 |
-
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
# Loop through each face found in the unknown image
|
| 40 |
-
for (top, right, bottom, left) in face_locations:
|
| 41 |
|
| 42 |
-
|
| 43 |
-
pil_image.paste(im, (left, int((top+bottom)/2)), im)
|
| 44 |
-
dealed_frames.append(np.array(pil_image))
|
| 45 |
|
| 46 |
-
output_clip = ImageSequenceClip(dealed_frames, fps=
|
| 47 |
new_audioclip = CompositeAudioClip([audioclip])
|
| 48 |
output_clip.audio = new_audioclip
|
| 49 |
-
output_clip.write_videofile(os.path.join('video', 'processed_'+Path(video_str).name),
|
| 50 |
|
| 51 |
return os.path.join('video', 'processed_'+Path(video_str).name)
|
| 52 |
# os.mkdir('video')
|
|
|
|
from moviepy.editor import *
from path import Path
from PIL import Image, ImageDraw
import numpy as np

import gradio
import os
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

# Ensure the output directory exists before any video is written.
os.makedirs('video', exist_ok=True)


def procss_video(video_str):
    """Overlay a PNG mask on every detected face in a video.

    Loads the clip at ``video_str``, runs insightface face detection on
    each frame, pastes a resized ``mask_output.png`` (using its alpha
    channel) over the lower half of every detected face, then writes the
    result — with the original audio track — to
    ``video/processed_<input name>``.

    Parameters
    ----------
    video_str : str
        Path to the input video file.

    Returns
    -------
    str
        Path of the processed output video.
    """
    clip = VideoFileClip(video_str)
    source_frames = [frame for frame in clip.iter_frames()]
    audioclip = clip.audio

    # NOTE(review): constructing and preparing the detector on every call
    # is expensive; consider hoisting it to module level if this function
    # is invoked repeatedly (e.g. from a gradio handler).
    detector = FaceAnalysis(providers=['CUDAExecutionProvider'])
    detector.prepare(ctx_id=0, det_size=(640, 640))

    mask = Image.open('mask_output.png')

    processed_frames = []
    for frame in source_frames:
        pil_image = Image.fromarray(frame)
        for face in detector.get(frame):
            # insightface bbox is [x1, y1, x2, y2].  np.int was removed in
            # NumPy 1.24, so cast with the builtin int instead.
            left, top, right, bottom = face.bbox.astype(int)
            # PIL's resize takes (width, height); scale the mask to 80% of
            # the face box.  (The original passed them swapped, which only
            # looked right for near-square faces.)
            face_mask = mask.resize((int(abs(right - left) * 0.8),
                                     int(abs(bottom - top) * 0.8)))
            # Paste over the lower half of the face, using the mask image
            # itself as the alpha mask.
            pil_image.paste(face_mask, (left, int((top + bottom) / 2)), face_mask)
        processed_frames.append(np.array(pil_image))

    output_clip = ImageSequenceClip(processed_frames, fps=clip.fps)
    output_clip.audio = CompositeAudioClip([audioclip])
    out_path = os.path.join('video', 'processed_' + Path(video_str).name)
    output_clip.write_videofile(out_path, codec="libx264", audio_codec="aac")
    return out_path
|