import os
import argparse
import subprocess
import platform

import numpy as np
import cv2
import torch
from tqdm import tqdm

from face_detection import FaceAlignment, LandmarksType
from wav2lip_models import Wav2Lip
from face_parsing import init_parser, swap_regions
from esrgan.upsample import upscale, load_sr
from basicsr.utils.download_util import load_file_from_url
import audio
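
# Pipeline overview: read frames from --face, detect and crop the face in each
# frame, feed 96x96 face crops together with mel-spectrogram chunks of --audio
# through Wav2Lip, then paste the generated mouth region back into the frame,
# optionally refined by face segmentation and super-resolution (ESRGAN by
# default, or GFP-GAN / CodeFormer via --enhance_face).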


def parse_arguments():
    parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')
    parser.add_argument('--checkpoint_path', type=str, default="checkpoints/wav2lip_gan.pth",
                        help='Name of saved checkpoint to load weights from', required=False)
    parser.add_argument('--segmentation_path', type=str, default="checkpoints/face_segmentation.pth",
                        help='Name of saved checkpoint of the segmentation network', required=False)
    parser.add_argument('--sr_path', type=str, default='weights/4x_BigFace_v3_Clear.pth',
                        help='Name of saved checkpoint of the super-resolution network', required=False)
    parser.add_argument('--face', type=str,
                        help='Filepath of the video/image that contains the face to use', required=True)
    parser.add_argument('--audio', type=str,
                        help='Filepath of the video/audio file to use as the raw audio source', required=True)
    parser.add_argument('--outfile', type=str, help='Video path to save the result. See the default for an example',
                        default='results/result_voice.mp4')
    parser.add_argument('--static', action='store_true',
                        help='If set, use only the first video frame for inference')
    parser.add_argument('--fps', type=float, help='Can be specified only if the input is a static image (default: 25)',
                        default=25., required=False)
    parser.add_argument('--pads', nargs=4, type=int, default=[0, 10, 0, 0],
                        help='Padding (top, bottom, left, right). Please adjust to include at least the chin')
    parser.add_argument('--face_det_batch_size', type=int, help='Batch size for face detection', default=32)
    parser.add_argument('--wav2lip_batch_size', type=int, help='Batch size for Wav2Lip model(s)', default=256)
    parser.add_argument('--resize_factor', default=1, type=int,
                        help='Reduce the resolution by this factor. Sometimes the best results are obtained at 480p or 720p')
    parser.add_argument('--crop', nargs=4, type=int, default=[0, -1, 0, -1],
                        help='Crop the video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate. '
                             'Useful if multiple faces are present. -1 means the value is auto-inferred from the height/width')
    # NOTE: --box is parsed here but not consumed anywhere else in this script.
    parser.add_argument('--box', nargs=4, type=int, default=[-1, -1, -1, -1],
                        help='Specify a constant bounding box for the face. Use only as a last resort if the face is not detected. '
                             'Also, it may work only if the face does not move around much. Syntax: (top, bottom, left, right)')
    parser.add_argument('--rotate', action='store_true',
                        help='Sometimes videos taken on a phone are rotated by 90 degrees. If set, the video is rotated 90 degrees clockwise. '
                             'Use this if you get a sideways result despite feeding a normal-looking video')
    parser.add_argument('--nosmooth', action='store_true',
                        help='Prevent smoothing face detections over a short temporal window')
    parser.add_argument('--no_seg', action='store_true',
                        help='Disable face segmentation')
    parser.add_argument('--no_sr', action='store_true',
                        help='Disable super-resolution')
    parser.add_argument('--enhance_face', choices=['gfpgan', 'codeformer'],
                        help='Use GFP-GAN or CodeFormer to enhance facial details')
    parser.add_argument('-w', '--fidelity_weight', type=float, default=0.75,
                        help='Balance quality and fidelity (used by CodeFormer). Default: 0.75')
    parser.add_argument('--save_frames', action='store_true',
                        help='Save each frame as an image. Use with caution')
    parser.add_argument('--gt_path', type=str,
                        help='Where to store saved ground-truth frames', required=False)
    parser.add_argument('--pred_path', type=str,
                        help='Where to store frames produced by the algorithm', required=False)
    parser.add_argument('--save_as_video', action="store_true", default=False,
                        help='Whether to save the gt/pred frames as videos instead of images', required=False)
    parser.add_argument('--image_prefix', type=str, default="",
                        help='Prefix to save frames with', required=False)

    args = parser.parse_args()
    # A single image input implies static mode.
    if os.path.isfile(args.face) and os.path.splitext(args.face)[1].lower() in ['.jpg', '.png', '.jpeg']:
        args.static = True
    args.img_size = 96  # Wav2Lip operates on 96x96 face crops
    return args
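
# Example invocation (file paths and the script name are illustrative):
#   python inference.py --face input/speaker.mp4 --audio input/speech.wav \
#                       --outfile results/result_voice.mp4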


def get_smoothened_boxes(boxes, T):
    # Moving average over the last T boxes to reduce jitter in the detections.
    # boxes is an integer array, so the in-place assignment truncates the mean
    # back to integer pixel coordinates.
    for i in range(len(boxes)):
        window = boxes[max(i - T + 1, 0):i + 1]
        boxes[i] = np.mean(window, axis=0)
    return boxes
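
# With T=5, box i is replaced by the mean of boxes max(0, i-4)..i. Because the
# update happens in place, earlier entries in each window are already smoothed,
# so the filter behaves more like recursive smoothing than a plain moving
# average.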


def face_detect(detector, images, args):
    predictions = []
    batch_size = args.face_det_batch_size

    try:
        for i in range(0, len(images), batch_size):
            batch_images = np.array(images[i:i + batch_size])
            predictions.extend(detector.get_detections_for_batch(batch_images))
    except RuntimeError:
        if batch_size == 1:
            raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')
        # Halve the batch size and retry. Persist it on args so the recursive
        # call (and any later calls) do not start over at the original size.
        args.face_det_batch_size = batch_size // 2
        print(f'Recovering from OOM error; new batch size: {args.face_det_batch_size}')
        return face_detect(detector, images, args)

    results = []
    pady1, pady2, padx1, padx2 = args.pads
    for rect, image in zip(predictions, images):
        if rect is None:
            # No face in this frame: skip it. Callers must check the result
            # length, since it can be shorter than the input list.
            continue
        y1 = max(0, rect[1] - pady1)
        y2 = min(image.shape[0], rect[3] + pady2)
        x1 = max(0, rect[0] - padx1)
        x2 = min(image.shape[1], rect[2] + padx2)
        results.append([x1, y1, x2, y2])

    boxes = np.array(results)
    if not args.nosmooth and len(boxes) > 0:
        boxes = get_smoothened_boxes(boxes, T=5)
    results = [[image[y1:y2, x1:x2], (y1, y2, x1, x2)]
               for image, (x1, y1, x2, y2) in zip(images, boxes)]
    return results


def datagen(mels, reader, detector, args):
    img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []

    for m in mels:
        frame_to_save = next(reader, None)
        if frame_to_save is None:
            # The video is shorter than the audio: loop it from the start.
            reader = read_frames(args.face, args.resize_factor, args.rotate, args.crop)
            frame_to_save = next(reader, None)
            if frame_to_save is None:
                break

        face_detect_result = face_detect(detector, [frame_to_save], args)
        if len(face_detect_result) > 0:  # face detection was successful
            face, coords = face_detect_result[0]
            face = cv2.resize(face, (args.img_size, args.img_size))

            img_batch.append(face)
            mel_batch.append(m)
            frame_batch.append(frame_to_save)
            coords_batch.append(coords)
        # Frames with no detected face are dropped together with their mel
        # chunk, which shortens the output relative to the audio.

        if len(img_batch) >= args.wav2lip_batch_size:
            img_batch_np = np.asarray(img_batch)
            mel_batch_np = np.asarray(mel_batch)

            # Mask the lower half of each face; the model inpaints the mouth
            # region conditioned on the audio, using the unmasked copy as a
            # reference for identity and pose (6 input channels in total).
            img_masked = img_batch_np.copy()
            img_masked[:, args.img_size // 2:] = 0
            img_batch_np = np.concatenate((img_masked, img_batch_np), axis=3) / 255.0
            mel_batch_np = mel_batch_np.reshape(len(mel_batch_np), mel_batch_np.shape[1], mel_batch_np.shape[2], 1)

            yield img_batch_np, mel_batch_np, frame_batch, coords_batch
            img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []

    if len(img_batch) > 0:
        img_batch_np = np.asarray(img_batch)
        mel_batch_np = np.asarray(mel_batch)
        img_masked = img_batch_np.copy()
        img_masked[:, args.img_size // 2:] = 0
        img_batch_np = np.concatenate((img_masked, img_batch_np), axis=3) / 255.0
        mel_batch_np = mel_batch_np.reshape(len(mel_batch_np), mel_batch_np.shape[1], mel_batch_np.shape[2], 1)
        yield img_batch_np, mel_batch_np, frame_batch, coords_batch
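
# Each mel chunk passed to datagen is 16 spectrogram frames wide. Assuming the
# standard Wav2Lip audio hyperparameters (16 kHz sampling, hop size 200), the
# mel spectrogram runs at 80 frames per second, so one chunk corresponds to
# 0.2 s of audio aligned with its video frame.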


def load_checkpoint(checkpoint_path, device):
    if device == 'cuda':
        checkpoint = torch.load(checkpoint_path)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    return checkpoint


def load_model(checkpoint_path, device):
    model = Wav2Lip()
    print(f"Loading checkpoint from: {checkpoint_path}")
    checkpoint = load_checkpoint(checkpoint_path, device)
    state_dict = checkpoint["state_dict"]
    new_state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
    model.load_state_dict(new_state_dict)
    model = model.to(device)
    model.eval()
    return model
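
# Checkpoints trained with torch.nn.DataParallel store parameters under a
# 'module.' prefix; stripping it lets the weights load into a bare Wav2Lip
# module regardless of how the checkpoint was saved.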


def read_frames(face_path, resize_factor, rotate, crop):
    if os.path.splitext(face_path)[1].lower() in ['.jpg', '.png', '.jpeg']:
        # Static image: preprocess once, then yield the same frame forever.
        face = cv2.imread(face_path)
        if resize_factor > 1:
            face = cv2.resize(face, (face.shape[1] // resize_factor, face.shape[0] // resize_factor))
        if rotate:
            face = cv2.rotate(face, cv2.ROTATE_90_CLOCKWISE)
        y1, y2, x1, x2 = crop
        if x2 == -1: x2 = face.shape[1]
        if y2 == -1: y2 = face.shape[0]
        face = face[y1:y2, x1:x2]
        while True:
            yield face
    else:
        video_stream = cv2.VideoCapture(face_path)
        print('Reading video frames from start...')
        while True:
            still_reading, frame = video_stream.read()
            if not still_reading:
                video_stream.release()
                break
            if resize_factor > 1:
                frame = cv2.resize(frame, (frame.shape[1] // resize_factor, frame.shape[0] // resize_factor))
            if rotate:
                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
            y1, y2, x1, x2 = crop
            if x2 == -1: x2 = frame.shape[1]
            if y2 == -1: y2 = frame.shape[0]
            frame = frame[y1:y2, x1:x2]
            yield frame
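
# Usage note: for an image input this generator never terminates, so the
# caller decides how many frames to consume (datagen stops when the mel chunks
# run out). For a video it ends at EOF, and datagen restarts it to loop the
# footage.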


def main():
    args = parse_arguments()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'Using {device} for inference.')

    # Initialize the models once, outside of any loops.
    detector = FaceAlignment(LandmarksType._2D, flip_input=False, device=device)

    if not args.no_seg:
        print("Loading segmentation network...")
        seg_net = init_parser(args.segmentation_path)
    else:
        seg_net = None

    if not args.no_sr:
        print("Loading super resolution model...")
        run_params = load_sr(args.sr_path, device, args.enhance_face)
    else:
        run_params = None

    model = load_model(args.checkpoint_path, device)
    print("Model loaded")
    if not os.path.isfile(args.face):
        raise ValueError('--face argument must be a valid path to a video/image file')

    # Make sure the output directory exists before writing temp/result files.
    out_dir = os.path.dirname(args.outfile)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    if not args.audio.endswith('.wav'):
        print('Extracting raw audio...')
        temp_wav = os.path.join(out_dir, 'temp.wav')
        command = f'ffmpeg -y -i "{args.audio}" -strict -2 "{temp_wav}"'
        subprocess.call(command, shell=True)
        args.audio = temp_wav

    wav = audio.load_wav(args.audio, 16000)
    mel = audio.melspectrogram(wav)
    print(mel.shape)

    if np.isnan(mel).any():
        raise ValueError('Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')
    mel_step_size = 16
    fps = args.fps
    if not args.static:
        video_stream = cv2.VideoCapture(args.face)
        fps = video_stream.get(cv2.CAP_PROP_FPS)
        video_stream.release()

    mel_idx_multiplier = 80.0 / fps
    mel_chunks = []
    i = 0
    while True:
        start_idx = int(i * mel_idx_multiplier)
        if start_idx + mel_step_size > mel.shape[1]:
            mel_chunks.append(mel[:, -mel_step_size:])
            break
        mel_chunks.append(mel[:, start_idx:start_idx + mel_step_size])
        i += 1

    print(f"Length of mel chunks: {len(mel_chunks)}")
    reader = read_frames(args.face, args.resize_factor, args.rotate, args.crop)
    generator = datagen(mel_chunks, reader, detector, args)

    # Probe the frame size with a separate reader so the main reader is not
    # advanced past the first frame.
    probe_reader = read_frames(args.face, args.resize_factor, args.rotate, args.crop)
    frame_h, frame_w = next(probe_reader).shape[:2]

    # Write the silent result to result.avi in the same directory as outfile;
    # the audio is muxed in with ffmpeg afterwards.
    result_avi = os.path.join(out_dir, "result.avi")
    out = cv2.VideoWriter(result_avi,
                          cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))

    gt_out = None
    pred_out = None
    if args.save_frames and args.save_as_video:
        gt_out = cv2.VideoWriter(os.path.join(out_dir, "gt.avi"),
                                 cv2.VideoWriter_fourcc(*'DIVX'), fps, (384, 384))
        pred_out = cv2.VideoWriter(os.path.join(out_dir, "pred.avi"),
                                   cv2.VideoWriter_fourcc(*'DIVX'), fps, (96, 96))
    abs_idx = 0
    for i, (img_batch, mel_batch, frames, coords) in enumerate(tqdm(
            generator, total=int(np.ceil(len(mel_chunks) / args.wav2lip_batch_size)))):
        # NHWC numpy batches -> NCHW float tensors for the model.
        img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
        mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)

        with torch.no_grad():
            pred = model(mel_batch, img_batch)
        pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.0

        for p, f, c in zip(pred, frames, coords):
            y1, y2, x1, x2 = c

            if args.save_frames:
                if args.save_as_video:
                    pred_out.write(p.astype(np.uint8))
                    gt_resized = cv2.resize(f[y1:y2, x1:x2], (384, 384))
                    gt_out.write(gt_resized)
                elif args.gt_path and args.pred_path:
                    os.makedirs(args.gt_path, exist_ok=True)
                    os.makedirs(args.pred_path, exist_ok=True)
                    cv2.imwrite(f"{args.gt_path}/{args.image_prefix}{abs_idx}.png", f[y1:y2, x1:x2])
                    cv2.imwrite(f"{args.pred_path}/{args.image_prefix}{abs_idx}.png", p)
                abs_idx += 1

            # Optional super-resolution of the 96x96 prediction before pasting
            # it back into the full frame.
            if not args.no_sr:
                if args.enhance_face == 'codeformer':
                    p = upscale(p, 2, [run_params, device, args.fidelity_weight])
                elif args.enhance_face == 'gfpgan':
                    p = upscale(p, 1, run_params)
                else:
                    p = upscale(p, 0, run_params)

            p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))
            if not args.no_seg and seg_net is not None:
                # Blend only the segmented facial region back into the frame.
                p = swap_regions(f[y1:y2, x1:x2], p, seg_net)
            f[y1:y2, x1:x2] = p
            out.write(f)

    out.release()

    # Mux the original audio into the silent result video.
    final_command = f'ffmpeg -y -i "{args.audio}" -i "{result_avi}" -strict -2 -q:v 1 "{args.outfile}"'
    subprocess.call(final_command, shell=(platform.system() != 'Windows'))

    if args.save_frames and args.save_as_video:
        gt_out.release()
        pred_out.release()
        gt_video_cmd = f'ffmpeg -y -i "{os.path.join(out_dir, "gt.avi")}" -i "{args.audio}" -strict -2 -q:v 1 "{args.gt_path}"'
        pred_video_cmd = f'ffmpeg -y -i "{os.path.join(out_dir, "pred.avi")}" -i "{args.audio}" -strict -2 -q:v 1 "{args.pred_path}"'
        subprocess.call(gt_video_cmd, shell=(platform.system() != 'Windows'))
        subprocess.call(pred_video_cmd, shell=(platform.system() != 'Windows'))


if __name__ == '__main__':
    main()