Spaces: Runtime error

Update sonic.py
sonic.py CHANGED
@@ -1,109 +1,122 @@
-
-
-
- • check that config.pretrained_model_name_or_path is an actual folder
- • if it is not, download automatically via huggingface_hub.snapshot_download
- • proceed with model loading once the path is ready
- """
- import os, math, torch, cv2
  from PIL import Image
  from omegaconf import OmegaConf
- from tqdm
-
  from transformers import WhisperModel, CLIPVisionModelWithProjection, AutoFeatureExtractor
-
  from src.utils.util import save_videos_grid, seed_everything
  from src.dataset.test_preprocess import process_bbox, image_audio_to_tensor
- from src.models.base.unet_spatio_temporal_condition import
  from src.pipelines.pipeline_sonic import SonicPipeline
  from src.models.audio_adapter.audio_proj import AudioProjModel
  from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
  from src.utils.RIFE.RIFE_HDv3 import RIFEModel
  from src.dataset.face_align.align import AlignImage
-
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
- HF_STABLE_REPO = "stabilityai/stable-video-diffusion-img2vid-xt"
- LOCAL_STABLE_DIR = os.path.join(BASE_DIR, "checkpoints", "stable-video-diffusion-img2vid-xt")


- #
- #
- #
- def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, img_enc,
           width, height, batch):

-     #
      for k, v in batch.items():
          if isinstance(v, torch.Tensor):
-             batch[k] = v.unsqueeze(0).

-     ref_img = batch[
-     clip_img = batch[
-     face_mask = batch[
-

-
-     audio_len
-     step

-     window = 16_000
-

-     for i in range(0,
-         chunk =
-
-
-
-

-     if not
-         raise ValueError("

-     audio_prompts = torch.cat(
-     last_prompts = torch.cat(

-     #
-     audio_prompts = torch.cat(
-
-
-
-         last_prompts,
-         torch.zeros_like(last_prompts[:,:26]) ], 1)

-
-
-

-

-     for i in tqdm(range(
          st = i * 2 * step

-
-
-
-
-
-
-
-
-
-
-         buck = buck[:, :50].permute(0,2,1,3).reshape(1, 50, 10, 5, 384)
-
-         motion = audio2bucket(buck, img_emb) * 16 + 16
-
-         ref_L.append(ref_img[0])
-         aud_L.append(audio_pe(cond).squeeze(0))   # (10,1024)
-         uncond_L.append(audio_pe(torch.zeros_like(cond)).squeeze(0))
          buckets.append(motion[0])

-
-     vid = pipe(
          ref_img, clip_img, face_mask,
-
          height=height, width=width,
-         num_frames=len(
          decode_chunk_size=cfg.decode_chunk_size,
          motion_bucket_scale=cfg.motion_bucket_scale,
          fps=cfg.fps,
@@ -119,13 +132,12 @@ def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, img_enc,
          i2i_noise_strength=cfg.i2i_noise_strength,
      ).frames

-     return (
-

- # ------------------------------------------------------------------
- # Sonic wrapper
- # ------------------------------------------------------------------

  class Sonic:
      config_file = os.path.join(BASE_DIR, "config/inference/sonic.yaml")
      config = OmegaConf.load(config_file)
@@ -135,92 +147,125 @@ class Sonic:
          cfg.use_interframe = enable_interpolate_frame
          self.device = f"cuda:{device_id}" if torch.cuda.is_available() and device_id >= 0 else "cpu"

-         #
-
-
-         snapshot_download(repo_id=HF_STABLE_REPO,
-                           local_dir=LOCAL_STABLE_DIR,
-                           resume_download=True,
-                           local_dir_use_symlinks=False)
-         cfg.pretrained_model_name_or_path = LOCAL_STABLE_DIR
-         # ------------------------------------------------------------------
-
-         self._load_models(cfg)
-         print("Sonic init done")

-     #
-     def _load_models(self, cfg):
          dtype = {"fp16": torch.float16, "fp32": torch.float32, "bf16": torch.bfloat16}[cfg.weight_dtype]

          vae = AutoencoderKLTemporalDecoder.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="vae", variant="fp16")
          sched = EulerDiscreteScheduler.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="scheduler")
-
          unet = UNetSpatioTemporalConditionModel.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="unet", variant="fp16")
          add_ip_adapters(unet, [32], [cfg.ip_audio_scale])

-
-
-
-
-
-
-
-
-
-
-
          self.face_det = AlignImage(self.device, det_path=os.path.join(BASE_DIR, "checkpoints/yoloface_v5m.pt"))
          if cfg.use_interframe:
-             self.rife = RIFEModel(device=self.device)

-
-         self.
-         self.

-     #
-     def preprocess(self,
-         img = cv2.imread(
-
-
-
-

-     #
      @torch.no_grad()
-     def process(self,
-                 min_resolution=512, inference_steps=25,
-

          cfg = self.config
-         if seed is not None:
          cfg.num_inference_steps = inference_steps
          cfg.motion_bucket_scale = dynamic_scale
          seed_everything(cfg.seed)

-
              self.face_det, self.feature_extractor,
-
-             image_size=min_resolution, area=cfg.area
          )
-         if

-         h,w =
-
-

          video = test(self.pipe, cfg, self.whisper, self.audio2token,
-                      self.audio2bucket, self.image_encoder,

-
-
              for i in tqdm(range(out.shape[2]-1), ncols=0):
-                 mid = self.rife.inference(out[:,:,i], out[:,:,i+1]).clamp(0,1)
-                 frames
-             frames.append(out[:,:,-1])
-
-
-
-
-
-         os.
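For reference, the deleted init block resolved the SVD weights with the standard huggingface_hub download-on-miss pattern. A condensed sketch of what those removed fragments did — the os.path.isdir guard is an assumption; the repo id, target directory, and call arguments are the ones visible in the removed lines above:

    import os
    from huggingface_hub import snapshot_download

    # download once, then point the config at the local copy (assumed guard)
    if not os.path.isdir(LOCAL_STABLE_DIR):
        snapshot_download(repo_id=HF_STABLE_REPO,
                          local_dir=LOCAL_STABLE_DIR,
                          resume_download=True,
                          local_dir_use_symlinks=False)
    cfg.pretrained_model_name_or_path = LOCAL_STABLE_DIR

The updated file drops this in favour of a fixed local checkpoint root; its new-side listing follows.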
+ # sonic.py (full file)
+
+ import os, math, glob, torch, cv2
  from PIL import Image
  from omegaconf import OmegaConf
+ from tqdm import tqdm
+
+ from diffusers import AutoencoderKLTemporalDecoder
+ from diffusers.schedulers import EulerDiscreteScheduler
  from transformers import WhisperModel, CLIPVisionModelWithProjection, AutoFeatureExtractor
+
  from src.utils.util import save_videos_grid, seed_everything
  from src.dataset.test_preprocess import process_bbox, image_audio_to_tensor
+ from src.models.base.unet_spatio_temporal_condition import (
+     UNetSpatioTemporalConditionModel, add_ip_adapters,
+ )
  from src.pipelines.pipeline_sonic import SonicPipeline
  from src.models.audio_adapter.audio_proj import AudioProjModel
  from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
  from src.utils.RIFE.RIFE_HDv3 import RIFEModel
  from src.dataset.face_align.align import AlignImage
+
+ try:
+     from safetensors.torch import load_file as safe_load
+ except ImportError:  # no safetensors installed → fall back to torch.load only
+     safe_load = None
+
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))

+ # -------------------------------------------------------------------
+ # shared: checkpoint (weights) lookup helper
+ # -------------------------------------------------------------------
+ def _find_ckpt(root: str, keyword: str):
+     """Search under root for a .pth / .pt / .safetensors file whose name contains keyword."""
+     patterns = [f"**/*{keyword}*.pth", f"**/*{keyword}*.pt",
+                 f"**/*{keyword}*.safetensors"]
+     files = []
+     for p in patterns:
+         files.extend(glob.glob(os.path.join(root, p), recursive=True))
+     return files[0] if files else None
+
+
+ # -------------------------------------------------------------------
+ # single image + speech → video tensor
+ # -------------------------------------------------------------------
+ def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, image_encoder,
           width, height, batch):

+     # add the batch dimension
      for k, v in batch.items():
          if isinstance(v, torch.Tensor):
+             batch[k] = v.unsqueeze(0).to(pipe.device).float()

+     ref_img = batch["ref_img"]
+     clip_img = batch["clip_images"]
+     face_mask = batch["face_mask"]
+     image_embeds = image_encoder(clip_img).image_embeds

+     audio_feature = batch["audio_feature"]   # (1, 80, T)
+     audio_len = int(batch["audio_len"])
+     step = max(1, int(cfg.step))  # at least 1

+     window = 16_000  # 1-second units
+     audio_prompts, last_prompts = [], []

+     for i in range(0, audio_feature.shape[-1], window):
+         chunk = audio_feature[:, :, i:i+window]
+         hidden_layers = wav_enc.encoder(chunk, output_hidden_states=True).hidden_states
+         last_hidden = wav_enc.encoder(chunk).last_hidden_state.unsqueeze(-2)
+         audio_prompts.append(torch.stack(hidden_layers, dim=2))
+         last_prompts.append(last_hidden)

+     if not audio_prompts:
+         raise ValueError("[ERROR] No speech recognised in the provided audio.")

+     audio_prompts = torch.cat(audio_prompts, dim=1)
+     last_prompts = torch.cat(last_prompts, dim=1)

+     # padding rule
+     audio_prompts = torch.cat(
+         [torch.zeros_like(audio_prompts[:, :4]),
+          audio_prompts,
+          torch.zeros_like(audio_prompts[:, :6])], dim=1)

+     last_prompts = torch.cat(
+         [torch.zeros_like(last_prompts[:, :24]),
+          last_prompts,
+          torch.zeros_like(last_prompts[:, :26])], dim=1)

+     total_tokens = audio_prompts.shape[1]
+     num_chunks = max(1, math.ceil(total_tokens / (2 * step)))

+     ref_list, audio_list, uncond_list, buckets = [], [], [], []

+     for i in tqdm(range(num_chunks)):
          st = i * 2 * step
+         cond = audio_prompts[:, st:st+10]
+         if cond.shape[1] < 10:  # pad along the token axis (dim 1), not the layer axis
+             pad = torch.zeros_like(cond[:, :10 - cond.shape[1]])
+             cond = torch.cat([cond, pad], dim=1)

+         bucket_clip = last_prompts[:, st:st+50]
+         if bucket_clip.shape[1] < 50:  # same token-axis rule for the bucket window
+             pad = torch.zeros_like(bucket_clip[:, :50 - bucket_clip.shape[1]])
+             bucket_clip = torch.cat([bucket_clip, pad], dim=1)

+         motion = audio2bucket(bucket_clip, image_embeds) * 16 + 16

+         ref_list.append(ref_img[0])
+         audio_list.append(audio_pe(cond).squeeze(0))
+         uncond_list.append(audio_pe(torch.zeros_like(cond)).squeeze(0))
          buckets.append(motion[0])

+     video = pipe(
          ref_img, clip_img, face_mask,
+         audio_list, uncond_list, buckets,
          height=height, width=width,
+         num_frames=len(audio_list),
          decode_chunk_size=cfg.decode_chunk_size,
          motion_bucket_scale=cfg.motion_bucket_scale,
          fps=cfg.fps,
          i2i_noise_strength=cfg.i2i_noise_strength,
      ).frames

+     return (video * 0.5 + 0.5).clamp(0, 1).unsqueeze(0).cpu()


+ # -------------------------------------------------------------------
+ # Sonic ✨
+ # -------------------------------------------------------------------
  class Sonic:
      config_file = os.path.join(BASE_DIR, "config/inference/sonic.yaml")
      config = OmegaConf.load(config_file)
          cfg.use_interframe = enable_interpolate_frame
          self.device = f"cuda:{device_id}" if torch.cuda.is_available() and device_id >= 0 else "cpu"

+         # weights root
+         ckpt_root = os.path.join(BASE_DIR, "checkpoints", "Sonic")
+         cfg.pretrained_model_name_or_path = ckpt_root  # diffusers layout

+         self._load_models(cfg, ckpt_root)
+         print("Sonic init done")

+     # --------------------------------------------------------------
+     def _load_models(self, cfg, ckpt_root):
          dtype = {"fp16": torch.float16, "fp32": torch.float32, "bf16": torch.bfloat16}[cfg.weight_dtype]

+         # base diffusers weights
          vae = AutoencoderKLTemporalDecoder.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="vae", variant="fp16")
          sched = EulerDiscreteScheduler.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="scheduler")
+         image_enc = CLIPVisionModelWithProjection.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="image_encoder", variant="fp16")
          unet = UNetSpatioTemporalConditionModel.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="unet", variant="fp16")
          add_ip_adapters(unet, [32], [cfg.ip_audio_scale])

+         # ------------ extra checkpoints (.pth / .safetensors) ------------
+         def _try_load(module, keyword):
+             path = _find_ckpt(ckpt_root, keyword)
+             if not path:
+                 print(f"[WARN] {keyword} checkpoint not found → skip")
+                 return
+             print(f"[INFO] load {keyword} ckpt → {os.path.relpath(path, BASE_DIR)}")
+             if path.endswith(".safetensors") and safe_load is not None:
+                 state = safe_load(path, device="cpu")
+             else:
+                 state = torch.load(path, map_location="cpu")
+             module.load_state_dict(state, strict=False)

+         _try_load(unet, "unet")
+         # audio adapters (required)
+         a2t = AudioProjModel(10, 5, 384, 1024, 1024, 32).to(self.device)
+         a2b = Audio2bucketModel(50, 1, 384, 1024, 1024, 1, 2).to(self.device)
+         _try_load(a2t, "audio2token")
+         _try_load(a2b, "audio2bucket")

+         # whisper tiny
+         whisper = WhisperModel.from_pretrained(
+             os.path.join(BASE_DIR, "checkpoints/whisper-tiny")
+         ).to(self.device).eval()
+         whisper.requires_grad_(False)

+         self.feature_extractor = AutoFeatureExtractor.from_pretrained(
+             os.path.join(BASE_DIR, "checkpoints/whisper-tiny")
+         )
          self.face_det = AlignImage(self.device, det_path=os.path.join(BASE_DIR, "checkpoints/yoloface_v5m.pt"))
          if cfg.use_interframe:
+             self.rife = RIFEModel(device=self.device)
+             self.rife.load_model(os.path.join(BASE_DIR, "checkpoints/RIFE/"))

+         for m in (image_enc, vae, unet):
+             m.to(dtype)

+         self.pipe = SonicPipeline(unet=unet, image_encoder=image_enc, vae=vae, scheduler=sched).to(device=self.device, dtype=dtype)
+         self.image_encoder = image_enc
+         self.audio2token = a2t
+         self.audio2bucket = a2b
+         self.whisper = whisper
+     # --------------------------------------------------------------
+     def preprocess(self, image_path: str, expand_ratio: float = 1.0):
+         img = cv2.imread(image_path)
+         h, w = img.shape[:2]
+         _, _, bboxes = self.face_det(img, maxface=True)
+         if bboxes:
+             x1, y1, ww, hh = bboxes[0]
+             return {"face_num": 1,
+                     "crop_bbox": process_bbox((x1, y1, x1+ww, y1+hh), expand_ratio, h, w)}
+         return {"face_num": 0, "crop_bbox": None}

+     # --------------------------------------------------------------
      @torch.no_grad()
+     def process(self, image_path, audio_path, output_path,
+                 min_resolution=512, inference_steps=25, dynamic_scale=1.0,
+                 keep_resolution=False, seed=None):

          cfg = self.config
+         if seed is not None:
+             cfg.seed = seed
          cfg.num_inference_steps = inference_steps
          cfg.motion_bucket_scale = dynamic_scale
          seed_everything(cfg.seed)

+         # image / audio → tensors
+         data = image_audio_to_tensor(
              self.face_det, self.feature_extractor,
+             image_path, audio_path,
+             limit=-1, image_size=min_resolution, area=cfg.area
          )
+         if data is None:
+             return -1

+         h, w = data["ref_img"].shape[-2:]
+         if keep_resolution:
+             im = Image.open(image_path)
+             resolution = f"{im.width//2*2}x{im.height//2*2}"
+         else:
+             resolution = f"{w}x{h}"

          video = test(self.pipe, cfg, self.whisper, self.audio2token,
+                      self.audio2bucket, self.image_encoder,
+                      w, h, data)

+         # inter-frame interpolation (RIFE doubles the frame rate)
+         if cfg.use_interframe:
+             out, frames = video.to(self.device), []
              for i in tqdm(range(out.shape[2]-1), ncols=0):
+                 mid = self.rife.inference(out[:,:,i], out[:,:,i+1]).clamp(0,1).detach()
+                 frames.extend([out[:,:,i], mid])
+             frames.append(out[:,:,-1])
+             video = torch.stack(frames, 2).cpu()

+         tmp = output_path.replace(".mp4", "_noaudio.mp4")
+         save_videos_grid(video, tmp, n_rows=video.shape[0],
+                          fps=cfg.fps * (2 if cfg.use_interframe else 1))
+         os.system(
+             f"ffmpeg -loglevel error -y -i '{tmp}' -i '{audio_path}' -s {resolution} "
+             f"-vcodec libx264 -acodec aac -crf 18 -shortest '{output_path}'")
+         os.remove(tmp)
+         return 0
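A minimal driver sketch for the rewritten module. The constructor arguments follow the device_id / enable_interpolate_frame parameters visible in the hunk above; the example file paths and parameter values are hypothetical:

    from sonic import Sonic

    sonic = Sonic(device_id=0, enable_interpolate_frame=True)

    # preprocess() only reports whether a face was found and where to crop
    info = sonic.preprocess("examples/face.png", expand_ratio=1.0)
    if info["face_num"] > 0:
        ret = sonic.process("examples/face.png", "examples/speech.wav",
                            "output/result.mp4",
                            min_resolution=512, inference_steps=25,
                            dynamic_scale=1.0, seed=42)
        # process() returns 0 on success, -1 if image/audio preprocessing failed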
|