import os
import sys
import uuid
import tempfile
import json
import inspect
import shutil
import torch
import torchaudio
import gradio as gr
from huggingface_hub import snapshot_download
from omegaconf import OmegaConf
from diffusers import AutoencoderKL, DDIMScheduler
# ─── 0. Change CWD & set up PYTHONPATH ──────────────────────────────
BASE_DIR = os.path.dirname(__file__)
# Change the working directory into LatentSync so its internal relative paths resolve
os.chdir(os.path.join(BASE_DIR, "LatentSync"))
# Copy mask.png from assets → latentsync/utils if it is not already there
assets_mask = os.path.join("assets", "mask.png")
utils_mask = os.path.join("latentsync", "utils", "mask.png")
if os.path.exists(assets_mask) and not os.path.exists(utils_mask):
shutil.copy(assets_mask, utils_mask)
# Add Long_Tieng and LatentSync to sys.path so their modules can be imported
sys.path.insert(0, os.path.join(BASE_DIR, "Long_Tieng"))
sys.path.insert(0, os.path.join(BASE_DIR, "LatentSync"))
# ─── 1. MMAudio (Long_Tieng) setup ──────────────────────────────────
from mmaudio.eval_utils import (
ModelConfig, all_model_cfg,
generate, load_video, make_video,
setup_eval_logging
)
from mmaudio.model.flow_matching import FlowMatching
from mmaudio.model.sequence_config import SequenceConfig
from mmaudio.model.utils.features_utils import FeaturesUtils
from mmaudio.model.networks import MMAudio, get_my_mmaudio
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.bfloat16 if device.type == "cuda" else torch.float32
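# bfloat16 halves activation memory on CUDA; CPU inference falls back to float32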
mma_cfg: ModelConfig = all_model_cfg["large_44k_v2"]
mma_cfg.download_if_needed()
setup_eval_logging()
net: MMAudio = get_my_mmaudio(mma_cfg.model_name).to(device, dtype).eval()
net.load_weights(torch.load(
mma_cfg.model_path, map_location=device, weights_only=True
))
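# FeaturesUtils wraps the conditioning/decoding stack: the audio VAE, the
# Synchformer sync-feature extractor, and the BigVGAN vocoder path (used by
# the 16 kHz models); need_vae_encoder=False since inference only decodes.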
feature_utils = FeaturesUtils(
tod_vae_ckpt=mma_cfg.vae_path,
synchformer_ckpt=mma_cfg.synchformer_ckpt,
enable_conditions=True,
mode=mma_cfg.mode,
bigvgan_vocoder_ckpt=mma_cfg.bigvgan_16k_path,
need_vae_encoder=False
).to(device, dtype).eval()
seq_cfg: SequenceConfig = mma_cfg.seq_cfg
@torch.inference_mode()
def text_to_audio_fn(prompt, neg_prompt, seed, num_steps, guidance, duration):
rng = torch.Generator(device=device)
if seed >= 0:
rng.manual_seed(seed)
fm = FlowMatching(min_sigma=0, inference_mode="euler", num_steps=num_steps)
seq_cfg.duration = duration
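    # latent/clip/sync sequence lengths derive from the duration just set,
    # so refresh them before every generation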
net.update_seq_lengths(
seq_cfg.latent_seq_len,
seq_cfg.clip_seq_len,
seq_cfg.sync_seq_len
)
audios = generate(
None, None, [prompt],
negative_text=[neg_prompt],
feature_utils=feature_utils,
net=net, fm=fm, rng=rng, cfg_strength=guidance
)
audio = audios.float().cpu()[0]
out = tempfile.NamedTemporaryFile(delete=False, suffix=".flac").name
torchaudio.save(out, audio, seq_cfg.sampling_rate)
return out
@torch.inference_mode()
def video_to_audio_fn(video, prompt, neg_prompt, seed, num_steps, guidance, duration):
info = load_video(video, duration)
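    # clip_frames feed the CLIP visual encoder (semantics); sync_frames feed
    # Synchformer (temporal alignment)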
clip = info.clip_frames.unsqueeze(0)
sync = info.sync_frames.unsqueeze(0)
rng = torch.Generator(device=device)
if seed >= 0:
rng.manual_seed(seed)
fm = FlowMatching(min_sigma=0, inference_mode="euler", num_steps=num_steps)
seq_cfg.duration = info.duration_sec
net.update_seq_lengths(
seq_cfg.latent_seq_len,
seq_cfg.clip_seq_len,
seq_cfg.sync_seq_len
)
audios = generate(
clip, sync, [prompt],
negative_text=[neg_prompt],
feature_utils=feature_utils,
net=net, fm=fm, rng=rng, cfg_strength=guidance
)
audio = audios.float().cpu()[0]
out_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
make_video(info, out_video, audio, sampling_rate=seq_cfg.sampling_rate)
return out_video
# ─── 2. LatentSync setup ────────────────────────────────────────────
# 2.1 Download checkpoints
REPO_ID = "LTTEAM/Nhep_Mieng"
ckpt_dir = os.path.join(BASE_DIR, "checkpoints")
os.makedirs(ckpt_dir, exist_ok=True)
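# snapshot_download is idempotent: files already in local_dir are reused on reruns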
snapshot_download(repo_id=REPO_ID, local_dir=ckpt_dir)
# 2.2 Load U-Net config
cfg_path = os.path.join(BASE_DIR, "LatentSync", "configs", "unet", "second_stage.yaml")
conf = OmegaConf.load(cfg_path)
# 2.3 Load scheduler config locally + filter invalid args
sched_path = os.path.join(BASE_DIR, "LatentSync", "configs", "scheduler_config.json")
with open(sched_path, "r") as f:
sched_cfg = json.load(f)
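# Keep only kwargs the installed DDIMScheduler actually accepts, so the JSON
# config stays compatible across diffusers versions.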
valid_args = inspect.signature(DDIMScheduler.__init__).parameters.keys()
init_cfg = {k: v for k, v in sched_cfg.items() if k in valid_args}
scheduler = DDIMScheduler(**init_cfg)
# 2.4 Load VAE and fix missing shift_factor
vae = AutoencoderKL.from_pretrained(
"stabilityai/sd-vae-ft-mse",
torch_dtype=torch.float16 if device.type == "cuda" else torch.float32
)
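# Older sd-vae-ft-mse configs predate shift_factor; give it a no-op default.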
if not hasattr(vae.config, "shift_factor") or vae.config.shift_factor is None:
vae.config.shift_factor = 0.0
# 2.5 Whisper audio encoder
from latentsync.whisper.audio2feature import Audio2Feature
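# Whisper small emits 768-dim features (matching cross_attention_dim=768);
# tiny emits 384-dim, so pick the checkpoint by the U-Net's attention width.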
dim = conf.model.cross_attention_dim
wh = "small.pt" if dim == 768 else "tiny.pt"
audio_encoder = Audio2Feature(
model_path=os.path.join(ckpt_dir, "whisper", wh),
device=device,
num_frames=conf.data.num_frames
)
# 2.6 Load UNet3DConditionModel
from latentsync.models.unet import UNet3DConditionModel
unet, _ = UNet3DConditionModel.from_pretrained(
OmegaConf.to_container(conf.model),
os.path.join(ckpt_dir, "latentsync_unet.pt"),
device=device
)
unet = unet.to(torch.float16) if device.type == "cuda" else unet.to(torch.float32)
# 2.7 Build LipsyncPipeline
from latentsync.pipelines.lipsync_pipeline import LipsyncPipeline
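# At inference the pipeline encodes the driving audio with Whisper, inpaints
# the masked mouth region in VAE latent space with the 3D U-Net, and decodes
# the result back to video frames.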
pipe_sync = LipsyncPipeline(
vae=vae,
audio_encoder=audio_encoder,
unet=unet,
scheduler=scheduler
).to(device)
def lipsync_fn(video_path, audio_path, seed, num_frames, inference_steps):
from accelerate.utils import set_seed
if seed >= 0:
set_seed(seed)
out_id = uuid.uuid4().hex
result = f"lipsync_{out_id}.mp4"
try:
pipe_sync(
video_path=video_path,
audio_path=audio_path,
video_out_path=result,
video_mask_path=result.replace(".mp4","_mask.mp4"),
num_frames=num_frames,
num_inference_steps=inference_steps,
guidance_scale=1.0,
weight_dtype=torch.float16 if device.type=="cuda" else torch.float32,
width=conf.data.resolution,
height=conf.data.resolution
)
except RuntimeError as e:
if "Face not detected" in str(e):
raise ValueError("KhΓ΄ng phΓ‘t hiα»n khuΓ΄n mαΊ·t trong video. Vui lΓ²ng chα»n video cΓ³ khuΓ΄n mαΊ·t rΓ΅ rΓ ng.")
else:
raise
return result
# ─── 3. Gradio UI ───────────────────────────────────────────────────
text2audio = gr.Interface(
fn=text_to_audio_fn,
inputs=[
gr.Textbox(label="Prompt"),
gr.Textbox(label="Negative Prompt", value="music"),
gr.Number(label="Seed", value=-1, precision=0),
gr.Number(label="Num Steps", value=25, precision=0),
gr.Number(label="Guidance Strength", value=4.5),
gr.Number(label="Duration (s)", value=8),
],
outputs=gr.Audio(label="Generated Audio"),
title="Text β Audio"
)
video2audio = gr.Interface(
fn=video_to_audio_fn,
inputs=[
gr.Video(label="Input Video"),
gr.Textbox(label="Prompt"),
gr.Textbox(label="Negative Prompt", value="music"),
gr.Number(label="Seed", value=-1, precision=0),
gr.Number(label="Num Steps", value=25, precision=0),
gr.Number(label="Guidance Strength", value=4.5),
gr.Number(label="Duration (s)", value=8),
],
outputs=gr.Video(label="Video with Audio"),
title="Video β Audio"
)
audio2video = gr.Interface(
fn=lipsync_fn,
inputs=[
gr.Video(label="Input Video"),
gr.Audio(label="Input Audio", type="filepath"),
gr.Number(label="Seed", value=-1, precision=0),
gr.Number(label="Num Frames", value=conf.data.num_frames, precision=0),
gr.Number(label="Inference Steps", value=conf.run.inference_steps, precision=0),
],
outputs=gr.Video(label="Lip-Synced Video"),
title="Audio β Lip-Sync"
)
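# Convenience chain: synthesize audio from the text prompt first, then
# lip-sync the input video to that audio.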
def text_video2video_fn(prompt, neg_prompt, seed, num_steps, guidance, duration,
video, num_frames, inference_steps):
audio = text_to_audio_fn(prompt, neg_prompt, seed, num_steps, guidance, duration)
video_out = lipsync_fn(video, audio, seed, num_frames, inference_steps)
return audio, video_out
text_video2video = gr.Interface(
fn=text_video2video_fn,
inputs=[
gr.Textbox(label="Prompt"),
gr.Textbox(label="Negative Prompt", value="music"),
gr.Number(label="Seed", value=-1, precision=0),
gr.Number(label="Num Steps", value=25, precision=0),
gr.Number(label="Guidance Strength", value=4.5),
gr.Number(label="Duration (s)", value=8),
gr.Video(label="Input Video"),
gr.Number(label="Num Frames", value=conf.data.num_frames, precision=0),
gr.Number(label="Inference Steps", value=conf.run.inference_steps, precision=0),
],
outputs=[gr.Audio(label="Generated Audio"), gr.Video(label="Lip-Synced Video")],
title="Text + Video β Lip-Sync"
)
# Build the tabbed interface and enable the (default) request queue
demo = gr.TabbedInterface(
[text2audio, video2audio, audio2video, text_video2video],
["TextβAudio","VideoβAudio","AudioβLipSync","Text+VideoβLipSync"]
).queue()
# Launch with share=True to expose a temporary public gradio.live link
demo.launch(share=True)