# temp_dataset/dataprocess_code/run_syncnet_pipeline_on_dir.py
# (dataset upload residue: uploaded by xingzhaohu, commit 4b3a024)
#!/usr/bin/env python3
"""
批量计算一组视频的 SyncNet lipsync 分数,并输出均值。
默认假设视频文件采用 `tai2v_output_{index}.mp4` 命名规则,
可选地提供一个 YAML(与 `test_hyvideo1p5_inference_tai2v.py` 相同的 cases 格式),
用来查找每个 index 对应的 `audio_path`;若未提供或找不到,就直接使用视频自身的音频轨道。
"""
import argparse
import json
import logging
import re
from pathlib import Path
from typing import Dict, List, Optional, Sequence
from syncnet_pipeline import SyncNetPipeline
# Module-level logger used for batch progress and diagnostics.
LOGGER = logging.getLogger("syncnet_batch")
# Matches runs of digits; the LAST run in a file stem is treated as the video's index.
INDEX_PATTERN = re.compile(r"(\d+)")
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="批量运行 SyncNetPipeline 并统计 lipsync 分数")
parser.add_argument(
"--video-dir",
type=str,
default="outputs/test_results",
help="存放生成视频的目录,默认指向 outputs/audio_token_4",
)
parser.add_argument(
"--video-glob",
type=str,
default="tai2v_output_*.mp4",
help="匹配视频的 glob 模式(相对于 video-dir)",
)
parser.add_argument(
"--cases-yaml",
type=str,
default="/mnt/nfs/datasets/MochaBench/inference_cases.yaml",
help="可选:提供一个 cases.yaml,用来读取 audio_path(index 与 tai2v_output_{index}.mp4 一一对应)",
)
parser.add_argument(
"--limit",
type=int,
default=None,
help="只评估前 N 个匹配视频,默认全部",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
choices=["cuda", "cpu"],
help="SyncNet 推理设备(默认 cuda,如不可用可改为 cpu)",
)
parser.add_argument(
"--s3fd-weights",
type=str,
default="/mnt/nfs/datasets/MochaBench/MoChaBench/eval-lipsync/weights/sfd_face.pth",
help="S3FD 模型权重路径",
)
parser.add_argument(
"--syncnet-weights",
type=str,
default="/mnt/nfs/datasets/MochaBench/MoChaBench/eval-lipsync/weights/syncnet_v2.model",
help="SyncNet 模型权重路径",
)
parser.add_argument(
"--save-json",
type=str,
default=None,
help="若指定,则将每条结果与统计信息保存为 JSON",
)
parser.add_argument(
"--log-level",
type=str,
default="INFO",
help="日志等级(默认 INFO)",
)
return parser.parse_args()
def _normalize_cases(raw) -> List[dict]:
if raw is None:
return []
if isinstance(raw, list):
return raw
if isinstance(raw, dict):
if isinstance(raw.get("cases"), list):
return raw["cases"]
if isinstance(raw.get("image_paths_and_scales"), list):
return raw["image_paths_and_scales"]
return [raw]
return []
def _load_audio_map(yaml_path: Optional[str]) -> Dict[int, str]:
if not yaml_path:
return {}
try:
import yaml
except ImportError as exc: # pragma: no cover - 仅在缺失依赖时触发
raise RuntimeError("需要 PyYAML 以解析 cases YAML,请先安装 `pip install pyyaml`.") from exc
yaml_file = Path(yaml_path)
if not yaml_file.exists():
LOGGER.warning("提供的 cases YAML 不存在:%s", yaml_file)
return {}
with yaml_file.open("r") as f:
raw = yaml.safe_load(f) or []
cases = _normalize_cases(raw)
audio_map: Dict[int, str] = {}
for idx, entry in enumerate(cases):
audio_path = None
if isinstance(entry, dict):
audio_path = entry.get("audio_path") or entry.get("audio")
elif isinstance(entry, Sequence) and not isinstance(entry, (str, bytes)):
# 旧格式仅有图片 / prompt,无法提供音频信息,直接跳过
audio_path = None
if audio_path:
audio_map[idx] = str(audio_path)
LOGGER.info("从 %s 读取到 %d 条 audio_path 映射", yaml_file, len(audio_map))
return audio_map
def _extract_index(path: Path) -> Optional[int]:
matches = INDEX_PATTERN.findall(path.stem)
if not matches:
return None
try:
return int(matches[-1])
except ValueError:
return None
def _collect_videos(video_dir: Path, pattern: str) -> List[Path]:
    """List regular files in *video_dir* matching *pattern*, ordered by numeric index.

    Paths whose stem has no parsable index sort after indexed ones; ties fall
    back to the file name for a deterministic order.

    Args:
        video_dir: Directory to search (non-recursive glob).
        pattern: Glob pattern relative to *video_dir*.

    Returns:
        Sorted list of matching file paths (directories filtered out).
    """
    def _sort_key(path: Path):
        # Compute the index once per path — the original key evaluated
        # _extract_index twice for every element.
        idx = _extract_index(path)
        return (idx is None, idx, path.name)

    candidates = sorted(video_dir.glob(pattern), key=_sort_key)
    return [path for path in candidates if path.is_file()]
def main() -> None:
    """Entry point: score every matched video with SyncNet and report averages.

    Workflow: collect videos from ``--video-dir``, optionally map each video's
    numeric index to an audio file via the cases YAML, run SyncNetPipeline on
    each (video, audio) pair, then log/print the mean confidence and mean
    min-distance over samples that produced offsets. Optionally dumps all
    per-video records plus the aggregate stats to ``--save-json``.

    Raises:
        FileNotFoundError: when the video directory is missing or no file
            matches the glob pattern.
    """
    args = _parse_args()
    # Fall back to INFO when --log-level is not a recognized level name.
    logging.basicConfig(level=getattr(logging, args.log_level.upper(), logging.INFO))
    video_dir = Path(args.video_dir)
    if not video_dir.exists():
        raise FileNotFoundError(f"视频目录不存在:{video_dir}")
    video_files = _collect_videos(video_dir, args.video_glob)
    if not video_files:
        raise FileNotFoundError(f"目录 {video_dir} 中未找到匹配 {args.video_glob} 的视频")
    if args.limit is not None:
        video_files = video_files[: args.limit]
    audio_map = _load_audio_map(args.cases_yaml)
    pipe = SyncNetPipeline(
        {
            "s3fd_weights": args.s3fd_weights,
            "syncnet_weights": args.syncnet_weights,
        },
        device=args.device,
    )
    records = []
    valid_confidences: List[float] = []
    valid_min_dists: List[float] = []
    for vid in video_files:
        vid_idx = _extract_index(vid)
        # Prefer the audio path mapped in the cases YAML; fall back to the
        # video's own audio track when the mapping is absent or stale.
        audio_path = None
        if vid_idx is not None and vid_idx in audio_map:
            candidate = Path(audio_map[vid_idx])
            if candidate.exists():
                audio_path = str(candidate)
            else:
                LOGGER.warning("cases YAML 中的音频不存在(index=%s):%s,改用视频自身音频", vid_idx, candidate)
        if audio_path is None:
            audio_path = str(vid)
        LOGGER.info("评估视频 %s (index=%s) ,音频:%s", vid.name, vid_idx, audio_path)
        try:
            # NOTE(review): assumes inference() returns the 7-tuple
            # (offsets, confidences, distances, best_conf, min_dist, _, has_face)
            # — confirm against syncnet_pipeline's actual signature.
            offsets, confs, dists, best_conf, min_dist, _, has_face = pipe.inference(
                video_path=str(vid),
                audio_path=audio_path,
            )
            result = {
                "video": str(vid),
                "index": vid_idx,
                "audio": audio_path,
                "offsets": offsets,
                "confidences": confs,
                "distances": dists,
                "best_confidence": float(best_conf),
                "min_distance": float(min_dist),
                "has_face": bool(has_face),
                # A sample counts as "valid" when at least one offset was produced.
                "valid": bool(offsets),
            }
        except Exception as exc:  # pragma: no cover - runtime failure path
            # Per-video failures are recorded and skipped so the batch continues.
            LOGGER.exception("评估 %s 失败:%s", vid, exc)
            result = {
                "video": str(vid),
                "index": vid_idx,
                "audio": audio_path,
                "error": str(exc),
                "valid": False,
            }
        records.append(result)
        if result.get("valid"):
            if result.get("best_confidence") is not None:
                valid_confidences.append(result["best_confidence"])
            if result.get("min_distance") is not None:
                valid_min_dists.append(result["min_distance"])
            LOGGER.info(
                "视频 %s -> confidence=%.3f, min_dist=%.3f, offsets=%s",
                vid.name,
                result.get("best_confidence", 0.0),
                result.get("min_distance", 0.0),
                result.get("offsets"),
            )
    # Aggregate means over valid samples only; 0.0 when nothing was valid.
    mean_conf = sum(valid_confidences) / len(valid_confidences) if valid_confidences else 0.0
    mean_min_dist = sum(valid_min_dists) / len(valid_min_dists) if valid_min_dists else 0.0
    LOGGER.info(
        "有效样本 %d 条,平均 confidence=%.3f,平均 min_dist=%.3f",
        len(valid_confidences),
        mean_conf,
        mean_min_dist,
    )
    print(
        f"\n合计 {len(records)} 条视频;有效样本 {len(valid_confidences)} 条,"
        f"平均 lipsync confidence = {mean_conf:.3f},平均 min_dist = {mean_min_dist:.3f}"
    )
    if args.save_json:
        payload = {
            "mean_confidence": mean_conf,
            "mean_min_distance": mean_min_dist,
            "records": records,
        }
        with open(args.save_json, "w", encoding="utf-8") as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)
        LOGGER.info("结果已保存至 %s", args.save_json)


if __name__ == "__main__":
    main()