# temp_dataset/dataprocess_code/FaceParse/ray_face_parse_hallo3_pipeline.py
# Uploaded by xingzhaohu ("Initial upload", rev 4b3a024).
"""Ray-based face parsing pipeline for local Hallo3 videos.
This script scans a local directory of raw videos and runs SegFormer-based
face parsing on each file in parallel across multiple GPUs using Ray.
For each input video, it produces a grayscale label video where each pixel
stores the class index (uint8) for that pixel. The labels are saved using
a lossless FFV1 codec (e.g., MKV container) so that labels can be read
back exactly as uint8 arrays.
The saving format is compatible with the ``save_labels_to_video`` /
``read_labels_from_video`` helpers in ``face_parse_example.py``.
Example:
```
python ray_face_parse_hallo3_pipeline.py \
--input-dir /mnt/nfs/datasets/hallo3_data/videos \
--output-dir /mnt/nfs/datasets/hallo3_data/face_parse_labels \
--num-gpu-workers 4 \
--stride 1
```
"""
from __future__ import annotations
import argparse
import os
from typing import Dict, List, Optional, Sequence
import ray
import torch
from torch import nn
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
import cv2
import ffmpeg
import numpy as np
from PIL import Image
def _ensure_dir(path: str) -> str:
    """Create directory ``path`` (with parents) if missing; return it for chaining."""
    os.makedirs(path, exist_ok=True)
    return path
def _list_video_files(
    input_dir: str,
    exts: Sequence[str] = (".mp4", ".mkv", ".webm", ".avi", ".mov"),
) -> List[str]:
    """Recursively collect video files under ``input_dir``, sorted by path.

    Extension matching is case-insensitive. Raises ``ValueError`` if the
    directory is missing or contains no matching files.
    """
    input_dir = os.path.abspath(input_dir)
    if not os.path.isdir(input_dir):
        raise ValueError(f"input_dir does not exist or is not a directory: {input_dir}")
    suffixes = tuple(e.lower() for e in exts)
    video_paths = sorted(
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in os.walk(input_dir)
        for fname in fnames
        if fname.lower().endswith(suffixes)
    )
    if not video_paths:
        raise ValueError(f"No video files found under {input_dir}")
    return video_paths
def _build_output_path(file_name: str, output_dir: str) -> str:
    """Return ``output_dir/<basename(file_name)>.mkv``.

    The original extension is kept (``a.mp4`` -> ``a.mp4.mkv``) so sources
    that differ only by container cannot collide in ``output_dir``.
    """
    return os.path.join(output_dir, f"{os.path.basename(file_name)}.mkv")
def read_labels_from_video(video_path: str) -> Optional[np.ndarray]:
    """Decode a grayscale label video into a (frames, height, width) uint8 array.

    Best-effort: on any probe/decode failure the error is printed and
    ``None`` is returned instead of raising.
    """
    try:
        meta = ffmpeg.probe(video_path)
        stream = next(s for s in meta["streams"] if s["codec_type"] == "video")
        w, h = int(stream["width"]), int(stream["height"])
        raw, _ = (
            ffmpeg.input(video_path)
            .output("pipe:", format="rawvideo", pix_fmt="gray")
            .run(capture_stdout=True, capture_stderr=True)
        )
        # One byte per pixel: infer the frame count from the buffer length.
        return np.frombuffer(raw, np.uint8).reshape((-1, h, w))
    except Exception as e:
        print(f"Error reading video {video_path}: {e}")
        return None
def save_labels_to_video(labels: np.ndarray, output_path: str, fps: int = 30) -> bool:
    """Save a (frames, height, width) label array as a lossless grayscale video.

    The stream is encoded with FFV1 so each pixel's uint8 class index can be
    read back exactly (see ``read_labels_from_video``).

    Args:
        labels: 3D array of shape (frames, height, width); non-uint8 dtypes
            are cast to uint8.
        output_path: Destination path (use an FFV1-capable container, e.g. MKV).
        fps: Frame rate recorded in the output stream.

    Returns:
        True if ffmpeg consumed the frames and exited with status 0,
        False on any error (printed, not raised).
    """
    try:
        if labels.ndim != 3:
            raise ValueError("Input array must be 3D (frames, height, width)")
        _, height, width = labels.shape
        if labels.dtype != np.uint8:
            labels = labels.astype(np.uint8)
        process = (
            ffmpeg.input(
                "pipe:",
                format="rawvideo",
                pix_fmt="gray",
                s=f"{width}x{height}",
                r=int(fps),
            )
            .output(
                output_path,
                pix_fmt="gray",
                vcodec="ffv1",  # lossless, so labels round-trip exactly
                level=3,
            )
            .overwrite_output()
            .run_async(pipe_stdin=True)
        )
        process.stdin.write(labels.tobytes())
        process.stdin.close()
        # Fix: the original discarded the exit status and returned True even
        # when ffmpeg failed to encode; succeed only on a zero return code.
        return process.wait() == 0
    except Exception as e:
        print(f"Error saving video {output_path}: {e}")
        return False
def _device() -> torch.device:
    """Select the best available torch device: CUDA, then Apple MPS, then CPU."""
    for name, available in (
        ("cuda", torch.cuda.is_available),
        ("mps", torch.backends.mps.is_available),
    ):
        if available():
            return torch.device(name)
    return torch.device("cpu")
def _parse_video_to_labels(
    image_processor: SegformerImageProcessor,
    model: SegformerForSemanticSegmentation,
    video_path: str,
    stride: int,
) -> np.ndarray:
    """Run face parsing on a video and return labels as (T, H, W) uint8.

    Args:
        image_processor: SegFormer preprocessing pipeline used per frame.
        model: SegFormer segmentation model, already placed on its device.
        video_path: Path to an input video readable by OpenCV.
        stride: Process every ``stride``-th frame; skipped frames are simply
            dropped from the output (not duplicated).

    Returns:
        Array of shape (num_processed_frames, height, width) holding the
        per-pixel class index, or an empty (0, 0, 0) array if no frame was
        decoded.

    Raises:
        RuntimeError: If OpenCV cannot open ``video_path``.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise RuntimeError(f"Failed to open video: {video_path}")
    labels_list: List[np.ndarray] = []
    idx = 0
    try:
        with torch.no_grad():  # pure inference; gradients are never needed
            while True:
                ret, frame = cap.read()
                if not ret:
                    break  # end of stream (or decode failure)
                # Skip frames that are not on the stride grid.
                if stride > 1 and (idx % stride) != 0:
                    idx += 1
                    continue
                # OpenCV decodes BGR; the processor/PIL expect RGB.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(frame_rgb)
                inputs = image_processor(images=image, return_tensors="pt")
                inputs = {k: v.to(model.device) for k, v in inputs.items()}
                outputs = model(**inputs)
                logits = outputs.logits
                # Logits come out at model resolution; upsample to the frame
                # size. PIL's .size is (W, H) and interpolate wants (H, W),
                # hence the [::-1].
                upsampled_logits = nn.functional.interpolate(
                    logits,
                    size=image.size[::-1],
                    mode="bilinear",
                    align_corners=False,
                )
                labels = upsampled_logits.argmax(dim=1)[0]
                # Cast to uint8 — assumes the model has <256 classes
                # (true for typical face-parsing heads; TODO confirm).
                labels_np = labels.cpu().numpy().astype(np.uint8)
                labels_list.append(labels_np)
                idx += 1
    finally:
        cap.release()
    if not labels_list:
        return np.zeros((0, 0, 0), dtype=np.uint8)
    return np.stack(labels_list, axis=0)
@ray.remote(num_gpus=1)
class FaceParseWorker:
    """Ray actor that runs SegFormer face parsing on one dedicated GPU.

    Each actor loads its own copy of the ``jonathandinu/face-parsing``
    processor/model pair and writes label videos into ``output_dir``.
    """

    def __init__(
        self,
        output_dir: str,
        stride: int,
        skip_existing: bool,
    ) -> None:
        device = _device()
        self.device = device
        self.image_processor = SegformerImageProcessor.from_pretrained(
            "jonathandinu/face-parsing"
        )
        self.model = SegformerForSemanticSegmentation.from_pretrained(
            "jonathandinu/face-parsing"
        ).to(device)
        self.output_dir = _ensure_dir(output_dir)
        self.stride = stride
        self.skip_existing = skip_existing

    def parse(self, record: Dict) -> Dict:
        """Parse one video described by ``record`` and save its label video.

        ``record`` must carry ``index``, ``path`` and ``file_name`` keys;
        a small status dict is returned for driver-side progress logging.
        """
        index = int(record["index"])
        video_path = record["path"]
        file_name = record["file_name"]
        out_path = _build_output_path(file_name, self.output_dir)
        # Re-check existence inside the worker in case the output appeared
        # after the driver submitted this record.
        if self.skip_existing and os.path.exists(out_path):
            return {
                "index": index,
                "file_name": file_name,
                "result_path": out_path,
                "frame_count": 0,
                "skipped": True,
            }
        labels = _parse_video_to_labels(
            self.image_processor,
            self.model,
            video_path,
            stride=self.stride,
        )
        if labels.size == 0:
            frame_count, save_ok = 0, False
        else:
            frame_count = int(labels.shape[0])
            # Fallback fps; probing the source video's fps is optional.
            save_ok = save_labels_to_video(labels, out_path, fps=25)
        return {
            "index": index,
            "file_name": file_name,
            "result_path": out_path,
            "frame_count": frame_count,
            "skipped": False,
            "saved": bool(save_ok),
        }
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line options for this pipeline."""
    p = argparse.ArgumentParser(
        description="Ray-based face parsing for local Hallo3 videos"
    )
    # Input/output locations.
    p.add_argument(
        "--input-dir",
        default="/mnt/nfs/datasets/hallo3_data/videos",
        help="Directory containing raw Hallo3 video files",
    )
    p.add_argument(
        "--output-dir",
        default="/mnt/nfs/datasets/hallo3_data/face_parse_labels",
        help="Directory to store face parsing label videos",
    )
    # Processing knobs.
    p.add_argument("--stride", type=int, default=1, help="Frame stride for processing (>=1)")
    p.add_argument("--num-gpu-workers", type=int, default=4, help="Number of GPU workers")
    p.add_argument(
        "--max-inflight",
        type=int,
        default=0,
        help="Max concurrent parsing tasks (0 = 2x workers)",
    )
    # Dataset slicing.
    p.add_argument("--start", type=int, default=0, help="Start index in the sorted video list")
    p.add_argument(
        "--stop",
        type=int,
        default=None,
        help="Stop index (exclusive) in the sorted video list; default processes to the end",
    )
    p.add_argument("--limit", type=int, default=0, help="Max number of videos to process (0 = no limit)")
    p.add_argument(
        "--skip-existing",
        action="store_true",
        help="Skip parsing if the output label video already exists",
    )
    # Ray runtime controls.
    p.add_argument("--ray-address", default=None, help="Existing Ray cluster address")
    p.add_argument(
        "--shutdown-ray",
        action="store_true",
        help="Shutdown Ray at exit (useful for local runs)",
    )
    return p.parse_args()
def run_pipeline(args: argparse.Namespace) -> None:
    """Drive the full parsing run: list videos, spawn GPU actors, fan out tasks.

    Submission is round-robin across workers with a bounded in-flight queue
    (``max_inflight``) so task backlog and driver memory stay flat on large
    datasets. Progress is printed as tasks complete.

    Raises:
        ValueError: If the [start, stop) slice is empty or no workers were
            requested.
    """
    if not ray.is_initialized():
        ray.init(address=args.ray_address, ignore_reinit_error=True)
    _ensure_dir(args.output_dir)
    video_paths = _list_video_files(args.input_dir)
    dataset_size = len(video_paths)
    # Clamp the requested [start, stop) slice to the dataset bounds.
    start_idx = max(args.start, 0)
    stop_idx = args.stop if args.stop is not None else dataset_size
    stop_idx = min(stop_idx, dataset_size)
    if start_idx >= stop_idx:
        raise ValueError(
            f"Invalid range: start={start_idx}, stop={stop_idx}, dataset_size={dataset_size}"
        )
    workers = [
        FaceParseWorker.remote(
            output_dir=args.output_dir,
            stride=args.stride,
            skip_existing=args.skip_existing,
        )
        for _ in range(args.num_gpu_workers)
    ]
    if not workers:
        raise ValueError("num_gpu_workers must be >= 1")
    # 0 means "auto": keep roughly two tasks queued per worker.
    max_inflight = args.max_inflight or (len(workers) * 2)
    pending: List[ray.ObjectRef] = []
    total_submitted = 0
    total_completed = 0
    next_worker = 0

    def _drain(num_returns: int) -> None:
        # Block until `num_returns` pending tasks finish, then log each result.
        nonlocal pending, total_completed
        if not pending:
            return
        ready, rest = ray.wait(pending, num_returns=num_returns)
        pending = rest
        results: List[Dict] = ray.get(ready)
        for res in results:
            total_completed += 1
            status = "skipped" if res.get("skipped") else "done"
            saved = res.get("saved", False)
            print(
                f"[{total_completed}] idx={res['index']} file={res['file_name']} "
                f"-> {res['result_path']} ({status}, frames={res['frame_count']}, saved={saved})"
            )

    try:
        for idx in range(start_idx, stop_idx):
            if args.limit and total_submitted >= args.limit:
                break
            video_path = video_paths[idx]
            file_name = os.path.basename(video_path)
            # Cheap driver-side skip avoids shipping tasks for already-finished
            # files; the worker re-checks in case outputs appear mid-run.
            if args.skip_existing:
                out_path = _build_output_path(file_name, args.output_dir)
                if os.path.exists(out_path):
                    print(f"[skip existing] idx={idx} file={file_name} -> {out_path}")
                    continue
            record = {
                "index": idx,
                "file_name": file_name,
                "path": video_path,
            }
            # Round-robin the task across actors.
            worker = workers[next_worker]
            next_worker = (next_worker + 1) % len(workers)
            pending.append(worker.parse.remote(record))
            total_submitted += 1
            # Backpressure: once the queue is full, reclaim one slot.
            if len(pending) >= max_inflight:
                _drain(num_returns=1)
        # Flush all remaining in-flight tasks.
        _drain(num_returns=len(pending))
    finally:
        if args.shutdown_ray and ray.is_initialized():
            ray.shutdown()
def main() -> None:
    """Script entry point: parse CLI arguments and launch the pipeline."""
    run_pipeline(parse_args())


if __name__ == "__main__":
    main()