Update inference_cli.py
inference_cli.py  CHANGED  (+196 -22)
@@ -9,15 +9,27 @@ import os
 import argparse
 import time
 import multiprocessing as mp
-import queue  #
+import queue  # Imports the exception class for empty queues
 
+# Ensures CUDA is used safely with multiprocessing, essential for stability.
 if mp.get_start_method(allow_none=True) != 'spawn':
     mp.set_start_method('spawn', force=True)
 
+# -------------------------------------------------------------
+# 1) VRAM memory-allocator configuration
 os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")
 
-#
+# 2) Pre-parse the arguments to configure CUDA device visibility
+_pre_parser = argparse.ArgumentParser(add_help=False)
+_pre_parser.add_argument("--cuda_device", type=str, default=None)
+_pre_args, _ = _pre_parser.parse_known_args()
+if _pre_args.cuda_device is not None:
+    device_list_env = [x.strip() for x in _pre_args.cuda_device.split(',') if x.strip() != '']
+    if len(device_list_env) == 1:
+        os.environ["CUDA_VISIBLE_DEVICES"] = device_list_env[0]
 
+# -------------------------------------------------------------
+# 3) Heavy imports (torch, etc.) happen only after the environment is configured.
 import torch
 import cv2
 import numpy as np
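Note on the pre-parse step above: `parse_known_args` tolerates flags it does not define, which is what lets this block run before the full CLI is declared, and `CUDA_VISIBLE_DEVICES` must be set before the first CUDA-touching import for it to take effect. A minimal standalone sketch of the pattern (the argv list here is illustrative, and torch is assumed to be installed):

import argparse
import os

_pre = argparse.ArgumentParser(add_help=False)
_pre.add_argument("--cuda_device", type=str, default=None)
# parse_known_args ignores unknown flags instead of erroring out,
# so the real parser can still define the rest of the CLI later.
known, _rest = _pre.parse_known_args(["--cuda_device", "1", "--debug"])
if known.cuda_device and "," not in known.cuda_device:
    # Single-GPU case: pin visibility before CUDA initializes.
    os.environ["CUDA_VISIBLE_DEVICES"] = known.cuda_device

import torch  # heavy import only after the environment is pinned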
@@ -25,16 +37,118 @@ from datetime import datetime
 from pathlib import Path
 from src.utils.downloads import download_weight
 
+# Add the project root directory to the system path to allow imports from `src`
 script_dir = os.path.dirname(os.path.abspath(__file__))
 if script_dir not in sys.path:
     sys.path.insert(0, script_dir)
+root_dir = os.path.join(script_dir, '..', '..')
+if root_dir not in sys.path:
+    sys.path.insert(0, root_dir)
 
-
-
+def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
+    """
+    Extracts frames from a video and converts them to tensor format.
+    """
+    if debug:
+        print(f"🎬 Extracting frames from video: {video_path}")
+
+    if not os.path.exists(video_path):
+        raise FileNotFoundError(f"Video file not found: {video_path}")
+
+    cap = cv2.VideoCapture(video_path)
+    if not cap.isOpened():
+        raise ValueError(f"Cannot open video file: {video_path}")
+
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    if debug:
+        print(f"📊 Video info: {frame_count} frames, {width}x{height}, {fps:.2f} FPS")
+        if skip_first_frames:
+            print(f"⏭️ Will skip first {skip_first_frames} frames")
+        if load_cap:
+            print(f"🔢 Will load maximum {load_cap} frames")
+
+    frames = []
+    frame_idx = 0
+    frames_loaded = 0
+
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        if frame_idx < skip_first_frames:
+            frame_idx += 1
+            continue
+
+        if load_cap is not None and load_cap > 0 and frames_loaded >= load_cap:
+            break
+
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = frame.astype(np.float32) / 255.0
+
+        frames.append(frame)
+        frame_idx += 1
+        frames_loaded += 1
+
+        if debug and frames_loaded % 100 == 0:
+            total_to_load = min(frame_count, load_cap) if load_cap else frame_count
+            print(f"📹 Extracted {frames_loaded}/{total_to_load} frames")
+
+    cap.release()
+
+    if len(frames) == 0:
+        raise ValueError(f"No frames extracted from video: {video_path}")
+
+    if debug:
+        print(f"✅ Extracted {len(frames)} frames")
+
+    frames_tensor = torch.from_numpy(np.stack(frames)).to(torch.float16)
+
+    if debug:
+        print(f"📊 Frames tensor shape: {frames_tensor.shape}, dtype: {frames_tensor.dtype}")
+
+    return frames_tensor, fps
+
+
+def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
+    """
+    Saves a tensor of frames to a video file.
+    """
+    if debug:
+        print(f"🎬 Saving {frames_tensor.shape[0]} frames to video: {output_path}")
+
+    os.makedirs(os.path.dirname(output_path), exist_ok=True)
+
+    frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)
+
+    T, H, W, C = frames_np.shape
+
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))
+
+    if not out.isOpened():
+        raise ValueError(f"Cannot create video writer for: {output_path}")
+
+    for i, frame in enumerate(frames_np):
+        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+        out.write(frame_bgr)
+
+        if debug and (i + 1) % 100 == 0:
+            print(f"💾 Saved {i + 1}/{T} frames")
+
+    out.release()
+
+    if debug:
+        print(f"✅ Video saved successfully: {output_path}")
 
-# --- WORKER LOGIC (with the progress queue) ---
 def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):
-    """
+    """
+    Child (worker) process that runs the upscaling on a dedicated GPU.
+    """
     os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
     os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")
 
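The two helpers added above round-trip cleanly: `extract_frames_from_video` returns a float16 tensor of shape (T, H, W, 3) with values in [0, 1], which is exactly the layout `save_frames_to_video` expects. A hypothetical usage sketch (the paths and limits are placeholders, not part of the commit):

frames, fps = extract_frames_from_video(
    "input.mp4", debug=True, skip_first_frames=10, load_cap=120
)
# frames: float16 tensor, shape (T, H, W, 3), values in [0, 1]
save_frames_to_video(frames, "out/clip_copy.mp4", fps=fps, debug=True)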
@@ -65,10 +179,10 @@ def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, p
         import traceback
         error_msg = f"ERROR in worker {proc_idx}: {e}\n{traceback.format_exc()}"
         print(error_msg)
-        if progress_queue:
+        if progress_queue:
+            progress_queue.put((proc_idx, -1, -1, error_msg))
         return_queue.put((proc_idx, error_msg))
 
-# --- MAIN PROCESSING (WITH ROBUST MONITORING) ---
 def _gpu_processing(frames_tensor, device_list, args, progress_callback=None):
     """
     Splits the frames, manages the workers, and monitors progress robustly.
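The new `progress_queue.put((proc_idx, -1, -1, error_msg))` line defines the error sentinel the monitor reacts to in the next hunk: messages on `progress_queue` are 4-tuples of `(proc_idx, batch_idx, total_batches, message)`, and `batch_idx == -1` marks a failure. A sketch of wrappers a worker could use for this protocol (the helper names are hypothetical, not part of the script):

def report_progress(progress_queue, proc_idx, batch_idx, total_batches):
    # Regular heartbeat: batch_idx/total_batches drives the progress bar.
    if progress_queue is not None:
        progress_queue.put((proc_idx, batch_idx, total_batches,
                            f"batch {batch_idx}/{total_batches}"))

def report_error(progress_queue, proc_idx, error_msg):
    # Sentinel tuple: batch_idx == -1 tells the monitor to raise.
    if progress_queue is not None:
        progress_queue.put((proc_idx, -1, -1, error_msg))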
@@ -102,8 +216,11 @@ def _gpu_processing(frames_tensor, device_list, args, progress_callback=None):
         while not progress_queue.empty():
             try:
                 proc_idx, batch_idx, total_batches, message = progress_queue.get_nowait()
-                if batch_idx == -1:
-
+                if batch_idx == -1:  # Error message from the worker
+                    raise RuntimeError(f"Worker {proc_idx} error: {message}")
+
+                if total_batches > 0:
+                    worker_progress[proc_idx] = batch_idx / total_batches
 
                 total_progress = sum(worker_progress) / num_devices
                 progress_callback(total_progress, desc=f"GPU {proc_idx+1}/{num_devices}: {message}")
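This monitor loop is also why `import queue` was added at the top of the file: with multiprocessing queues, `empty()` is only advisory, and `get_nowait()` can raise `queue.Empty` even right after `empty()` returned False, so the read has to sit inside try/except. A minimal sketch of that drain pattern, assuming a multiprocessing queue:

import queue  # queue.Empty is the exception multiprocessing queues raise

def drain(progress_queue):
    # Pull every pending message without blocking; stop on queue.Empty.
    updates = []
    while True:
        try:
            updates.append(progress_queue.get_nowait())
        except queue.Empty:
            break
    return updates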
@@ -126,28 +243,85 @@ def _gpu_processing(frames_tensor, device_list, args, progress_callback=None):
 
     for p in workers: p.join()
 
+    # Check whether any result is missing, which indicates an uncaught error
     if any(r is None for r in results_np):
        raise RuntimeError("One or more workers failed to return a result.")
 
     return torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
 
+def parse_arguments():
+    """Parses the command-line arguments."""
+    parser = argparse.ArgumentParser(description="SeedVR2 Video Upscaler CLI")
+    parser.add_argument("--video_path", type=str, required=True, help="Path to input video file")
+    parser.add_argument("--seed", type=int, default=100, help="Random seed for generation (default: 100)")
+    parser.add_argument("--resolution", type=int, default=1072, help="Target resolution of the short side (default: 1072)")
+    parser.add_argument("--batch_size", type=int, default=5, help="Number of frames per batch (default: 5)")
+    parser.add_argument("--model", type=str, default="seedvr2_ema_3b_fp16.safetensors",
+                        choices=["seedvr2_ema_3b_fp16.safetensors", "seedvr2_ema_3b_fp8_e4m3fn.safetensors",
+                                 "seedvr2_ema_7b_fp16.safetensors", "seedvr2_ema_7b_fp8_e4m3fn.safetensors"],
+                        help="Model to use")
+    parser.add_argument("--model_dir", type=str, default=None, help="Directory containing the model files")
+    parser.add_argument("--skip_first_frames", type=int, default=0, help="Skip the first frames during processing")
+    parser.add_argument("--load_cap", type=int, default=0, help="Maximum number of frames to load from video (default: load all)")
+    parser.add_argument("--output", type=str, default=None, help="Output path")
+    parser.add_argument("--output_format", type=str, default="video", choices=["video", "png"], help="Output format: 'video' (mp4) or 'png' images")
+    parser.add_argument("--preserve_vram", action="store_true", help="Enable VRAM preservation mode")
+    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
+    parser.add_argument("--cuda_device", type=str, default=None, help="CUDA device id(s), e.g. '0' or '0,1' for multi-GPU")
+
+    return parser.parse_args()
 
-# --- LOGIC FUNCTION AND MAIN (unchanged) ---
 def run_inference_logic(args, progress_callback=None):
-
-
+    """
+    Main function that runs the upscaling pipeline. It can be imported and called by other scripts.
+    """
+    if args.debug:
+        print(f"📋 Inference logic arguments: {vars(args)}")
+
+    if progress_callback: progress_callback(0.05, "Extracting frames...")
+    print("🎬 Extracting frames from the video...")
+    start_time = time.time()
+    frames_tensor, original_fps = extract_frames_from_video(
+        args.video_path, args.debug, args.skip_first_frames, args.load_cap
+    )
+    if args.debug:
+        print(f"🔄 Frame extraction time: {time.time() - start_time:.2f}s")
+
+    device_list = [d.strip() for d in str(args.cuda_device).split(',') if d.strip()] if args.cuda_device else ["0"]
+    if args.debug:
+        print(f"🚀 Using devices: {device_list}")
+
+    if progress_callback: progress_callback(0.1, "Starting generation...")
+    processing_start = time.time()
+    download_weight(args.model, args.model_dir)
+
     result_tensor = _gpu_processing(frames_tensor, device_list, args, progress_callback)
-    # ...
-    return result_tensor, original_fps, generation_time, len(frames_tensor)
 
+    generation_time = time.time() - processing_start
+    if args.debug:
+        print(f"🔄 Generation time: {generation_time:.2f}s")
+        print(f"📊 Result: {result_tensor.shape}, dtype: {result_tensor.dtype}")
+
+    return result_tensor, original_fps, generation_time, len(frames_tensor)
 
 def main():
-
-
+    """
+    Entry point for command-line (CLI) execution.
+    """
+    print(f"🚀 SeedVR2 Video Upscaler CLI started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+    args = parse_arguments()
+    try:
+        result_tensor, original_fps, _, _ = run_inference_logic(args)
+
+        print(f"💾 Saving video to: {args.output}")
+        save_frames_to_video(result_tensor, args.output, original_fps, args.debug)
+        print("✅ CLI upscaling completed successfully!")
+
+    except Exception as e:
+        print(f"❌ Error during CLI processing: {e}")
+        import traceback
+        traceback.print_exc()
+        sys.exit(1)
 
 if __name__ == "__main__":
-    main()
-
-# Add here the full code of the omitted functions to make sure nothing is lost
-# (extract_frames_from_video, save_frames_to_video, parse_arguments, run_inference_logic, main)
-# Omitted for brevity, but you should have them in your file.
+    main()
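Since `run_inference_logic` is designed to be importable, the CLI is not the only entry point. A hypothetical programmatic call (every value is a placeholder; the field names mirror `parse_arguments()` above):

from argparse import Namespace

args = Namespace(
    video_path="input.mp4", seed=100, resolution=1072, batch_size=5,
    model="seedvr2_ema_3b_fp16.safetensors", model_dir=None,
    skip_first_frames=0, load_cap=0, output="out/upscaled.mp4",
    output_format="video", preserve_vram=False, debug=True,
    cuda_device="0,1",  # two ids -> one worker per GPU
)
result, fps, gen_time, n_frames = run_inference_logic(
    args, progress_callback=lambda p, desc="": print(f"{p:.0%} {desc}")
)
save_frames_to_video(result, args.output, fps)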