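"""Standalone SeedVR2 video upscaling server.

Extracts frames from an input video, shards them across all available GPUs,
upscales each shard in a dedicated worker process, then reassembles the
frames and writes the result to an MP4 file.
"""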
import os
import sys
import time
import subprocess
import queue
import multiprocessing as mp
from pathlib import Path
from typing import Optional, Callable

from huggingface_hub import hf_hub_download

# CUDA state does not survive fork(); force the 'spawn' start method so each
# worker process initializes its own CUDA context cleanly.
if mp.get_start_method(allow_none=True) != 'spawn':
    mp.set_start_method('spawn', force=True)

# Use CUDA's asynchronous memory allocator backend.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

# Make the SeedVR repository importable (the workers import src.core.* from it).
SEEDVR_REPO_PATH = Path(os.getenv("SEEDVR_ROOT", "/data/SeedVR"))
if str(SEEDVR_REPO_PATH) not in sys.path:
    sys.path.insert(0, str(SEEDVR_REPO_PATH))

# Imported after the sys.path setup above.
import torch
import cv2
import numpy as np


def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
    """Extract frames from a video and convert them to a (T, H, W, C) float16 tensor in [0, 1]."""
    if debug: print(f"🎬 Extracting frames from: {video_path}")
    if not os.path.exists(video_path): raise FileNotFoundError(f"Video file not found: {video_path}")

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened(): raise ValueError(f"Could not open video file: {video_path}")

    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if debug: print(f"📊 Video info: {frame_count} frames, {fps:.2f} FPS")

    frames = []
    frames_loaded = 0
    for i in range(frame_count):
        ret, frame = cap.read()
        if not ret: break
        if i < skip_first_frames: continue
        if load_cap and frames_loaded >= load_cap: break

        # OpenCV decodes to BGR; convert to RGB and normalize to [0, 1].
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame.astype(np.float32) / 255.0)
        frames_loaded += 1
    cap.release()

    if not frames: raise ValueError(f"No frames were extracted from the video: {video_path}")
    if debug: print(f"✅ {len(frames)} frames extracted successfully.")
    return torch.from_numpy(np.stack(frames)).to(torch.float16), fps


def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
    """Save a (T, H, W, C) frame tensor in [0, 1] to a video file."""
    if debug: print(f"🎬 Saving {frames_tensor.shape[0]} frames to: {output_path}")
    parent_dir = os.path.dirname(output_path)
    if parent_dir: os.makedirs(parent_dir, exist_ok=True)

    # Clamp to [0, 1] before the uint8 conversion to avoid wrap-around on overshoot.
    frames_np = (frames_tensor.cpu().float().clamp(0, 1).numpy() * 255.0).astype(np.uint8)
    T, H, W, _ = frames_np.shape

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))
    if not out.isOpened(): raise ValueError(f"Could not create video file: {output_path}")

    for frame in frames_np:
        out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    out.release()
    if debug: print(f"✅ Video saved successfully: {output_path}")


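# Each worker pins itself to a single GPU via CUDA_VISIBLE_DEVICES, upscales
# its shard of frames, and returns the result through a multiprocessing queue.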
def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):
    """Worker child process that runs upscaling on a dedicated GPU."""
    # Must be set before torch initializes CUDA in this process.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

    import torch
    from src.core.model_manager import configure_runner
    from src.core.generation import generation_loop

    try:
        frames_tensor = torch.from_numpy(frames_np).to(torch.float16)

        callback = (lambda b, t, _, m: progress_queue.put((proc_idx, b, t, m))) if progress_queue else None

        runner = configure_runner(shared_args["model"], shared_args["model_dir"], shared_args["preserve_vram"], shared_args["debug"])
        result_tensor = generation_loop(
            runner=runner, images=frames_tensor, cfg_scale=1.0, seed=shared_args["seed"],
            res_w=shared_args["resolution"], batch_size=shared_args["batch_size"],
            preserve_vram=shared_args["preserve_vram"], temporal_overlap=0,
            debug=shared_args["debug"], progress_callback=callback
        )
        return_queue.put((proc_idx, result_tensor.cpu().numpy()))
    except Exception as e:
        import traceback
        error_msg = f"ERROR in worker {proc_idx}: {e}\n{traceback.format_exc()}"
        print(error_msg)
        # Signal the failure on both queues so the parent stops waiting.
        if progress_queue: progress_queue.put((proc_idx, -1, -1, error_msg))
        return_queue.put((proc_idx, error_msg))


class SeedVRServer:
    """Manages SeedVR2 assets and orchestrates multi-GPU video upscaling."""

    def __init__(self, **kwargs):
        """Initialize the server, set up paths, and prepare the environment."""
        print("⚙️ SeedVRServer initializing...")
        self.SEEDVR_ROOT = SEEDVR_REPO_PATH
        self.CKPTS_ROOT = Path("/data/seedvr_models_fp16")
        self.OUTPUT_ROOT = Path(os.getenv("OUTPUT_ROOT", "/app/outputs"))
        self.INPUT_ROOT = Path(os.getenv("INPUT_ROOT", "/app/inputs"))
        self.HF_HOME_CACHE = Path(os.getenv("HF_HOME", "/data/.cache/huggingface"))
        self.REPO_URL = os.getenv("SEEDVR_GIT_URL", "https://github.com/numz/ComfyUI-SeedVR2_VideoUpscaler")
        self.NUM_GPUS_TOTAL = torch.cuda.device_count()

        for p in [self.CKPTS_ROOT, self.OUTPUT_ROOT, self.INPUT_ROOT, self.HF_HOME_CACHE]:
            p.mkdir(parents=True, exist_ok=True)

        self.setup_dependencies()
        print("📦 SeedVRServer ready.")

    def setup_dependencies(self):
        """Ensure the repository and model checkpoints are present."""
        if not (self.SEEDVR_ROOT / ".git").exists():
            print(f"[SeedVRServer] Cloning repository into {self.SEEDVR_ROOT}...")
            subprocess.run(["git", "clone", "--depth", "1", self.REPO_URL, str(self.SEEDVR_ROOT)], check=True)
        else:
            print("[SeedVRServer] SeedVR repository already exists.")

        print(f"[SeedVRServer] Checking checkpoints in {self.CKPTS_ROOT}...")
        model_files = {
            "seedvr2_ema_3b_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
            "ema_vae_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses"
        }
        for filename, repo_id in model_files.items():
            if not (self.CKPTS_ROOT / filename).exists():
                print(f"Downloading {filename}...")
                hf_hub_download(
                    repo_id=repo_id, filename=filename, local_dir=str(self.CKPTS_ROOT),
                    cache_dir=str(self.HF_HOME_CACHE), token=os.getenv("HF_TOKEN")
                )
        print("[SeedVRServer] Checkpoints are in place.")

    def run_inference(
        self,
        file_path: str, *,
        seed: int,
        resolution: int,
        batch_size: int,
        model: str = "seedvr2_ema_3b_fp16.safetensors",
        fps: Optional[float] = None,
        debug: bool = False,
        preserve_vram: bool = True,
        progress: Optional[Callable] = None
    ) -> str:
        """
        Run the full video-upscaling pipeline and return the output file path.

        `progress`, when given, is called as `progress(fraction, message)` with
        `fraction` in [0, 1].
        """
        if progress: progress(0.01, "⌛ Initializing...")

        if progress: progress(0.05, "🎬 Extracting video frames...")
        frames_tensor, original_fps = extract_frames_from_video(file_path, debug)

        if self.NUM_GPUS_TOTAL == 0:
            raise RuntimeError("No CUDA devices available.")

        # Split frames across all GPUs; torch.chunk may return fewer chunks
        # than requested when there are fewer frames than devices.
        device_list = list(range(self.NUM_GPUS_TOTAL))
        chunks = torch.chunk(frames_tensor, len(device_list), dim=0)
        num_devices = len(chunks)
        device_list = device_list[:num_devices]

        manager = mp.Manager()
        return_queue = manager.Queue()
        progress_queue = manager.Queue() if progress else None

        shared_args = {
            "model": model, "model_dir": str(self.CKPTS_ROOT), "preserve_vram": preserve_vram,
            "debug": debug, "seed": seed, "resolution": resolution, "batch_size": batch_size
        }

        if progress: progress(0.1, f"🚀 Starting generation on {num_devices} GPU(s)...")
        workers = []
        for idx, device_id in enumerate(device_list):
            p = mp.Process(target=_worker_process, args=(idx, device_id, chunks[idx].cpu().numpy(), shared_args, return_queue, progress_queue))
            p.start()
            workers.append(p)

        # Collect results while relaying per-worker progress updates.
        results_np = [None] * num_devices
        finished_workers = 0
        worker_progress = [0.0] * num_devices
        while finished_workers < num_devices:
            if progress_queue:
                while not progress_queue.empty():
                    try:
                        p_idx, b_idx, b_total, msg = progress_queue.get_nowait()
                        if b_idx == -1:
                            for w in workers: w.terminate()
                            raise RuntimeError(f"Error in worker {p_idx}: {msg}")
                        if b_total > 0: worker_progress[p_idx] = b_idx / b_total
                        total_progress = sum(worker_progress) / num_devices
                        progress(0.1 + total_progress * 0.85, f"GPU {p_idx+1}/{num_devices}: {msg}")
                    except queue.Empty: pass

            try:
                proc_idx, result = return_queue.get(timeout=0.2)
                if isinstance(result, str):
                    for w in workers: w.terminate()
                    raise RuntimeError(f"Worker {proc_idx} failed: {result}")
                results_np[proc_idx] = result
                worker_progress[proc_idx] = 1.0
                finished_workers += 1
            except queue.Empty: pass

        for p in workers: p.join()

        if any(r is None for r in results_np):
            raise RuntimeError("One or more workers failed to return a result.")

        # Reassemble the per-GPU chunks in their original order.
        result_tensor = torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)

        if progress: progress(0.95, "💾 Saving the final video...")

        out_dir = self.OUTPUT_ROOT / f"run_{int(time.time())}_{Path(file_path).stem}"
        out_dir.mkdir(parents=True, exist_ok=True)
        output_filepath = out_dir / f"result_{Path(file_path).stem}.mp4"

        final_fps = fps if fps and fps > 0 else original_fps
        save_frames_to_video(result_tensor, str(output_filepath), final_fps, debug)

        print(f"✅ Video saved successfully to: {output_filepath}")
        return str(output_filepath)


if __name__ == "__main__":
    print("🚀 Running the SeedVR server in standalone mode...")
    try:
        server = SeedVRServer()
        print("✅ Server initialized successfully. Ready to receive calls.")

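        # A minimal sketch of a standalone call; the input path and the
        # resolution/batch_size values below are hypothetical, so adjust them
        # to your environment before uncommenting:
        # output_path = server.run_inference(
        #     "/app/inputs/sample.mp4",
        #     seed=42,
        #     resolution=1072,
        #     batch_size=5,
        #     debug=True,
        # )
        # print(f"Upscaled video written to: {output_path}")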
    except Exception as e:
        print(f"❌ Failed to initialize the server: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)