# app.py (VERSÃO FINAL COM LAZY IMPORT)
"""Gradio front-end for SeedVR2-3B inference.

Downloads the model checkpoints into /tmp/models/ckpts at import time,
patches the upstream inference script to point at those absolute paths,
and streams torchrun's output into the UI while a job runs.
"""
import gradio as gr
import os
import uuid
import shutil
import subprocess
import mimetypes
from pathlib import Path

# The torch.hub import was deliberately moved below (lazy import): it is only
# pulled in right before the download check runs.

# --- BLOCO DE CONFIGURAÇÃO E DOWNLOAD DE MODELO ---
APP_DIR = "/app"
SEEDVR_DIR = os.path.join(APP_DIR, "SeedVR")
MODEL_CACHE_DIR = "/tmp/models"
CKPTS_DIR = os.path.join(MODEL_CACHE_DIR, "ckpts")
os.makedirs(CKPTS_DIR, exist_ok=True)

print("Verificando e baixando modelos para /tmp/models/ckpts...")
# Lazy import: torch is heavy, so it is imported only when needed.
from torch.hub import download_url_to_file

_HF_REPO = "https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main"
files_to_download = {
    "seedvr2_ema_3b.pth": f"{_HF_REPO}/seedvr2_ema_3b.pth",
    "ema_vae.pth": f"{_HF_REPO}/ema_vae.pth",
    "pos_emb.pt": f"{_HF_REPO}/pos_emb.pt",
    "neg_emb.pt": f"{_HF_REPO}/neg_emb.pt",
}

for filename, url in files_to_download.items():
    destination_path = os.path.join(CKPTS_DIR, filename)
    if not os.path.exists(destination_path):
        # FIX: the original f-strings contained no placeholder, so the log
        # never said which file was being downloaded / skipped.
        print(f"Baixando {filename}...")
        download_url_to_file(url, destination_path)
    else:
        print(f"{filename} já existe.")
print("Verificação de modelos concluída.")


def _write_patched_script(patched_script_path):
    """Copy the upstream inference script to *patched_script_path*, rewriting
    its hard-coded relative checkpoint paths to absolute paths under CKPTS_DIR.
    """
    original_script_path = os.path.join(
        SEEDVR_DIR, "projects", "inference_seedvr2_3b.py"
    )
    with open(original_script_path, 'r') as f:
        script_content = f.read()
    script_content = script_content.replace(
        "'./ckpts/seedvr2_ema_3b.pth'",
        f"'{os.path.join(CKPTS_DIR, 'seedvr2_ema_3b.pth')}'",
    )
    script_content = script_content.replace(
        "runner.configure_vae_model()",
        f"runner.configure_vae_model(checkpoint_path='{os.path.join(CKPTS_DIR, 'ema_vae.pth')}')",
    )
    script_content = script_content.replace(
        "'pos_emb.pt'", f"'{os.path.join(CKPTS_DIR, 'pos_emb.pt')}'"
    )
    script_content = script_content.replace(
        "'neg_emb.pt'", f"'{os.path.join(CKPTS_DIR, 'neg_emb.pt')}'"
    )
    with open(patched_script_path, 'w') as f:
        f.write(script_content)


def run_inference(video_path, seed, res_h, res_w):
    """Run SeedVR2 inference on an uploaded video/image via torchrun.

    Generator consumed by Gradio streaming: intermediate yields are
    ``(None, None, log_text)``; the final yield places the result path in the
    image or video slot depending on its detected media type.

    Raises:
        gr.Error: when no file was uploaded, the subprocess exits non-zero,
            or no output file is produced.
    """
    if video_path is None:
        raise gr.Error("Por favor, faça o upload de um arquivo.")

    job_id = str(uuid.uuid4())
    input_dir = os.path.join("/tmp", "temp_inputs", job_id)
    output_dir = os.path.join("/tmp", "temp_outputs", job_id)
    os.makedirs(input_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)
    shutil.copy(video_path, input_dir)

    # FIX: accumulate log chunks in a list instead of quadratic `str +=`.
    log_parts = []
    patched_script_path = os.path.join("/tmp", f"inference_patched_{job_id}.py")
    try:
        _write_patched_script(patched_script_path)

        # The inference script resolves paths relative to SEEDVR_DIR (its cwd).
        input_folder_relative = os.path.relpath(input_dir, SEEDVR_DIR)
        output_folder_relative = os.path.relpath(output_dir, SEEDVR_DIR)
        patched_script_relative_path = os.path.relpath(
            patched_script_path, SEEDVR_DIR
        )
        # Generalized: process count is env-configurable, default matches the
        # original hard-coded value of 4.
        nproc = os.environ.get("NPROC_PER_NODE", "4")
        command = [
            "torchrun", f"--nproc-per-node={nproc}",
            patched_script_relative_path,
            "--video_path", input_folder_relative,
            "--output_dir", output_folder_relative,
            "--seed", str(seed),
            "--res_h", str(res_h),
            "--res_w", str(res_w),
        ]
        env = os.environ.copy()
        env["PYTHONUNBUFFERED"] = "1"  # stream child output line by line

        log_parts.append(f"Executando comando: {' '.join(command)}\n\n")
        yield None, None, "".join(log_parts)

        process = subprocess.Popen(
            command,
            cwd=SEEDVR_DIR,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            encoding='utf-8',
            env=env,
        )
        # Iterating the text stream blocks per line and ends cleanly at EOF —
        # equivalent to the original readline/poll loop.
        for line in process.stdout:
            log_parts.append(line)
            yield None, None, "".join(log_parts)
        # FIX: wait() reaps the child and returns its exit code; the original
        # re-polled after the loop, relying on the poll done inside the loop.
        if process.wait() != 0:
            raise gr.Error("A inferência falhou.")

        # FIX: sort so the selected output file is deterministic
        # (os.listdir order is arbitrary).
        output_files = sorted(
            f for f in os.listdir(output_dir)
            if f.endswith(('.mp4', '.png', '.jpg', '.jpeg'))
        )
        if not output_files:
            raise gr.Error("Nenhum arquivo de saída foi encontrado.")
        result_path = os.path.join(output_dir, output_files[0])
        media_type, _ = mimetypes.guess_type(result_path)
        if media_type and media_type.startswith("image"):
            yield result_path, None, "".join(log_parts)
        else:
            yield None, result_path, "".join(log_parts)
    finally:
        # Best-effort cleanup; output_dir is kept so Gradio can serve the result.
        shutil.rmtree(input_dir, ignore_errors=True)
        if os.path.exists(patched_script_path):
            os.remove(patched_script_path)


with gr.Blocks(css="footer {display: none !important}") as demo:
    gr.Markdown("# 🚀 Interface de Inferência para SeedVR2")
    gr.Markdown("Faça o upload de um vídeo ou imagem, ajuste os parâmetros e clique em 'Executar'.")
    with gr.Row():
        with gr.Column(scale=1):
            input_media = gr.Video(label="Upload de Vídeo ou Imagem")
            seed = gr.Number(value=666, label="Seed")
            with gr.Accordion("Configurações Avançadas", open=False):
                res_h = gr.Number(value=720, label="Altura da Saída (res_h)")
                res_w = gr.Number(value=1280, label="Largura da Saída (res_w)")
            run_button = gr.Button("Executar", variant="primary")
        with gr.Column(scale=2):
            output_image = gr.Image(label="Saída de Imagem")
            output_video = gr.Video(label="Saída de Vídeo")
            # NOTE(review): source formatting was lost; the log box is placed
            # with the output widgets — confirm intended layout.
            log_box = gr.Textbox(
                label="Logs em Tempo Real",
                lines=15,
                autoscroll=True,
                interactive=False,
            )
    run_button.click(
        fn=run_inference,
        inputs=[input_media, seed, res_h, res_w],
        outputs=[output_image, output_video, log_box],
    )

demo.queue(max_size=10).launch()