Carlex22222 committed on
Commit
eecde3e
·
verified ·
1 Parent(s): d4386a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +106 -1
app.py CHANGED
@@ -39,6 +39,86 @@ download_models()
39
 
40
  # --- 2. LÓGICA DE INFERÊNCIA ---
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  def run_subprocess_with_logs(command, cwd):
43
  """Função genérica para rodar um subprocesso e streamar os logs para o Gradio."""
44
  log_output = f"Executando comando:\n{' '.join(command)}\n\n"
@@ -141,4 +221,29 @@ with gr.Blocks() as demo:
141
  vincie_edit_prompts = gr.Textbox(label="Prompts de Edição (separados por ';')", lines=5, placeholder="Ex: Add a crown to her head; Change the background...")
142
  vincie_edit_button = gr.Button("Executar Edição", variant="primary")
143
  with gr.Column(scale=2):
144
- vincie_edit_output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  # --- 2. LÓGICA DE INFERÊNCIA ---
41
 
42
def run_subprocess_with_logs(command, cwd):
    """Run *command* in *cwd* and stream its combined stdout/stderr to Gradio.

    Yields ``([], log_text)`` pairs: the first element is always an empty
    gallery placeholder (callers remap it onto their own output component),
    the second is the accumulated log text, growing line by line.

    Args:
        command: argv list passed to ``subprocess.Popen`` (shell=False).
        cwd: working directory the subprocess runs in.

    Raises:
        gr.Error: if the subprocess exits with a non-zero status.
    """
    log_output = f"Executando comando:\n{' '.join(command)}\n\n"
    yield [], log_output

    # PYTHONUNBUFFERED so the child flushes each line immediately and the
    # UI log updates in real time.
    env = os.environ.copy()
    env["PYTHONUNBUFFERED"] = "1"
    process = subprocess.Popen(
        command,
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge stderr into the same log stream
        text=True,
        encoding="utf-8",
        env=env,
    )
    try:
        # Iterating the pipe yields one line at a time until EOF.
        for line in process.stdout:
            log_output += line
            yield [], log_output
    finally:
        # Close the pipe even if the consumer abandons the generator early.
        process.stdout.close()

    # BUGFIX: the original checked `process.poll() != 0` right after EOF.
    # At that point poll() may still be None (child not reaped yet), and
    # `None != 0` raised a spurious error on successful runs. wait() blocks
    # until the real exit code is available.
    if process.wait() != 0:
        raise gr.Error("A inferência falhou. Verifique os logs.")
56
+
57
def run_seedvr_inference(video_path, seed):
    """Run SeedVR2-3B restoration on one uploaded file, streaming logs.

    Args:
        video_path: local path of the uploaded file (Gradio upload path).
        seed: random seed forwarded to the inference script.

    Yields:
        ``(output_path_or_None, log_text)`` pairs — ``None`` while the
        subprocess is still running, then the first produced output file.

    Raises:
        gr.Error: when no file was uploaded, the subprocess fails, or no
            output file is produced.
    """
    if video_path is None:
        raise gr.Error("Por favor, faça o upload de um arquivo para o SeedVR.")

    # Isolate each request in its own input/output folders keyed by a UUID.
    job_id = str(uuid.uuid4())
    input_dir = os.path.join("/app", "temp_inputs", job_id)
    output_dir = os.path.join("/app", "temp_outputs", job_id)
    os.makedirs(input_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)
    shutil.copy(video_path, input_dir)

    # The inference script resolves paths relative to the SeedVR checkout.
    input_folder_relative = os.path.relpath(input_dir, SEEDVR_DIR)
    output_folder_relative = os.path.relpath(output_dir, SEEDVR_DIR)
    command = [
        "torchrun", "--nproc-per-node=4", "projects/inference_seedvr2_3b.py",
        "--video_path", input_folder_relative,
        "--output_dir", output_folder_relative,
        "--seed", str(seed),
        "--res_h", "720", "--res_w", "1280",
    ]

    # BUGFIX: `logs` was only bound inside the loop; initialize it so the
    # final yield can never hit an unbound name.
    logs = ""
    for _gallery, logs in run_subprocess_with_logs(command, SEEDVR_DIR):
        yield None, logs  # keep the output slot empty while logs stream

    # BUGFIX: os.listdir order is arbitrary — sort so the "first" output
    # file is deterministic across runs.
    output_files = sorted(
        os.path.join(output_dir, f)
        for f in os.listdir(output_dir)
        if f.endswith((".mp4", ".png"))
    )
    if not output_files:
        raise gr.Error("Nenhum arquivo de saída encontrado.")
    yield output_files[0], logs
75
+
76
+ def run_vincie_edit_inference(files, prompts_str):
77
+ # (Lógica específica para o modo de EDIÇÃO do V-INT)
78
+ if not files: raise gr.Error("Por favor, faça o upload de uma imagem para editar.")
79
+ if not prompts_str: raise gr.Error("Por favor, forneça os prompts de edição.")
80
+
81
+ # app.py (Interface final com 3 abas: SeedVR, V-INT Edit, V-INT Pipeline)
82
+
83
+ import gradio as gr
84
+ import os
85
+ import subprocess
86
+ import shutil
87
+ import uuid
88
+ from huggingface_hub import snapshot_download
89
+ import spaces
90
+
91
+ # --- 1. CONFIGURAÇÃO E DOWNLOAD DOS MODELOS ---
92
+
93
+ SEEDVR_DIR = "/app/SeedVR"
94
+ VINCIE_DIR = "/app/VINCIE"
95
+ HF_TOKEN = os.environ.get("HF_TOKEN")
96
+
97
@spaces.GPU
def download_models():
    """Download the SeedVR and VINCIE model weights if they are not already present."""
    # SeedVR-3B: the presence of the EMA checkpoint file under <SEEDVR_DIR>/ckpts
    # is used as the "already downloaded" marker.
    if not os.path.exists(os.path.join(SEEDVR_DIR, "ckpts", "seedvr2_ema_3b.pth")):
        print("Baixando modelo do SeedVR-3B...")
        # NOTE(review): `local_dir_use_symlinks` is deprecated in recent
        # huggingface_hub releases — confirm the pinned version still accepts it.
        snapshot_download(repo_id="ByteDance-Seed/SeedVR2-3B", local_dir=os.path.join(SEEDVR_DIR, "ckpts"), token=HF_TOKEN, local_dir_use_symlinks=False)
    else: print("Modelo do SeedVR já existe.")

    # VINCIE-3B: weights land in <VINCIE_DIR>/ckpt/VINCIE-3B; the directory's
    # existence is the marker.
    if not os.path.exists(os.path.join(VINCIE_DIR, "ckpt", "VINCIE-3B")):
        print("Baixando modelo do VINCIE-3B...")
        snapshot_download(repo_id="ByteDance-Seed/VINCIE-3B", local_dir=os.path.join(VINCIE_DIR, "ckpt", "VINCIE-3B"), token=HF_TOKEN, local_dir_use_symlinks=False)
    else: print("Modelo do VINCIE já existe.")

    # Example assets and configs used by the Gradio examples come from the
    # VINCIE Space repo; only assets/* and configs/* are fetched.
    if not os.path.exists(os.path.join(VINCIE_DIR, "assets", "woman_pineapple.png")):
        print("Baixando assets e configs do V-INT...")
        snapshot_download(repo_id="ByteDance-Seed/VINCIE", repo_type="space", local_dir=VINCIE_DIR, token=HF_TOKEN, allow_patterns=["assets/*", "configs/*"], local_dir_use_symlinks=False)
    else: print("Assets e configs do V-INT já existem.")
117
+
118
+ download_models()
119
+
120
+ # --- 2. LÓGICA DE INFERÊNCIA ---
121
+
122
  def run_subprocess_with_logs(command, cwd):
123
  """Função genérica para rodar um subprocesso e streamar os logs para o Gradio."""
124
  log_output = f"Executando comando:\n{' '.join(command)}\n\n"
 
221
  vincie_edit_prompts = gr.Textbox(label="Prompts de Edição (separados por ';')", lines=5, placeholder="Ex: Add a crown to her head; Change the background...")
222
  vincie_edit_button = gr.Button("Executar Edição", variant="primary")
223
  with gr.Column(scale=2):
224
+ vincie_edit_output = gr.Gallery(label="Imagens Editadas")
225
+ vincie_edit_logs = gr.Textbox(label="Logs", lines=10, interactive=False)
226
+ gr.Examples(examples=[[[os.path.join(VINCIE_DIR, "assets/woman_pineapple.png")], "Lower the pineapple beside her face; Add a crown to the woman's head."]], inputs=[vincie_edit_input, vincie_edit_prompts])
227
+ vincie_edit_button.click(fn=run_vincie_edit_inference, inputs=[vincie_edit_input, vincie_edit_prompts], outputs=[vincie_edit_output, vincie_edit_logs])
228
+
229
+ # --- NOVA ABA V-INT PIPELINE ---
230
+ with gr.TabItem("V-INT (Pipeline/Composição)"):
231
+ with gr.Row():
232
+ with gr.Column(scale=1):
233
+ vincie_pipe_inputs = gr.Files(label="Upload de Múltiplas Imagens para Composição (<IMG0>, <IMG1>, etc.)", file_types=["image"])
234
+ vincie_pipe_prompt = gr.Textbox(label="Prompt de Composição Final", lines=5, placeholder="Ex: Based on <IMG0> and <IMG1>, a woman in <IMG0> is holding the cat from <IMG1>. Output <IMG2>:")
235
+ vincie_pipe_button = gr.Button("Executar Pipeline", variant="primary")
236
+ with gr.Column(scale=2):
237
+ vincie_pipe_output = gr.Gallery(label="Imagem Composta")
238
+ vincie_pipe_logs = gr.Textbox(label="Logs", lines=10, interactive=False)
239
+
240
+ # Prepara os caminhos para os arquivos de exemplo
241
+ father_path = os.path.join(VINCIE_DIR, "assets/father.png")
242
+ mother_path = os.path.join(VINCIE_DIR, "assets/mother.png")
243
+ son_path = os.path.join(VINCIE_DIR, "assets/son.png")
244
+ family_prompt = "Based on <IMG0>, <IMG1>, and <IMG2>, A smiling family with the father from <IMG0>, mother from <IMG1>, and son from <IMG2>, poses for a portrait amidst the sunlit trees. Output <IMG3>:"
245
+ gr.Examples(examples=[[[father_path, mother_path, son_path], family_prompt]], inputs=[vincie_pipe_inputs, vincie_pipe_prompt])
246
+
247
+ vincie_pipe_button.click(fn=run_vincie_pipeline_inference, inputs=[vincie_pipe_inputs, vincie_pipe_prompt], outputs=[vincie_pipe_output, vincie_pipe_logs])
248
+
249
+ demo.queue().launch()