aducsdr committed on
Commit
5553cc8
·
verified ·
1 Parent(s): 9ef5f50

Update aduc_framework/managers/seedvr_manager.py

Browse files
aduc_framework/managers/seedvr_manager.py CHANGED
@@ -2,10 +2,14 @@
2
  #
3
  # Copyright (C) 2025 Carlos Rodrigues dos Santos
4
  #
5
- # Version: 6.1.0 (Force 3B Model)
6
  #
7
- # Esta versão remove a seleção de modelo e força o uso exclusivo do SeedVR 3B.
8
- # Também otimiza o setup para não baixar o checkpoint do modelo 7B.
 
 
 
 
9
 
10
  import torch
11
  import os
@@ -13,7 +17,6 @@ import gc
13
  import logging
14
  import sys
15
  import subprocess
16
- import threading
17
  from pathlib import Path
18
  from urllib.parse import urlparse
19
  from torch.hub import download_url_to_file
@@ -32,23 +35,21 @@ DEPS_DIR = APP_ROOT / "deps"
32
  SEEDVR_SPACE_DIR = DEPS_DIR / "SeedVR_Space"
33
  SEEDVR_SPACE_URL = "https://huggingface.co/spaces/ByteDance-Seed/SeedVR2-3B"
34
 
35
- class SeedVrWorker:
36
- """Representa uma única instância do pipeline SeedVR em um dispositivo isolado."""
37
  def __init__(self, device_id: str):
38
  self.global_device_id = device_id
39
- self.local_device_name = 'cuda:0'
40
  self.gpu_index = self.global_device_id.split(':')[-1]
 
41
  self.runner = None
42
- self.is_initialized = False
43
- self.setup_complete = self._check_and_run_global_setup()
44
- logger.info(f"SeedVR Worker inicializado para a GPU global {self.global_device_id}.")
45
 
46
  @staticmethod
47
  def _check_and_run_global_setup():
48
- """Executa o setup de arquivos uma única vez para toda a aplicação."""
49
  setup_flag = DEPS_DIR / "seedvr.setup.complete"
50
- if str(APP_ROOT) not in sys.path:
51
- sys.path.insert(0, str(APP_ROOT))
52
  if setup_flag.exists(): return True
53
 
54
  logger.info("--- Iniciando Setup Global do SeedVR (primeira execução) ---")
@@ -61,20 +62,38 @@ class SeedVrWorker:
61
  source, target = SEEDVR_SPACE_DIR / dirname, APP_ROOT / dirname
62
  if not target.exists(): shutil.copytree(source, target)
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  try:
65
  import apex
66
  except ImportError:
67
- apex_url = 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl'
68
  apex_wheel_path = _load_file_from_url(url=apex_url, model_dir=str(DEPS_DIR))
69
  subprocess.run(f"pip install {apex_wheel_path}", check=True, shell=True)
70
 
71
  ckpt_dir = APP_ROOT / 'ckpts'
72
  ckpt_dir.mkdir(exist_ok=True)
73
-
74
- # <<< MODIFICAÇÃO: Removido o download do modelo 7B >>>
75
  model_urls = {
76
  'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
77
  'dit_3b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
 
78
  'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
79
  'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt'
80
  }
@@ -83,10 +102,9 @@ class SeedVrWorker:
83
 
84
  setup_flag.touch()
85
  logger.info("--- Setup Global do SeedVR Concluído ---")
86
- return True
87
 
88
- def initialize_runner(self):
89
- """Carrega o modelo 3B para a VRAM do dispositivo."""
90
  if self.runner is not None: return
91
 
92
  os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
@@ -94,9 +112,8 @@ class SeedVrWorker:
94
  from projects.video_diffusion_sr.infer import VideoDiffusionInfer
95
  from common.config import load_config
96
 
97
- logger.info(f"Worker {self.global_device_id}: Inicializando runner SeedVR 3B... (Processo vê apenas {self.local_device_name})")
98
 
99
- # <<< MODIFICAÇÃO: Caminhos fixados para o modelo 3B >>>
100
  config_path = APP_ROOT / 'configs_3b' / 'main.yaml'
101
  checkpoint_path = APP_ROOT / 'ckpts' / 'seedvr2_ema_3b.pth'
102
 
@@ -104,134 +121,80 @@ class SeedVrWorker:
104
  self.runner = VideoDiffusionInfer(config)
105
  OmegaConf.set_readonly(self.runner.config, False)
106
 
 
107
  self.runner.configure_dit_model(device=self.local_device_name, checkpoint=str(checkpoint_path))
108
  self.runner.configure_vae_model()
109
 
110
- self.is_initialized = True
111
- logger.info(f"Worker {self.global_device_id}: Runner 3B pronto na VRAM.")
112
 
113
- def unload_runner(self):
114
  """Descarrega os modelos da VRAM e limpa o ambiente."""
115
  if self.runner is not None:
116
- del self.runner
117
- self.runner = None
118
- gc.collect()
119
- torch.cuda.empty_cache()
120
- self.is_initialized = False
121
- logger.info(f"Worker {self.global_device_id}: Runner descarregado da VRAM.")
122
-
123
- if 'CUDA_VISIBLE_DEVICES' in os.environ:
124
- del os.environ['CUDA_VISIBLE_DEVICES']
125
-
126
- def process_video_internal(self, input_video_path, output_video_path, prompt, steps, seed):
127
- """Executa a inferência em um ambiente de GPU isolado."""
128
- os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
129
- device = torch.device(self.local_device_name)
130
-
131
- from common.seed import set_seed
132
- from data.image.transforms.divisible_crop import DivisibleCrop
133
- from data.image.transforms.na_resize import NaResize
134
- from data.video.transforms.rearrange import Rearrange
135
- from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
136
- from torchvision.transforms import Compose, Lambda, Normalize
137
- from torchvision.io.video import read_video
138
-
139
- set_seed(seed, same_across_ranks=True)
140
- self.runner.config.diffusion.timesteps.sampling.steps = steps
141
- self.runner.configure_diffusion()
142
-
143
- video_tensor = read_video(input_video_path, output_format="TCHW")[0] / 255.0
144
- res_h, res_w = video_tensor.shape[-2:]
145
- video_transform = Compose([
146
- NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False),
147
- Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
148
- DivisibleCrop((16, 16)),
149
- Normalize(0.5, 0.5),
150
- Rearrange("t c h w -> c t h w"),
151
- ])
152
- cond_latents = [video_transform(video_tensor.to(device))]
153
- self.runner.dit.to("cpu")
154
- self.runner.vae.to(device)
155
- cond_latents = self.runner.vae_encode(cond_latents)
156
- self.runner.vae.to("cpu"); gc.collect(); torch.cuda.empty_cache()
157
- self.runner.dit.to(device)
158
-
159
- pos_emb = torch.load(APP_ROOT / 'ckpts' / 'pos_emb.pt').to(device)
160
- neg_emb = torch.load(APP_ROOT / 'ckpts' / 'neg_emb.pt').to(device)
161
- text_embeds_dict = {"texts_pos": [pos_emb], "texts_neg": [neg_emb]}
162
-
163
- noises = [torch.randn_like(latent) for latent in cond_latents]
164
- conditions = [self.runner.get_condition(noise, latent_blur=latent, task="sr") for noise, latent in zip(noises, cond_latents)]
165
-
166
- with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
167
- video_tensors = self.runner.inference(noises=noises, conditions=conditions, dit_offload=True, **text_embeds_dict)
168
-
169
- self.runner.dit.to("cpu"); gc.collect(); torch.cuda.empty_cache()
170
- self.runner.vae.to(device)
171
- samples = self.runner.vae_decode(video_tensors)
172
- final_sample = samples[0]
173
- input_video_sample = cond_latents[0]
174
- if final_sample.shape[1] < input_video_sample.shape[1]:
175
- input_video_sample = input_video_sample[:, :final_sample.shape[1]]
176
-
177
- final_sample = wavelet_reconstruction(rearrange(final_sample, "c t h w -> t c h w"), rearrange(input_video_sample, "c t h w -> t c h w"))
178
- final_sample = rearrange(final_sample, "t c h w -> t h w c")
179
- final_sample = final_sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round()
180
- final_sample_np = final_sample.to(torch.uint8).cpu().numpy()
181
-
182
- mediapy.write_video(output_video_path, final_sample_np, fps=24)
183
 
184
  if 'CUDA_VISIBLE_DEVICES' in os.environ:
185
  del os.environ['CUDA_VISIBLE_DEVICES']
186
-
187
- return output_video_path
188
-
189
- class SeedVrPoolManager:
190
- """Gerencia um pool de SeedVrWorkers para processamento em GPUs dedicadas."""
191
- def __init__(self, device_ids: list[str]):
192
- logger.info(f"SEEDVR POOL MANAGER: Criando workers para os dispositivos: {device_ids}")
193
- if not device_ids or 'cpu' in device_ids:
194
- raise ValueError("SeedVrPoolManager requer GPUs dedicadas.")
195
- self.workers = [SeedVrWorker(device_id) for device_id in device_ids]
196
- self.current_worker_index = 0
197
- self.lock = threading.Lock()
198
- self.last_cleanup_thread = None
199
 
200
- def _cleanup_worker_thread(self, worker: SeedVrWorker):
201
- """Thread para descarregar o worker em segundo plano."""
202
- logger.info(f"SEEDVR CLEANUP THREAD: Iniciando limpeza de {worker.global_device_id} em background...")
203
- worker.unload_runner()
204
-
205
- # <<< MODIFICAÇÃO: Removido o argumento 'model_version' da assinatura pública >>>
206
  def process_video(self, input_video_path: str, output_video_path: str, prompt: str,
207
  steps: int = 100, seed: int = 666) -> str:
208
- worker_to_use = None
209
  try:
210
- with self.lock:
211
- if self.last_cleanup_thread and self.last_cleanup_thread.is_alive():
212
- self.last_cleanup_thread.join()
213
-
214
- worker_to_use = self.workers[self.current_worker_index]
215
- previous_worker_index = (self.current_worker_index - 1 + len(self.workers)) % len(self.workers)
216
- worker_to_cleanup = self.workers[previous_worker_index]
217
-
218
- cleanup_thread = threading.Thread(target=self._cleanup_worker_thread, args=(worker_to_cleanup,))
219
- cleanup_thread.start()
220
- self.last_cleanup_thread = cleanup_thread
221
-
222
- # Chama initialize_runner sem argumentos, pois ele agora sabe que deve usar o 3B
223
- worker_to_use.initialize_runner()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
 
225
- self.current_worker_index = (self.current_worker_index + 1) % len(self.workers)
 
 
 
 
226
 
227
- logger.info(f"SEEDVR POOL MANAGER: Processando vídeo na GPU {worker_to_use.global_device_id}...")
228
- # Passa os argumentos para a função interna, sem 'model_version'
229
- return worker_to_use.process_video_internal(
230
- input_video_path, output_video_path, prompt, steps, seed
231
- )
232
- except Exception as e:
233
- logger.error(f"SEEDVR POOL MANAGER: Erro durante o processamento de vídeo: {e}", exc_info=True)
234
- raise e
 
 
235
 
236
  def _load_file_from_url(url, model_dir='./', file_name=None):
237
  os.makedirs(model_dir, exist_ok=True)
@@ -248,16 +211,20 @@ class SeedVrPlaceholder:
248
  return input_video_path
249
 
250
  try:
251
- with open("config.yaml", 'r') as f:
252
- config = yaml.safe_load(f)
253
  seedvr_gpus_required = config['specialists'].get('seedvr', {}).get('gpus_required', 0)
254
- seedvr_device_ids = hardware_manager.allocate_gpus('SeedVR', seedvr_gpus_required)
255
- if seedvr_gpus_required > 0 and 'cpu' not in seedvr_device_ids:
256
- seedvr_manager_singleton = SeedVrPoolManager(device_ids=seedvr_device_ids)
257
- logger.info("Especialista de Masterização HD (SeedVR Pool) pronto.")
 
 
 
 
 
258
  else:
259
  seedvr_manager_singleton = SeedVrPlaceholder()
260
- logger.warning("SeedVR Pool Manager não foi inicializado.")
261
  except Exception as e:
262
  logger.critical(f"Falha CRÍTICA ao inicializar o SeedVrManager: {e}", exc_info=True)
263
  seedvr_manager_singleton = SeedVrPlaceholder()
 
2
  #
3
  # Copyright (C) 2025 Carlos Rodrigues dos Santos
4
  #
5
+ # Version: 10.0.0 (Definitive Monkey Patch)
6
  #
7
+ # Esta é a arquitetura final e mais robusta. O paralelismo problemático
8
+ # é desativado programaticamente via "monkey patching" no decorador `master_only`.
9
+ # Isso elimina a necessidade de gerenciar `torch.distributed`, simplificando
10
+ # o código e resolvendo a causa raiz de todos os erros de paralelismo.
11
+ # A isolação de GPU com CUDA_VISIBLE_DEVICES é mantida como a melhor
12
+ # prática para o gerenciamento de hardware.
13
 
14
  import torch
15
  import os
 
17
  import logging
18
  import sys
19
  import subprocess
 
20
  from pathlib import Path
21
  from urllib.parse import urlparse
22
  from torch.hub import download_url_to_file
 
35
  SEEDVR_SPACE_DIR = DEPS_DIR / "SeedVR_Space"
36
  SEEDVR_SPACE_URL = "https://huggingface.co/spaces/ByteDance-Seed/SeedVR2-3B"
37
 
38
+ class SeedVrManager:
39
+ """Gerencia uma única instância do pipeline SeedVR em uma GPU dedicada e isolada."""
40
  def __init__(self, device_id: str):
41
  self.global_device_id = device_id
42
+ self.local_device_name = 'cuda:0' # O que o processo enxergará
43
  self.gpu_index = self.global_device_id.split(':')[-1]
44
+
45
  self.runner = None
46
+ self._check_and_run_global_setup()
47
+ logger.info(f"SeedVR Manager (Single Instance) inicializado para operar na GPU {self.global_device_id}.")
 
48
 
49
  @staticmethod
50
  def _check_and_run_global_setup():
 
51
  setup_flag = DEPS_DIR / "seedvr.setup.complete"
52
+ if str(APP_ROOT) not in sys.path: sys.path.insert(0, str(APP_ROOT))
 
53
  if setup_flag.exists(): return True
54
 
55
  logger.info("--- Iniciando Setup Global do SeedVR (primeira execução) ---")
 
62
  source, target = SEEDVR_SPACE_DIR / dirname, APP_ROOT / dirname
63
  if not target.exists(): shutil.copytree(source, target)
64
 
65
+ # <<< --- MONKEY PATCH PARA DESATIVAR O PARALELISMO --- >>>
66
+ try:
67
+ from common import decorators
68
+ import functools
69
+
70
+ # Define um decorador que não faz nada e apenas retorna a função original
71
+ def _passthrough_decorator(func):
72
+ @functools.wraps(func)
73
+ def wrapped(*args, **kwargs):
74
+ return func(*args, **kwargs)
75
+ return wrapped
76
+
77
+ # Substitui o decorador problemático pelo nosso decorador inofensivo
78
+ decorators.master_only = _passthrough_decorator
79
+ logger.info("Monkey patch aplicado com sucesso em 'common.decorators.master_only' para desativar o paralelismo.")
80
+ except Exception as e:
81
+ logger.error(f"Falha ao aplicar o monkey patch: {e}", exc_info=True)
82
+ # Continua mesmo se falhar, pode funcionar em alguns casos.
83
+
84
  try:
85
  import apex
86
  except ImportError:
87
+ apex_url = 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl'
88
  apex_wheel_path = _load_file_from_url(url=apex_url, model_dir=str(DEPS_DIR))
89
  subprocess.run(f"pip install {apex_wheel_path}", check=True, shell=True)
90
 
91
  ckpt_dir = APP_ROOT / 'ckpts'
92
  ckpt_dir.mkdir(exist_ok=True)
 
 
93
  model_urls = {
94
  'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
95
  'dit_3b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
96
+ 'dit_7b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-7B/resolve/main/seedvr2_ema_7b.pth',
97
  'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
98
  'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt'
99
  }
 
102
 
103
  setup_flag.touch()
104
  logger.info("--- Setup Global do SeedVR Concluído ---")
 
105
 
106
+ def _initialize_runner(self):
107
+ """Carrega o modelo 3B em um ambiente de GPU isolado."""
108
  if self.runner is not None: return
109
 
110
  os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
 
112
  from projects.video_diffusion_sr.infer import VideoDiffusionInfer
113
  from common.config import load_config
114
 
115
+ logger.info(f"Manager na GPU {self.global_device_id}: Inicializando runner SeedVR 3B...")
116
 
 
117
  config_path = APP_ROOT / 'configs_3b' / 'main.yaml'
118
  checkpoint_path = APP_ROOT / 'ckpts' / 'seedvr2_ema_3b.pth'
119
 
 
121
  self.runner = VideoDiffusionInfer(config)
122
  OmegaConf.set_readonly(self.runner.config, False)
123
 
124
+ # Agora o código decorado dentro desta função usará nosso patch inofensivo
125
  self.runner.configure_dit_model(device=self.local_device_name, checkpoint=str(checkpoint_path))
126
  self.runner.configure_vae_model()
127
 
128
+ logger.info(f"Manager na GPU {self.global_device_id}: Runner 3B pronto na VRAM.")
 
129
 
130
+ def _unload_runner(self):
131
  """Descarrega os modelos da VRAM e limpa o ambiente."""
132
  if self.runner is not None:
133
+ del self.runner; self.runner = None
134
+ gc.collect(); torch.cuda.empty_cache()
135
+ logger.info(f"Manager na GPU {self.global_device_id}: Runner descarregado da VRAM.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
 
137
  if 'CUDA_VISIBLE_DEVICES' in os.environ:
138
  del os.environ['CUDA_VISIBLE_DEVICES']
 
 
 
 
 
 
 
 
 
 
 
 
 
139
 
 
 
 
 
 
 
140
  def process_video(self, input_video_path: str, output_video_path: str, prompt: str,
141
  steps: int = 100, seed: int = 666) -> str:
142
+ """Ciclo completo de carga, processamento e descarga para uma única tarefa."""
143
  try:
144
+ self._initialize_runner()
145
+
146
+ device = torch.device(self.local_device_name)
147
+
148
+ from common.seed import set_seed
149
+ from data.image.transforms.divisible_crop import DivisibleCrop
150
+ from data.image.transforms.na_resize import NaResize
151
+ from data.video.transforms.rearrange import Rearrange
152
+ from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
153
+ from torchvision.transforms import Compose, Lambda, Normalize
154
+ from torchvision.io.video import read_video
155
+
156
+ set_seed(seed, same_across_ranks=True)
157
+ self.runner.config.diffusion.timesteps.sampling.steps = steps
158
+ self.runner.configure_diffusion()
159
+
160
+ video_tensor = read_video(input_video_path, output_format="TCHW")[0] / 255.0
161
+ res_h, res_w = video_tensor.shape[-2:]
162
+ video_transform = Compose([
163
+ NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False),
164
+ Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
165
+ DivisibleCrop((16, 16)), Normalize(0.5, 0.5), Rearrange("t c h w -> c t h w"),
166
+ ])
167
+ cond_latents = [video_transform(video_tensor.to(device))]
168
+ self.runner.dit.to("cpu"); self.runner.vae.to(device)
169
+ cond_latents = self.runner.vae_encode(cond_latents)
170
+ self.runner.vae.to("cpu"); gc.collect(); torch.cuda.empty_cache(); self.runner.dit.to(device)
171
+
172
+ pos_emb = torch.load(APP_ROOT / 'ckpts' / 'pos_emb.pt').to(device)
173
+ neg_emb = torch.load(APP_ROOT / 'ckpts' / 'neg_emb.pt').to(device)
174
+ text_embeds_dict = {"texts_pos": [pos_emb], "texts_neg": [neg_emb]}
175
+
176
+ noises = [torch.randn_like(latent) for latent in cond_latents]
177
+ conditions = [self.runner.get_condition(noise, latent_blur=latent, task="sr") for noise, latent in zip(noises, cond_latents)]
178
+
179
+ with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
180
+ video_tensors = self.runner.inference(noises=noises, conditions=conditions, dit_offload=True, **text_embeds_dict)
181
 
182
+ self.runner.dit.to("cpu"); gc.collect(); torch.cuda.empty_cache(); self.runner.vae.to(device)
183
+ samples = self.runner.vae_decode(video_tensors)
184
+ final_sample, input_video_sample = samples[0], cond_latents[0]
185
+ if final_sample.shape[1] < input_video_sample.shape[1]:
186
+ input_video_sample = input_video_sample[:, :final_sample.shape[1]]
187
 
188
+ final_sample = wavelet_reconstruction(rearrange(final_sample, "c t h w -> t c h w"), rearrange(input_video_sample, "c t h w -> t c h w"))
189
+ final_sample = rearrange(final_sample, "t c h w -> t h w c")
190
+ final_sample = final_sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round()
191
+ final_sample_np = final_sample.to(torch.uint8).cpu().numpy()
192
+
193
+ mediapy.write_video(output_video_path, final_sample_np, fps=24)
194
+ return output_video_path
195
+ finally:
196
+ self._unload_runner()
197
+
198
 
199
  def _load_file_from_url(url, model_dir='./', file_name=None):
200
  os.makedirs(model_dir, exist_ok=True)
 
211
  return input_video_path
212
 
213
  try:
214
+ with open("config.yaml", 'r') as f: config = yaml.safe_load(f)
 
215
  seedvr_gpus_required = config['specialists'].get('seedvr', {}).get('gpus_required', 0)
216
+
217
+ if seedvr_gpus_required > 0:
218
+ seedvr_device_ids = hardware_manager.allocate_gpus('SeedVR', seedvr_gpus_required)
219
+ if 'cpu' not in seedvr_device_ids:
220
+ device_to_use = seedvr_device_ids[0]
221
+ seedvr_manager_singleton = SeedVrManager(device_id=device_to_use)
222
+ logger.info(f"Especialista de Masterização HD (SeedVR Single Instance) pronto para usar a GPU {device_to_use}.")
223
+ else:
224
+ seedvr_manager_singleton = SeedVrPlaceholder()
225
  else:
226
  seedvr_manager_singleton = SeedVrPlaceholder()
227
+ logger.warning("SeedVR Manager não foi inicializado (gpus_required: 0).")
228
  except Exception as e:
229
  logger.critical(f"Falha CRÍTICA ao inicializar o SeedVrManager: {e}", exc_info=True)
230
  seedvr_manager_singleton = SeedVrPlaceholder()