eeuuia committed on
Commit
e6999b3
·
verified ·
1 Parent(s): 34ff926

Update api/ltx/ltx_aduc_pipeline.py

Browse files
Files changed (1) hide show
  1. api/ltx/ltx_aduc_pipeline.py +4 -38
api/ltx/ltx_aduc_pipeline.py CHANGED
@@ -25,7 +25,6 @@ from api.ltx.vae_aduc_pipeline import vae_aduc_pipeline
25
  from tools.video_encode_tool import video_encode_tool_singleton
26
 
27
 
28
-
29
  # ==============================================================================
30
  # --- SETUP E IMPORTAÇÕES DO PROJETO ---
31
  # ==============================================================================
@@ -206,7 +205,7 @@ class LtxAducPipeline:
206
  if is_narrative and i < num_chunks - 1:
207
  overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
208
  overlap_condition_item = LatentConditioningItem(
209
- latent_tensor=overlap_latents.cpu(),
210
  media_frame_number=0,
211
  conditioning_strength=1.0
212
  )
@@ -225,10 +224,7 @@ class LtxAducPipeline:
225
  all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
226
  final_latents = torch.cat(all_tensors_cpu, dim=2)
227
 
228
- for path in temp_latent_paths:
229
- if path.exists(): path.unlink()
230
- self.finalize()
231
-
232
  video_path, latents_path = self._finalize_generation(final_latents, base_filename, used_seed)
233
  return video_path, latents_path, used_seed
234
 
@@ -237,37 +233,6 @@ class LtxAducPipeline:
237
  # --- UNIDADES DE TRABALHO E HELPERS INTERNOS ---
238
  # ==========================================================================
239
 
240
- @log_function_io
241
- def _log_conditioning_items(self, items: List[LatentConditioningItem]):
242
- """
243
- Logs detailed information about a list of ConditioningItem objects.
244
- This is a dedicated debug helper function.
245
- """
246
- # Só imprime o log se o nível de logging for DEBUG
247
- if logging.getLogger().isEnabledFor(logging.INFO):
248
- log_str = ["\n" + "="*10 + " INFO: Conditioning Items " + "="*10]
249
- if not items:
250
- log_str.append(" -> Lista de conditioning_items está vazia.")
251
- else:
252
- for i, item in enumerate(items):
253
- if hasattr(item, 'media_item') and isinstance(item.media_item, torch.Tensor):
254
- t = item.media_item
255
- log_str.append(
256
- f" -> Item [{i}]: "
257
- f"Tensor(shape={list(t.shape)}, "
258
- f"device='{t.device}', "
259
- f"dtype={t.dtype}), "
260
- f"Target Frame = {item.media_frame_number}, "
261
- f"Strength = {item.conditioning_strength:.2f}"
262
- )
263
- else:
264
- log_str.append(f" -> Item [{i}]: Não contém um tensor válido.")
265
-
266
- log_str.append("="*30 + "\n")
267
-
268
- # Usa o logger de debug para imprimir a mensagem completa
269
- logging.info("\n".join(log_str))
270
-
271
  @log_function_io
272
  def _finalize_generation(self, final_latents: torch.Tensor, base_filename: str, seed: int) -> Tuple[str, str]:
273
  """Delegates final decoding and encoding to specialist services."""
@@ -291,7 +256,8 @@ class LtxAducPipeline:
291
  if ui_value and ui_value > 0:
292
  config_dict[key] = ui_value
293
  logging.info(f"Override: '{key}' set to {ui_value} by UI.")
294
-
 
295
  def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
296
  with tempfile.TemporaryDirectory() as temp_dir:
297
  temp_path = os.path.join(temp_dir, f"{base_filename}.mp4")
 
25
  from tools.video_encode_tool import video_encode_tool_singleton
26
 
27
 
 
28
  # ==============================================================================
29
  # --- SETUP E IMPORTAÇÕES DO PROJETO ---
30
  # ==============================================================================
 
205
  if is_narrative and i < num_chunks - 1:
206
  overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
207
  overlap_condition_item = LatentConditioningItem(
208
+ latent_tensor=overlap_latents,
209
  media_frame_number=0,
210
  conditioning_strength=1.0
211
  )
 
224
  all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
225
  final_latents = torch.cat(all_tensors_cpu, dim=2)
226
 
227
+
 
 
 
228
  video_path, latents_path = self._finalize_generation(final_latents, base_filename, used_seed)
229
  return video_path, latents_path, used_seed
230
 
 
233
  # --- UNIDADES DE TRABALHO E HELPERS INTERNOS ---
234
  # ==========================================================================
235
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
236
  @log_function_io
237
  def _finalize_generation(self, final_latents: torch.Tensor, base_filename: str, seed: int) -> Tuple[str, str]:
238
  """Delegates final decoding and encoding to specialist services."""
 
256
  if ui_value and ui_value > 0:
257
  config_dict[key] = ui_value
258
  logging.info(f"Override: '{key}' set to {ui_value} by UI.")
259
+
260
+ @log_function_io
261
  def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
262
  with tempfile.TemporaryDirectory() as temp_dir:
263
  temp_path = os.path.join(temp_dir, f"{base_filename}.mp4")