Carlos s committed on
Commit
6d5f8fa
·
verified ·
1 Parent(s): 3871f89

Update api/ltx_server.py

Browse files
Files changed (1) hide show
  1. api/ltx_server.py +21 -16
api/ltx_server.py CHANGED
@@ -503,7 +503,7 @@ class VideoService:
503
  torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
504
  self._log_gpu_memory("Início da Geração")
505
 
506
- ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
507
 
508
  if mode == "image-to-video" and not start_image_filepath:
509
  raise ValueError("A imagem de início é obrigatória para o modo image-to-video")
@@ -602,32 +602,37 @@ class VideoService:
602
  #
603
  first_pass_kwargs = call_kwargs.copy()
604
  first_pass_kwargs.update(first_pass_args)
 
 
 
 
 
605
 
606
  print("[DEBUG] Executando FIRST PASS (pipeline base)...")
607
  with ctx:
608
  result_first = self.pipeline(**first_pass_kwargs)
609
-
610
- latents_first = result_first.latents if hasattr(result_first, "latents") else result_first
611
- print(f"[DEBUG] Latentes FIRST PASS: {tuple(latents_first.shape)}")
612
-
 
 
 
 
 
 
 
613
  # --- SECOND PASS ---
614
  print("[DEBUG] Executando SECOND PASS (latent_upsampler)...")
615
  with ctx:
616
- result_second = self.latent_upsampler(
617
  latents=latents_first,
618
  **second_pass_args
619
  )
620
-
621
- latents_final = result_second.latents if hasattr(result_second, "latents") else result_second
622
- print(f"[DEBUG] Latentes SECOND PASS: {tuple(latents_final.shape)}")
623
-
624
 
625
- print("[DEBUG] Chamando multi_scale_pipeline...")
626
- t_ms = time.perf_counter()
627
- ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
628
- with ctx:
629
- result = multi_scale_pipeline(**multi_scale_call_kwargs)
630
- print(f"[DEBUG] multi_scale_pipeline tempo={time.perf_counter()-t_ms:.3f}s")
631
 
632
  if hasattr(result, "latents"):
633
  latents = result.latents
 
503
  torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
504
  self._log_gpu_memory("Início da Geração")
505
 
506
+ #ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
507
 
508
  if mode == "image-to-video" and not start_image_filepath:
509
  raise ValueError("A imagem de início é obrigatória para o modo image-to-video")
 
602
  #
603
  first_pass_kwargs = call_kwargs.copy()
604
  first_pass_kwargs.update(first_pass_args)
605
+
606
+
607
+ print("[DEBUG] Chamando multi_scale_pipeline...")
608
+ t_ms = time.perf_counter()
609
+ ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
610
 
611
  print("[DEBUG] Executando FIRST PASS (pipeline base)...")
612
  with ctx:
613
  result_first = self.pipeline(**first_pass_kwargs)
614
+
615
+
616
+ if hasattr(result_first, "latents"):
617
+ latents = result_first.latents
618
+ elif hasattr(result_first, "images") and isinstance(result.images, torch.Tensor):
619
+ latents = result_first.images
620
+ else:
621
+ latents = result_first
622
+ print(f"[DEBUG] Latentes (single-pass): shape={tuple(latents.shape)}")
623
+ latents_first = latents
624
+
625
  # --- SECOND PASS ---
626
  print("[DEBUG] Executando SECOND PASS (latent_upsampler)...")
627
  with ctx:
628
+ result = self.latent_upsampler(
629
  latents=latents_first,
630
  **second_pass_args
631
  )
 
 
 
 
632
 
633
+ #with ctx:
634
+ # result = multi_scale_pipeline(**multi_scale_call_kwargs)
635
+ #print(f"[DEBUG] multi_scale_pipeline tempo={time.perf_counter()-t_ms:.3f}s")
 
 
 
636
 
637
  if hasattr(result, "latents"):
638
  latents = result.latents