Carlos s
committed on
Update api/ltx_server.py
Browse files — api/ltx_server.py (+5 −4)
api/ltx_server.py
CHANGED
Unified diff for api/ltx_server.py (reconstructed from the garbled split view; indentation of the original file is approximate — confirm against the repository):

@@ -602,8 +602,9 @@ class VideoService:
         #
         first_pass_kwargs = call_kwargs.copy()
         first_pass_kwargs.update(first_pass_args)
-
-
+        second_pass_kwargs = call_kwargs.copy()
+        second_pass_kwargs.update(second_pass_args)
+
         print("[DEBUG] Chamando multi_scale_pipeline...")
         t_ms = time.perf_counter()
         ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
@@ -626,8 +627,8 @@ class VideoService:
         print("[DEBUG] Executando SECOND PASS (latent_upsampler)...")
         with ctx:
             result = self.latent_upsampler(
-                latents=
-                **
+                latents=result_first,
+                **second_pass_kwargs
             )

         #with ctx:

Summary of the change: the commit builds the previously-missing `second_pass_kwargs` (a copy of `call_kwargs` updated with `second_pass_args`, mirroring the existing `first_pass_kwargs` construction) and completes the truncated `self.latent_upsampler(...)` call by passing `latents=result_first` together with `**second_pass_kwargs`. (NOTE(review): the removed lines `latents=` / `**` appear truncated in the rendered page; verify the exact pre-change text against the repository history.)