Update api/ltx_server.py
Browse files- api/ltx_server.py +5 -3
api/ltx_server.py
CHANGED
|
@@ -256,14 +256,16 @@ class LatentConditioningItem:
|
|
| 256 |
media_frame_number: int
|
| 257 |
conditioning_strength: float
|
| 258 |
|
|
|
|
|
|
|
| 259 |
def _aduc_prepare_conditioning_patch(
|
| 260 |
self: "LTXVideoPipeline",
|
| 261 |
-
|
| 262 |
init_latents: torch.Tensor,
|
| 263 |
num_frames: int,
|
| 264 |
-
height:
|
| 265 |
width: int,
|
| 266 |
-
vae_per_channel_normalize: bool =
|
| 267 |
generator=None,
|
| 268 |
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
|
| 269 |
if not conditioning_items:
|
|
|
|
| 256 |
media_frame_number: int
|
| 257 |
conditioning_strength: float
|
| 258 |
|
| 259 |
+
|
| 260 |
+
|
| 261 |
def _aduc_prepare_conditioning_patch(
|
| 262 |
self: "LTXVideoPipeline",
|
| 263 |
+
conditioning_items: Optional[List[Union["ConditioningItem", "LatentConditioningItem"]]],
|
| 264 |
init_latents: torch.Tensor,
|
| 265 |
num_frames: int,
|
| 266 |
+
height: int,
|
| 267 |
width: int,
|
| 268 |
+
vae_per_channel_normalize: bool = True,
|
| 269 |
generator=None,
|
| 270 |
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
|
| 271 |
if not conditioning_items:
|