Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -262,13 +262,15 @@ def encode_prompt(
|
|
| 262 |
print(error_msg)
|
| 263 |
return None, prompt, error_msg
|
| 264 |
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
|
|
|
|
|
|
| 269 |
return 120
|
| 270 |
|
| 271 |
-
@spaces.GPU(duration=
|
| 272 |
def generate_video(
|
| 273 |
input_image,
|
| 274 |
prompt: str,
|
|
@@ -279,6 +281,7 @@ def generate_video(
|
|
| 279 |
randomize_seed: bool = True,
|
| 280 |
height: int = DEFAULT_1_STAGE_HEIGHT,
|
| 281 |
width: int = DEFAULT_1_STAGE_WIDTH,
|
|
|
|
| 282 |
progress=gr.Progress(track_tqdm=True),
|
| 283 |
):
|
| 284 |
gc.collect()
|
|
@@ -346,7 +349,6 @@ def generate_video(
|
|
| 346 |
# ---------------------------
|
| 347 |
# We instantiate the pipeline inside the GPU function to ensure LoRAs are applied correctly
|
| 348 |
# for this specific run without global state pollution.
|
| 349 |
-
# Since 'model_ledger' and checkpoints are already downloaded/cached, this is fast.
|
| 350 |
pipeline = DistilledPipeline(
|
| 351 |
device=torch.device("cuda"),
|
| 352 |
checkpoint_path=checkpoint_path,
|
|
@@ -389,6 +391,7 @@ def generate_video(
|
|
| 389 |
return None, current_seed
|
| 390 |
|
| 391 |
def generate_video_example(input_image, prompt, lora_adapter, duration):
|
|
|
|
| 392 |
output, seed = generate_video(
|
| 393 |
input_image=input_image,
|
| 394 |
prompt=prompt,
|
|
@@ -398,7 +401,8 @@ def generate_video_example(input_image, prompt, lora_adapter, duration):
|
|
| 398 |
seed=42,
|
| 399 |
randomize_seed=True,
|
| 400 |
height=DEFAULT_1_STAGE_HEIGHT,
|
| 401 |
-
width=DEFAULT_1_STAGE_WIDTH
|
|
|
|
| 402 |
)
|
| 403 |
return output
|
| 404 |
|
|
@@ -414,6 +418,9 @@ def apply_duration(duration: str):
|
|
| 414 |
duration_s = int(duration[:-1])
|
| 415 |
return duration_s
|
| 416 |
|
|
|
|
|
|
|
|
|
|
| 417 |
class RadioAnimated(gr.HTML):
|
| 418 |
def __init__(self, choices, value=None, **kwargs):
|
| 419 |
if not choices or len(choices) < 2:
|
|
@@ -531,11 +538,13 @@ css = """
|
|
| 531 |
.dark .ra-label { color: var(--neutral-400); }
|
| 532 |
.dark .ra-highlight { background: var(--neutral-600); }
|
| 533 |
.dark .ra-input:checked + .ra-label { color: white; }
|
|
|
|
|
|
|
| 534 |
"""
|
| 535 |
|
| 536 |
with gr.Blocks() as demo:
|
| 537 |
with gr.Column(elem_id="col-container"):
|
| 538 |
-
gr.Markdown("# **LTX-2-LoRAs-Camera-Control-Dolly**")
|
| 539 |
gr.Markdown("Create cinematic video from text or image using [LTX-2 Distilled](https://huggingface.co/Lightricks/LTX-2/blob/main/ltx-2-19b-distilled-lora-384.safetensors) model. Select LoRA adapters for specific camera movements or styles.")
|
| 540 |
|
| 541 |
with gr.Row():
|
|
@@ -589,6 +598,17 @@ with gr.Blocks() as demo:
|
|
| 589 |
width = gr.Number(value=DEFAULT_1_STAGE_WIDTH, visible=False)
|
| 590 |
height = gr.Number(value=DEFAULT_1_STAGE_HEIGHT, visible=False)
|
| 591 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 592 |
with gr.Accordion("Advanced Settings", open=False):
|
| 593 |
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=DEFAULT_SEED, step=1)
|
| 594 |
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
|
|
@@ -596,10 +616,11 @@ with gr.Blocks() as demo:
|
|
| 596 |
# Wire up events
|
| 597 |
radioanimated_duration.change(fn=apply_duration, inputs=radioanimated_duration, outputs=[duration], api_visibility="private")
|
| 598 |
radioanimated_resolution.change(fn=apply_resolution, inputs=radioanimated_resolution, outputs=[width, height], api_visibility="private")
|
|
|
|
| 599 |
|
| 600 |
generate_btn.click(
|
| 601 |
fn=generate_video,
|
| 602 |
-
inputs=[input_image, prompt, lora_adapter, duration, enhance_prompt, seed, randomize_seed, height, width],
|
| 603 |
outputs=[output_video, seed]
|
| 604 |
)
|
| 605 |
|
|
|
|
| 262 |
print(error_msg)
|
| 263 |
return None, prompt, error_msg
|
| 264 |
|
| 265 |
+
# Function to calculate timeout based on the arguments passed to the GPU decorated function
|
| 266 |
+
def calc_timeout_duration(input_image, prompt, lora_adapter, duration, enhance_prompt, seed, randomize_seed, height, width, gpu_timeout, progress=None):
    """Compute the ZeroGPU timeout (in seconds) for a `generate_video` call.

    This function is passed as the dynamic ``duration=`` callable to
    ``@spaces.GPU``; it therefore receives the same positional arguments as
    ``generate_video`` and must only inspect them, not mutate them. Only
    ``gpu_timeout`` is used — the remaining parameters exist solely to match
    the decorated function's signature.

    Returns:
        int: ``gpu_timeout`` coerced to an integer, or the 120-second
        fallback when the value is missing or not a valid integer.
    """
    try:
        return int(gpu_timeout)
    except (TypeError, ValueError):
        # gpu_timeout was None, a non-numeric string, etc. — fall back to a
        # safe default rather than crashing before the GPU job even starts.
        # NOTE: the original code used a bare `except:`, which would also
        # swallow KeyboardInterrupt/SystemExit; narrowed deliberately.
        return 120
|
| 272 |
|
| 273 |
+
@spaces.GPU(duration=calc_timeout_duration)
|
| 274 |
def generate_video(
|
| 275 |
input_image,
|
| 276 |
prompt: str,
|
|
|
|
| 281 |
randomize_seed: bool = True,
|
| 282 |
height: int = DEFAULT_1_STAGE_HEIGHT,
|
| 283 |
width: int = DEFAULT_1_STAGE_WIDTH,
|
| 284 |
+
gpu_timeout: int = 90,
|
| 285 |
progress=gr.Progress(track_tqdm=True),
|
| 286 |
):
|
| 287 |
gc.collect()
|
|
|
|
| 349 |
# ---------------------------
|
| 350 |
# We instantiate the pipeline inside the GPU function to ensure LoRAs are applied correctly
|
| 351 |
# for this specific run without global state pollution.
|
|
|
|
| 352 |
pipeline = DistilledPipeline(
|
| 353 |
device=torch.device("cuda"),
|
| 354 |
checkpoint_path=checkpoint_path,
|
|
|
|
| 391 |
return None, current_seed
|
| 392 |
|
| 393 |
def generate_video_example(input_image, prompt, lora_adapter, duration):
|
| 394 |
+
# We pass a default 90s timeout for examples
|
| 395 |
output, seed = generate_video(
|
| 396 |
input_image=input_image,
|
| 397 |
prompt=prompt,
|
|
|
|
| 401 |
seed=42,
|
| 402 |
randomize_seed=True,
|
| 403 |
height=DEFAULT_1_STAGE_HEIGHT,
|
| 404 |
+
width=DEFAULT_1_STAGE_WIDTH,
|
| 405 |
+
gpu_timeout=90
|
| 406 |
)
|
| 407 |
return output
|
| 408 |
|
|
|
|
| 418 |
duration_s = int(duration[:-1])
|
| 419 |
return duration_s
|
| 420 |
|
| 421 |
+
def apply_gpu_duration(val: str):
    """Convert the GPU-duration radio selection (e.g. "120") to an int of seconds."""
    seconds = int(val)
    return seconds
|
| 423 |
+
|
| 424 |
class RadioAnimated(gr.HTML):
|
| 425 |
def __init__(self, choices, value=None, **kwargs):
|
| 426 |
if not choices or len(choices) < 2:
|
|
|
|
| 538 |
.dark .ra-label { color: var(--neutral-400); }
|
| 539 |
.dark .ra-highlight { background: var(--neutral-600); }
|
| 540 |
.dark .ra-input:checked + .ra-label { color: white; }
|
| 541 |
+
|
| 542 |
+
#main-title h1 { font-size: 2.2em !important; }
|
| 543 |
"""
|
| 544 |
|
| 545 |
with gr.Blocks() as demo:
|
| 546 |
with gr.Column(elem_id="col-container"):
|
| 547 |
+
gr.Markdown("# **LTX-2-LoRAs-Camera-Control-Dolly**", elem_id="main-title")
|
| 548 |
gr.Markdown("Create cinematic video from text or image using [LTX-2 Distilled](https://huggingface.co/Lightricks/LTX-2/blob/main/ltx-2-19b-distilled-lora-384.safetensors) model. Select LoRA adapters for specific camera movements or styles.")
|
| 549 |
|
| 550 |
with gr.Row():
|
|
|
|
| 598 |
width = gr.Number(value=DEFAULT_1_STAGE_WIDTH, visible=False)
|
| 599 |
height = gr.Number(value=DEFAULT_1_STAGE_HEIGHT, visible=False)
|
| 600 |
|
| 601 |
+
# New GPU Duration Row below Resolution
|
| 602 |
+
with gr.Row():
|
| 603 |
+
with gr.Column():
|
| 604 |
+
gr.Markdown("**GPU Duration**")
|
| 605 |
+
radioanimated_gpu_duration = RadioAnimated(
|
| 606 |
+
choices=["120", "240", "300"],
|
| 607 |
+
value="120",
|
| 608 |
+
elem_id="radioanimated_gpu_duration"
|
| 609 |
+
)
|
| 610 |
+
gpu_duration_state = gr.Number(value=90, visible=False)
|
| 611 |
+
|
| 612 |
with gr.Accordion("Advanced Settings", open=False):
|
| 613 |
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=DEFAULT_SEED, step=1)
|
| 614 |
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
|
|
|
|
| 616 |
# Wire up events
|
| 617 |
radioanimated_duration.change(fn=apply_duration, inputs=radioanimated_duration, outputs=[duration], api_visibility="private")
|
| 618 |
radioanimated_resolution.change(fn=apply_resolution, inputs=radioanimated_resolution, outputs=[width, height], api_visibility="private")
|
| 619 |
+
radioanimated_gpu_duration.change(fn=apply_gpu_duration, inputs=radioanimated_gpu_duration, outputs=[gpu_duration_state], api_visibility="private")
|
| 620 |
|
| 621 |
generate_btn.click(
|
| 622 |
fn=generate_video,
|
| 623 |
+
inputs=[input_image, prompt, lora_adapter, duration, enhance_prompt, seed, randomize_seed, height, width, gpu_duration_state],
|
| 624 |
outputs=[output_video, seed]
|
| 625 |
)
|
| 626 |
|