Commit: Upload 3 files

Files changed:
- README.md: +10 -18
- app.py: +592 -833
- requirements.txt: +15 -48

README.md CHANGED
@@ -1,21 +1,13 @@
 ---
-title:
+title: LTX Video Fast
+emoji: 🎥
+colorFrom: yellow
+colorTo: pink
 sdk: gradio
-sdk_version: 4.38.1
+sdk_version: 5.29.1
 app_file: app.py
-- Restoring
-- Image-to-Image
-- Image-2-Image
-- Img-to-Img
-- Img-2-Img
-- language models
-- LLMs
-short_description: Restore blurred or small images with prompt
-suggested_hardware: zero-a10g
+pinned: false
+short_description: ultra-fast video model, LTX 0.9.7 13B distilled
 ---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
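Note: the new front matter switches the Space to Gradio 5.29.1 and registers the short description shown on the hub. A minimal sketch of reading these keys with PyYAML (which app.py already depends on); the helper name `read_space_config` is hypothetical and not part of the commit:

```python
# Hypothetical helper, not in the commit: parse the Space's README front matter.
import yaml

def read_space_config(readme_path="README.md"):
    text = open(readme_path, encoding="utf-8").read()
    # The front matter sits between the first two '---' markers.
    _, front_matter, _ = text.split("---", 2)
    return yaml.safe_load(front_matter)

config = read_space_config()
print(config["sdk"], config["sdk_version"])  # -> gradio 5.29.1
```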
app.py CHANGED

@@ -1,864 +1,623 @@
-import os
 import gradio as gr
-import argparse
-import numpy as np
 import torch
-import einops
-import copy
-import math
-import time
-import random
 import spaces
-import
-import
 from PIL import Image
-from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
 from huggingface_hub import hf_hub_download
-        "
-        "
-        1.0,
-        True,
-        False,
-        default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
-        0.,
-        "v0-Q",
-        "input",
-        179
-    ]
-
-def check_and_update(input_image):
-    if input_image is None:
-        raise gr.Error("Please provide an image to restore.")
-    return gr.update(visible = True)
-
-@spaces.GPU(duration=420)
-def stage1_process(
-    input_image,
-    gamma_correction,
-    diff_dtype,
-    ae_dtype
-):
-    print('stage1_process ==>>')
-    if torch.cuda.device_count() == 0:
-        gr.Warning('Set this space to GPU config to make it work.')
-        return None, None
-    torch.cuda.set_device(SUPIR_device)
-    LQ = HWC3(np.array(Image.open(input_image)))
-    LQ = fix_resize(LQ, 512)
-    # stage1
-    LQ = np.array(LQ) / 255 * 2 - 1
-    LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
-
-    model.ae_dtype = convert_dtype(ae_dtype)
-    model.model.dtype = convert_dtype(diff_dtype)
-
-    LQ = model.batchify_denoise(LQ, is_stage1=True)
-    LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
-    # gamma correction
-    LQ = LQ / 255.0
-    LQ = np.power(LQ, gamma_correction)
-    LQ *= 255.0
-    LQ = LQ.round().clip(0, 255).astype(np.uint8)
-    print('<<== stage1_process')
-    return LQ, gr.update(visible = True)
-
-def stage2_process(*args, **kwargs):
-    try:
-        return restore_in_Xmin(*args, **kwargs)
-    except Exception as e:
-        # NO_GPU_MESSAGE_INQUEUE
-        print("gradio.exceptions.Error 'No GPU is currently available for you after 60s'")
-        print('str(type(e)): ' + str(type(e))) # <class 'gradio.exceptions.Error'>
-        print('str(e): ' + str(e)) # You have exceeded your GPU quota...
-        try:
-            print('e.message: ' + e.message) # No GPU is currently available for you after 60s
-        except Exception as e2:
-            print('Failure')
-        if str(e).startswith("No GPU is currently available for you after 60s"):
-            print('Exception identified!!!')
-        #if str(type(e)) == "<class 'gradio.exceptions.Error'>":
-        #print('Exception of name ' + type(e).__name__)
-        raise e
-
-def restore_in_Xmin(
-    noisy_image,
-    rotation,
-    denoise_image,
-    prompt,
-    a_prompt,
-    n_prompt,
-    num_samples,
-    min_size,
-    downscale,
-    upscale,
-    edm_steps,
-    s_stage1,
-    s_stage2,
-    s_cfg,
-    randomize_seed,
-    seed,
-    s_churn,
-    s_noise,
-    color_fix_type,
-    diff_dtype,
-    ae_dtype,
-    gamma_correction,
-    linear_CFG,
-    linear_s_stage2,
-    spt_linear_CFG,
-    spt_linear_s_stage2,
-    model_select,
-    output_format,
-    allocation
-):
-    print("noisy_image:\n" + str(noisy_image))
-    print("denoise_image:\n" + str(denoise_image))
-    print("rotation: " + str(rotation))
-    print("prompt: " + str(prompt))
-    print("a_prompt: " + str(a_prompt))
-    print("n_prompt: " + str(n_prompt))
-    print("num_samples: " + str(num_samples))
-    print("min_size: " + str(min_size))
-    print("downscale: " + str(downscale))
-    print("upscale: " + str(upscale))
-    print("edm_steps: " + str(edm_steps))
-    print("s_stage1: " + str(s_stage1))
-    print("s_stage2: " + str(s_stage2))
-    print("s_cfg: " + str(s_cfg))
-    print("randomize_seed: " + str(randomize_seed))
-    print("seed: " + str(seed))
-    print("s_churn: " + str(s_churn))
-    print("s_noise: " + str(s_noise))
-    print("color_fix_type: " + str(color_fix_type))
-    print("diff_dtype: " + str(diff_dtype))
-    print("ae_dtype: " + str(ae_dtype))
-    print("gamma_correction: " + str(gamma_correction))
-    print("linear_CFG: " + str(linear_CFG))
-    print("linear_s_stage2: " + str(linear_s_stage2))
-    print("spt_linear_CFG: " + str(spt_linear_CFG))
-    print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
-    print("model_select: " + str(model_select))
-    print("GPU time allocation: " + str(allocation) + " min")
-    print("output_format: " + str(output_format))
-
-    input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
-
-    if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
-        gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp or *.heic.')
-        return None, None, None, None
-
-    if output_format == "input":
-        if noisy_image is None:
-            output_format = "png"
-        else:
-            output_format = input_format
-    print("final output_format: " + str(output_format))
-
-    if prompt is None:
-        prompt = ""
-
-    if a_prompt is None:
-        a_prompt = ""
-
-    if n_prompt is None:
-        n_prompt = ""
-
-    if prompt != "" and a_prompt != "":
-        a_prompt = prompt + ", " + a_prompt
-    else:
-        a_prompt = prompt + a_prompt
-    print("Final prompt: " + str(a_prompt))
-
-    denoise_image = np.array(Image.open(noisy_image if denoise_image is None else denoise_image))
-
-    if rotation == 90:
-        denoise_image = np.array(list(zip(*denoise_image[::-1])))
-    elif rotation == 180:
-        denoise_image = np.array(list(zip(*denoise_image[::-1])))
-        denoise_image = np.array(list(zip(*denoise_image[::-1])))
-    elif rotation == -90:
-        denoise_image = np.array(list(zip(*denoise_image))[::-1])
-
-    if 1 < downscale:
-        input_height, input_width, input_channel = denoise_image.shape
-        denoise_image = np.array(Image.fromarray(denoise_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
-
-    denoise_image = HWC3(denoise_image)
-
-    if torch.cuda.device_count() == 0:
-        gr.Warning('Set this space to GPU config to make it work.')
-        return [noisy_image, denoise_image], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = [denoise_image]), None, gr.update(visible=True)
-
-    if model_select != model.current_model:
-        print('load ' + model_select)
-        if model_select == 'v0-Q':
-            model.load_state_dict(ckpt_Q, strict=False)
-        elif model_select == 'v0-F':
-            model.load_state_dict(ckpt_F, strict=False)
-        model.current_model = model_select
-
-    model.ae_dtype = convert_dtype(ae_dtype)
-    model.model.dtype = convert_dtype(diff_dtype)
-
-    return restore_on_gpu(
-        noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
 )

 @spaces.GPU(duration=get_duration)
-def
-    return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True), gr.update(visible=True)
-
-def load_and_reset(param_setting):
-    print('load_and_reset ==>>')
-    if torch.cuda.device_count() == 0:
-        gr.Warning('Set this space to GPU config to make it work.')
-        return None, None, None, None, None, None, None, None, None, None, None, None, None, None
-    edm_steps = default_setting.edm_steps
-    s_stage2 = 1.0
-    s_stage1 = -1.0
-    s_churn = 5
-    s_noise = 1.003
-    a_prompt = 'Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - ' \
-               'realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore ' \
-               'detailing, hyper sharpness, perfect without deformations.'
-    n_prompt = 'painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, ' \
-               '3D render, unreal engine, blurring, dirty, messy, worst quality, low quality, frames, watermark, ' \
-               'signature, jpeg artifacts, deformed, lowres, over-smooth'
-    color_fix_type = 'Wavelet'
-    spt_linear_s_stage2 = 0.0
-    linear_s_stage2 = False
-    linear_CFG = True
-    if param_setting == "Quality":
-        s_cfg = default_setting.s_cfg_Quality
-        spt_linear_CFG = default_setting.spt_linear_CFG_Quality
-        model_select = "v0-Q"
-    elif param_setting == "Fidelity":
-        s_cfg = default_setting.s_cfg_Fidelity
-        spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
-        model_select = "v0-F"
 else:
-        raise
-    print(

 """

-    with gr.Row():
-        gr.HTML("""
-        <p style="background-color: red;"><big><big><big><b>⚠️To use SUPIR, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>

-        downscale = gr.Radio([["/1", 1], ["/2", 2], ["/3", 3], ["/4", 4], ["/5", 5], ["/6", 6], ["/7", 7], ["/8", 8], ["/9", 9], ["/10", 10]], label="Pre-downscale factor", info="Reducing blurred image reduce the process time", value=1, interactive=True)
     with gr.Row():
-
-            model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
-                                    interactive=True)
-        with gr.Column():
-            color_fix_type = gr.Radio([["None", "None"], ["AdaIn (improve as a photo)", "AdaIn"], ["Wavelet (for JPEG artifacts)", "Wavelet"]], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="AdaIn",
-                                      interactive=True)
-            s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
-                              value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
-            s_stage2 = gr.Slider(label="Restoring Guidance Strength", minimum=0., maximum=1., value=1., step=0.05)
-            s_stage1 = gr.Slider(label="Pre-denoising Guidance Strength", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0)
-            s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1)
-            s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001)
     with gr.Row():
-        with gr.Column():
-            diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id = "process_button")
-            reset_btn = gr.Button(value="🧹 Reinit page", variant="stop", elem_id="reset_button", visible = False)
-
-    warning = gr.HTML(value = "<center><big>Your computer must <u>not</u> enter into standby mode.</big><br/>On Chrome, you can force to keep a tab alive in <code>chrome://discards/</code></center>", visible = False)
-    restore_information = gr.HTML(value = "Restart the process to get another result.", visible = False)
-    result_slider = ImageSlider(label = 'Comparator', show_label = False, interactive = False, elem_id = "slider1", show_download_button = False)
-    result_gallery = gr.Gallery(label = 'Downloadable results', show_label = True, interactive = False, elem_id = "gallery1")
-
-    gr.Examples(
     examples = [
         [
-            "
-            "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
-            "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-            "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-            2,
-            1024,
-            1,
-            8,
-            100,
-            -1,
-            1,
-            7.5,
-            False,
-            42,
-            5,
-            1.003,
-            "AdaIn",
-            "fp16",
-            "bf16",
-            1.0,
-            True,
-            4,
-            False,
-            0.,
-            "v0-Q",
-            "input",
-            179
-        ],
-        [
-            "./Examples/Example2.jpeg",
-            0,
-            None,
-            "La cabeza de un gato atigrado, en una casa, fotorrealista, 8k, extremadamente detallada",
-            "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-            "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-            1,
-            1024,
-            1,
-            1,
-            200,
-            -1,
-            1,
-            7.5,
-            False,
-            42,
-            5,
-            1.003,
-            "Wavelet",
-            "fp16",
-            "bf16",
-            1.0,
-            True,
-            4,
-            False,
-            0.,
-            "v0-Q",
-            "input",
-            179
-        ],
-        [
-            "./Examples/Example3.webp",
-            0,
             None,
-            "
-            1,
-            1,
-            200,
-            -1,
-            1,
-            7.5,
-            False,
             42,
-            5,
-            1.003,
-            "Wavelet",
-            "fp16",
-            "bf16",
-            1.0,
             True,
-            4,
-            False,
-            0.,
-            "v0-Q",
-            "input",
-            179
-        ],
-        [
-            "./Examples/Example3.webp",
-            0,
-            None,
-            "A red marble",
-            "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-            "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-            1,
-            1024,
-            1,
-            1,
-            200,
-            -1,
             1,
-
-            False,
-            42,
-            5,
-            1.003,
-            "Wavelet",
-            "fp16",
-            "bf16",
-            1.0,
-            True,
-            4,
-            False,
-            0.,
-            "v0-Q",
-            "input",
-            179
         ],
     ],
     run_on_click = True,
-        fn =
-        inputs = [
-            n_prompt,
-            num_samples,
-            min_size,
-            downscale,
-            upscale,
-            edm_steps,
-            s_stage1,
-            s_stage2,
-            s_cfg,
-            randomize_seed,
-            seed,
-            s_churn,
-            s_noise,
-            color_fix_type,
-            diff_dtype,
-            ae_dtype,
-            gamma_correction,
-            linear_CFG,
-            linear_s_stage2,
-            spt_linear_CFG,
-            spt_linear_s_stage2,
-            model_select,
-            output_format,
-            allocation
-        ],
-        outputs = [
-            result_slider,
-            result_gallery,
-            restore_information,
-            reset_btn
-        ],
-        cache_examples = False,
     )

-        edm_steps,
-        s_cfg,
-        s_stage2,
-        s_stage1,
-        s_churn,
-        s_noise,
-        a_prompt,
-        n_prompt,
-        color_fix_type,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select
-    ])
-
-    reset_btn.click(fn = reset, inputs = [], outputs = [
-        input_image,
-        rotation,
-        denoise_image,
-        prompt,
-        a_prompt,
-        n_prompt,
-        num_samples,
-        min_size,
-        downscale,
-        upscale,
-        edm_steps,
-        s_stage1,
-        s_stage2,
-        s_cfg,
-        randomize_seed,
-        seed,
-        s_churn,
-        s_noise,
-        color_fix_type,
-        diff_dtype,
-        ae_dtype,
-        gamma_correction,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select,
-        output_format,
-        allocation
-    ], queue = False, show_progress = False)
-
-interface.queue(10).launch()
 import gradio as gr
 import torch
 import spaces
+import numpy as np
+import random
+import os
+import yaml
+from pathlib import Path
+import imageio
+import tempfile
 from PIL import Image
 from huggingface_hub import hf_hub_download
+import shutil
+
+from inference import (
+    create_ltx_video_pipeline,
+    create_latent_upsampler,
+    load_image_to_tensor_with_resize_and_crop,
+    seed_everething,
+    get_device,
+    calculate_padding,
+    load_media_file
+)
+from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline, LTXVideoPipeline
+from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
+
+image_i2v_debug_value = None
+i2v_prompt_debug_value = None
+height_input_debug_value = None
+width_input_debug_value = None
+duration_input_debug_value = None
+config_file_path = "configs/ltxv-13b-0.9.7-distilled.yaml"
+with open(config_file_path, "r") as file:
+    PIPELINE_CONFIG_YAML = yaml.safe_load(file)
+
+LTX_REPO = "Lightricks/LTX-Video"
+MAX_IMAGE_SIZE = PIPELINE_CONFIG_YAML.get("max_resolution", 1280)
+MAX_NUM_FRAMES = 257
+
+FPS = 30.0
+
+# --- Global variables for loaded models ---
+pipeline_instance = None
+latent_upsampler_instance = None
+models_dir = "downloaded_models_gradio_cpu_init"
+Path(models_dir).mkdir(parents=True, exist_ok=True)
+
+print("Downloading models (if not present)...")
+distilled_model_actual_path = hf_hub_download(
+    repo_id=LTX_REPO,
+    filename=PIPELINE_CONFIG_YAML["checkpoint_path"],
+    local_dir=models_dir,
+    local_dir_use_symlinks=False
+)
+PIPELINE_CONFIG_YAML["checkpoint_path"] = distilled_model_actual_path
+print(f"Distilled model path: {distilled_model_actual_path}")
+
+SPATIAL_UPSCALER_FILENAME = PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"]
+spatial_upscaler_actual_path = hf_hub_download(
+    repo_id=LTX_REPO,
+    filename=SPATIAL_UPSCALER_FILENAME,
+    local_dir=models_dir,
+    local_dir_use_symlinks=False
+)
+PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"] = spatial_upscaler_actual_path
+print(f"Spatial upscaler model path: {spatial_upscaler_actual_path}")
+
+print("Creating LTX Video pipeline on CPU...")
+pipeline_instance = create_ltx_video_pipeline(
+    ckpt_path=PIPELINE_CONFIG_YAML["checkpoint_path"],
+    precision=PIPELINE_CONFIG_YAML["precision"],
+    text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"],
+    sampler=PIPELINE_CONFIG_YAML["sampler"],
+    device="cpu",
+    enhance_prompt=False,
+    prompt_enhancer_image_caption_model_name_or_path=PIPELINE_CONFIG_YAML["prompt_enhancer_image_caption_model_name_or_path"],
+    prompt_enhancer_llm_model_name_or_path=PIPELINE_CONFIG_YAML["prompt_enhancer_llm_model_name_or_path"],
+)
+print("LTX Video pipeline created on CPU.")
+
+if PIPELINE_CONFIG_YAML.get("spatial_upscaler_model_path"):
+    print("Creating latent upsampler on CPU...")
+    latent_upsampler_instance = create_latent_upsampler(
+        PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"],
+        device="cpu"
     )
+    print("Latent upsampler created on CPU.")
+
+target_inference_device = "cuda"
+print(f"Target inference device: {target_inference_device}")
+pipeline_instance.to(target_inference_device)
+if latent_upsampler_instance:
+    latent_upsampler_instance.to(target_inference_device)

+
+# --- Helper function for dimension calculation ---
+MIN_DIM_SLIDER = 256  # As defined in the sliders minimum attribute
+TARGET_FIXED_SIDE = 768  # Desired fixed side length as per requirement
+
+def calculate_new_dimensions(orig_w, orig_h):
+    """
+    Calculates new dimensions for height and width sliders based on original media dimensions.
+    Ensures one side is TARGET_FIXED_SIDE, the other is scaled proportionally,
+    both are multiples of 32, and within [MIN_DIM_SLIDER, MAX_IMAGE_SIZE].
+    """
+    if orig_w == 0 or orig_h == 0:
+        # Default to TARGET_FIXED_SIDE square if original dimensions are invalid
+        return int(TARGET_FIXED_SIDE), int(TARGET_FIXED_SIDE)
+
+    if orig_w >= orig_h:  # Landscape or square
+        new_h = TARGET_FIXED_SIDE
+        aspect_ratio = orig_w / orig_h
+        new_w_ideal = new_h * aspect_ratio
+
+        # Round to nearest multiple of 32
+        new_w = round(new_w_ideal / 32) * 32
+
+        # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
+        new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
+        # Ensure new_h is also clamped (TARGET_FIXED_SIDE should be within these bounds if configured correctly)
+        new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
+    else:  # Portrait
+        new_w = TARGET_FIXED_SIDE
+        aspect_ratio = orig_h / orig_w  # Use H/W ratio for portrait scaling
+        new_h_ideal = new_w * aspect_ratio
+
+        # Round to nearest multiple of 32
+        new_h = round(new_h_ideal / 32) * 32
+
+        # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
+        new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
+        # Ensure new_w is also clamped
+        new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
+
+    return int(new_h), int(new_w)
+
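Note: a quick sanity check of the rounding and clamping above (illustrative values, not part of the commit; assumes the config leaves `max_resolution` at its default so `MAX_IMAGE_SIZE == 1280`):

```python
# Illustrative checks for calculate_new_dimensions, assuming MAX_IMAGE_SIZE == 1280.
# Landscape 1920x1080: height is fixed to 768, 768 * (1920/1080) ~= 1365.3,
# round(1365.3 / 32) * 32 = 1376, then clamped down to 1280.
assert calculate_new_dimensions(1920, 1080) == (768, 1280)
# Portrait 720x1280: width is fixed to 768, 768 * (1280/720) ~= 1365.3 -> 1376 -> 1280.
assert calculate_new_dimensions(720, 1280) == (1280, 768)
```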
+def get_duration(prompt, negative_prompt, input_image_filepath, input_video_filepath,
+                 height_ui, width_ui, mode,
+                 duration_ui,  # Removed ui_steps
+                 ui_frames_to_use,
+                 seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
+                 progress):
+    if duration_ui > 7:
+        return 75
+    else:
+        return 60

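Note: `spaces.GPU` accepts a callable for `duration`; it is invoked with the same arguments as the decorated function, so longer requested clips book a longer ZeroGPU slot. An illustrative call (the argument values are placeholders, not defaults from the commit):

```python
# Illustrative only: get_duration receives the same 14 arguments as generate.
args = ("prompt", "", None, None, 512, 704, "text-to-video")
assert get_duration(*args, 8.0, 9, 42, False, 1.0, True, None) == 75  # clip > 7 s
assert get_duration(*args, 2.0, 9, 42, False, 1.0, True, None) == 60
```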
 @spaces.GPU(duration=get_duration)
+def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath,
+             height_ui, width_ui, mode,
+             duration_ui,
+             ui_frames_to_use,
+             seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
+             progress=gr.Progress(track_tqdm=True)):
+
+    # Declare the debug globals up front: a `global` statement must precede any
+    # use of the name in the function, otherwise Python raises a SyntaxError.
+    global i2v_prompt_debug_value, image_i2v_debug_value
+    global height_input_debug_value, width_input_debug_value, duration_input_debug_value
+
+    if i2v_prompt_debug_value is not None:
+        prompt = i2v_prompt_debug_value
+        i2v_prompt_debug_value = None
+
+    if image_i2v_debug_value is not None:
+        input_image_filepath = image_i2v_debug_value
+        image_i2v_debug_value = None
+
+    if height_input_debug_value is not None:
+        height_ui = height_input_debug_value
+        height_input_debug_value = None
+
+    if width_input_debug_value is not None:
+        width_ui = width_input_debug_value
+        width_input_debug_value = None
+
+    if duration_input_debug_value is not None:
+        duration_ui = duration_input_debug_value
+        duration_input_debug_value = None
+
+    if randomize_seed:
+        seed_ui = random.randint(0, 2**32 - 1)
+    seed_everething(int(seed_ui))
+
+    target_frames_ideal = duration_ui * FPS
+    target_frames_rounded = round(target_frames_ideal)
+    if target_frames_rounded < 1:
+        target_frames_rounded = 1
+
+    n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
+    actual_num_frames = int(n_val * 8 + 1)
+
+    actual_num_frames = max(9, actual_num_frames)
+    actual_num_frames = min(MAX_NUM_FRAMES, actual_num_frames)
+
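Note: the model expects frame counts of the form N*8+1, so the arithmetic above snaps the requested duration to the nearest such count before clamping. A worked restatement (illustrative; `snap_num_frames` is a hypothetical helper, assuming FPS = 30 and MAX_NUM_FRAMES = 257):

```python
# Illustrative restatement of the frame snapping above:
# 2.0 s -> 60 target frames -> n = round(59/8) = 7 -> 7*8 + 1 = 57 frames
# 8.5 s -> 255 target frames -> n = round(254/8) = 32 -> 32*8 + 1 = 257 frames
def snap_num_frames(duration_s, fps=30.0, max_frames=257):
    target = max(1, round(duration_s * fps))
    n = round((target - 1) / 8.0)
    return min(max_frames, max(9, int(n * 8 + 1)))

assert snap_num_frames(2.0) == 57
assert snap_num_frames(8.5) == 257
```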
+    actual_height = int(height_ui)
+    actual_width = int(width_ui)
+
+    height_padded = ((actual_height - 1) // 32 + 1) * 32
+    width_padded = ((actual_width - 1) // 32 + 1) * 32
+    num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1
+    if num_frames_padded != actual_num_frames:
+        print(f"Warning: actual_num_frames ({actual_num_frames}) and num_frames_padded ({num_frames_padded}) differ. Using num_frames_padded for pipeline.")
+
+    padding_values = calculate_padding(actual_height, actual_width, height_padded, width_padded)
+
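Note: spatial sizes are padded up to the next multiple of 32 and frame counts to the next N*8+1; `calculate_padding` returns the left/right/top/bottom pixel padding that is cropped off again after decoding. Worked numbers (illustrative only):

```python
# Illustrative padding arithmetic matching the expressions above:
pad32 = lambda d: ((d - 1) // 32 + 1) * 32
assert pad32(512) == 512   # already a multiple of 32, unchanged
assert pad32(500) == 512   # padded up to the next multiple
pad_frames = lambda n: ((n - 2) // 8 + 1) * 8 + 1
assert pad_frames(57) == 57  # already of the form N*8+1, unchanged
```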
+    call_kwargs = {
+        "prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "height": height_padded,
+        "width": width_padded,
+        "num_frames": num_frames_padded,
+        "frame_rate": int(FPS),
+        "generator": torch.Generator(device=target_inference_device).manual_seed(int(seed_ui)),
+        "output_type": "pt",
+        "conditioning_items": None,
+        "media_items": None,
+        "decode_timestep": PIPELINE_CONFIG_YAML["decode_timestep"],
+        "decode_noise_scale": PIPELINE_CONFIG_YAML["decode_noise_scale"],
+        "stochastic_sampling": PIPELINE_CONFIG_YAML["stochastic_sampling"],
+        "image_cond_noise_scale": 0.15,
+        "is_video": True,
+        "vae_per_channel_normalize": True,
+        "mixed_precision": (PIPELINE_CONFIG_YAML["precision"] == "mixed_precision"),
+        "offload_to_cpu": False,
+        "enhance_prompt": False,
+    }
+
+    stg_mode_str = PIPELINE_CONFIG_YAML.get("stg_mode", "attention_values")
+    if stg_mode_str.lower() in ["stg_av", "attention_values"]:
+        call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionValues
+    elif stg_mode_str.lower() in ["stg_as", "attention_skip"]:
+        call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionSkip
+    elif stg_mode_str.lower() in ["stg_r", "residual"]:
+        call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.Residual
+    elif stg_mode_str.lower() in ["stg_t", "transformer_block"]:
+        call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.TransformerBlock
     else:
+        raise ValueError(f"Invalid stg_mode: {stg_mode_str}")
+
+    if mode == "image-to-video" and input_image_filepath:
+        try:
+            media_tensor = load_image_to_tensor_with_resize_and_crop(
+                input_image_filepath, actual_height, actual_width
+            )
+            media_tensor = torch.nn.functional.pad(media_tensor, padding_values)
+            call_kwargs["conditioning_items"] = [ConditioningItem(media_tensor.to(target_inference_device), 0, 1.0)]
+        except Exception as e:
+            print(f"Error loading image {input_image_filepath}: {e}")
+            raise gr.Error(f"Could not load image: {e}")
+    elif mode == "video-to-video" and input_video_filepath:
+        try:
+            call_kwargs["media_items"] = load_media_file(
+                media_path=input_video_filepath,
+                height=actual_height,
+                width=actual_width,
+                max_frames=int(ui_frames_to_use),
+                padding=padding_values
+            ).to(target_inference_device)
+        except Exception as e:
+            print(f"Error loading video {input_video_filepath}: {e}")
+            raise gr.Error(f"Could not load video: {e}")
+
+    print(f"Moving models to {target_inference_device} for inference (if not already there)...")

+    active_latent_upsampler = None
+    if improve_texture_flag and latent_upsampler_instance:
+        active_latent_upsampler = latent_upsampler_instance
+
+    result_images_tensor = None
+    if improve_texture_flag:
+        if not active_latent_upsampler:
+            raise gr.Error("Spatial upscaler model not loaded or improve_texture not selected, cannot use multi-scale.")
+
+        multi_scale_pipeline_obj = LTXMultiScalePipeline(pipeline_instance, active_latent_upsampler)
+
+        first_pass_args = PIPELINE_CONFIG_YAML.get("first_pass", {}).copy()
+        first_pass_args["guidance_scale"] = float(ui_guidance_scale)  # UI overrides YAML
+        # num_inference_steps will be derived from len(timesteps) in the pipeline
+        first_pass_args.pop("num_inference_steps", None)
+
+        second_pass_args = PIPELINE_CONFIG_YAML.get("second_pass", {}).copy()
+        second_pass_args["guidance_scale"] = float(ui_guidance_scale)  # UI overrides YAML
+        # num_inference_steps will be derived from len(timesteps) in the pipeline
+        second_pass_args.pop("num_inference_steps", None)
+
+        multi_scale_call_kwargs = call_kwargs.copy()
+        multi_scale_call_kwargs.update({
+            "downscale_factor": PIPELINE_CONFIG_YAML["downscale_factor"],
+            "first_pass": first_pass_args,
+            "second_pass": second_pass_args,
+        })
+
+        print(f"Calling multi-scale pipeline (eff. HxW: {actual_height}x{actual_width}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
+        result_images_tensor = multi_scale_pipeline_obj(**multi_scale_call_kwargs).images
+    else:
+        single_pass_call_kwargs = call_kwargs.copy()
+        first_pass_config_from_yaml = PIPELINE_CONFIG_YAML.get("first_pass", {})
+
+        single_pass_call_kwargs["timesteps"] = first_pass_config_from_yaml.get("timesteps")
+        single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale)  # UI overrides YAML
+        single_pass_call_kwargs["stg_scale"] = first_pass_config_from_yaml.get("stg_scale")
+        single_pass_call_kwargs["rescaling_scale"] = first_pass_config_from_yaml.get("rescaling_scale")
+        single_pass_call_kwargs["skip_block_list"] = first_pass_config_from_yaml.get("skip_block_list")
+
+        # Remove keys that might conflict or are not used in single pass / handled by above
+        single_pass_call_kwargs.pop("num_inference_steps", None)
+        single_pass_call_kwargs.pop("first_pass", None)
+        single_pass_call_kwargs.pop("second_pass", None)
+        single_pass_call_kwargs.pop("downscale_factor", None)
+
+        print(f"Calling base pipeline (padded HxW: {height_padded}x{width_padded}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
+        result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images
+
+    if result_images_tensor is None:
+        raise gr.Error("Generation failed.")
+
+    pad_left, pad_right, pad_top, pad_bottom = padding_values
+    slice_h_end = -pad_bottom if pad_bottom > 0 else None
+    slice_w_end = -pad_right if pad_right > 0 else None

+    result_images_tensor = result_images_tensor[
+        :, :, :actual_num_frames, pad_top:slice_h_end, pad_left:slice_w_end
+    ]

+    video_np = result_images_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy()
+
+    video_np = np.clip(video_np, 0, 1)
+    video_np = (video_np * 255).astype(np.uint8)

+    temp_dir = tempfile.mkdtemp()
+    timestamp = random.randint(10000, 99999)
+    output_video_path = os.path.join(temp_dir, f"output_{timestamp}.mp4")
+
+    try:
+        with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], macro_block_size=1) as video_writer:
+            for frame_idx in range(video_np.shape[0]):
+                progress(frame_idx / video_np.shape[0], desc="Saving video")
+                video_writer.append_data(video_np[frame_idx])
+    except Exception as e:
+        print(f"Error saving video with macro_block_size=1: {e}")
+        try:
+            with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], format='FFMPEG', codec='libx264', quality=8) as video_writer:
+                for frame_idx in range(video_np.shape[0]):
+                    progress(frame_idx / video_np.shape[0], desc="Saving video (fallback ffmpeg)")
+                    video_writer.append_data(video_np[frame_idx])
+        except Exception as e2:
+            print(f"Fallback video saving error: {e2}")
+            raise gr.Error(f"Failed to save video: {e2}")
+
+    return output_video_path, seed_ui
+
+def update_task_image():
+    return "image-to-video"
+
+def update_task_text():
+    return "text-to-video"
+
+def update_task_video():
+    return "video-to-video"
+
+# --- Gradio UI Definition ---
+css="""
+#col-container {
+    margin: 0 auto;
+    max-width: 900px;
+}
 """

+with gr.Blocks(css=css) as demo:
+    gr.Markdown("# LTX Video 0.9.7 Distilled")
+    gr.Markdown("Fast high quality video generation. [Model](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.7-distilled.safetensors) [GitHub](https://github.com/Lightricks/LTX-Video) [Diffusers](#)")

+    with gr.Row():
+        with gr.Column():
+            with gr.Tab("image-to-video") as image_tab:
+                video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
+                image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam", "clipboard"])
+                i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
+                i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
+            with gr.Tab("text-to-video") as text_tab:
+                image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
+                video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
+                t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
+                t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
+            with gr.Tab("video-to-video", visible=False) as video_tab:
+                image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
+                video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"])  # type defaults to filepath
+                frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
+                v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
+                v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
+
+            duration_input = gr.Slider(
+                label="Video Duration (seconds)",
+                minimum=0.3,
+                maximum=8.5,
+                value=2,
+                step=0.1,
+                info="Target video duration (0.3s to 8.5s)"
+            )
+            improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
+
+        with gr.Column():
+            output_video = gr.Video(label="Generated Video", interactive=False)
+            # gr.DeepLinkButton()
+
+    with gr.Accordion("Advanced settings", open=False):
+        mode = gr.Dropdown(["text-to-video", "image-to-video", "video-to-video"], label="task", value="image-to-video", visible=False)
+        negative_prompt_input = gr.Textbox(label="Negative Prompt", value="worst quality, inconsistent motion, blurry, jittery, distorted", lines=2)
+        with gr.Row():
+            seed_input = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=2**32-1)
+            randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
         with gr.Row():
+            guidance_scale_input = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, value=PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0), step=0.1, info="Controls how much the prompt influences the output. Higher values = stronger influence.")
         with gr.Row():
+            height_input = gr.Slider(label="Height", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
+            width_input = gr.Slider(label="Width", value=704, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
+
+    with gr.Accordion("Debug", open=False):
+        image_i2v_debug = gr.Image(label="Input Image Debug", type="filepath", sources=["upload", "webcam", "clipboard"])
+        i2v_prompt_debug = gr.Textbox(label="Prompt Debug", value="", lines=3)
+        height_input_debug = gr.Slider(label="Height Debug", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
+        width_input_debug = gr.Slider(label="Width Debug", value=704, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
+        duration_input_debug = gr.Slider(
+            label="Video Duration Debug (seconds)",
+            minimum=0.3,
+            maximum=8.5,
+            value=6,
+            step=0.1,
+            info="Target video duration (0.3s to 8.5s)"
+        )
+
+    with gr.Row(visible=False):
+        gr.Examples(
             examples = [
                 [
+                    "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
+                    "",
+                    "./Example_LTX/Example1.png",
                     None,
+                    512,
+                    800,
+                    "image-to-video",
+                    6,
+                    9,
                     42,
                     True,
                     1,
+                    True
                 ],
             ],
             run_on_click = True,
+            fn = generate,
+            inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
+                      height_input, width_input, mode,
+                      duration_input, frames_to_use,
+                      seed_input, randomize_seed_input, guidance_scale_input, improve_texture],
+            outputs = [output_video, seed_input],
+            cache_examples = True,
         )

+    def height_input_debug_change(value):
+        global height_input_debug_value
+        height_input_debug_value = value
+        return []
+
+    def width_input_debug_change(value):
+        global width_input_debug_value
+        width_input_debug_value = value
+        return []
+
+    def duration_input_debug_change(value):
+        global duration_input_debug_value
+        duration_input_debug_value = value
+        return []
+
+    def i2v_prompt_debug_change(prompt):
+        global i2v_prompt_debug_value
+        i2v_prompt_debug_value = prompt
+        return []
+
+    # --- Event handlers for updating dimensions on upload ---
+    def handle_image_upload_for_dims(image_filepath, current_h, current_w):
+        if not image_filepath:  # Image cleared or no image initially
+            # Keep current slider values if image is cleared or no input
+            return gr.update(value=current_h), gr.update(value=current_w)
+        try:
+            img = Image.open(image_filepath)
+            orig_w, orig_h = img.size
+            new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
+            return gr.update(value=new_h), gr.update(value=new_w)
+        except Exception as e:
+            print(f"Error processing image for dimension update: {e}")
+            # Keep current slider values on error
+            return gr.update(value=current_h), gr.update(value=current_w)
+
+    def handle_image_debug_upload_for_dims(image_filepath, current_h, current_w):
+        global image_i2v_debug_value, height_input_debug_value, width_input_debug_value
+        image_i2v_debug_value = image_filepath
+        if not image_filepath:  # Image cleared or no image initially
+            # Keep current slider values if image is cleared or no input
+            return gr.update(value=current_h), gr.update(value=current_w)
+        try:
+            img = Image.open(image_filepath)
+            orig_w, orig_h = img.size
+            new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
+            height_input_debug_value = new_h
+            width_input_debug_value = new_w
+            return gr.update(value=new_h), gr.update(value=new_w)
+        except Exception as e:
+            # Keep current slider values on error
+            return gr.update(value=current_h), gr.update(value=current_w)
+
+    def handle_video_upload_for_dims(video_filepath, current_h, current_w):
+        if not video_filepath:  # Video cleared or no video initially
+            return gr.update(value=current_h), gr.update(value=current_w)
+        try:
+            # Ensure video_filepath is a string for os.path.exists and imageio
+            video_filepath_str = str(video_filepath)
+            if not os.path.exists(video_filepath_str):
+                print(f"Video file path does not exist for dimension update: {video_filepath_str}")
+                return gr.update(value=current_h), gr.update(value=current_w)
+
+            orig_w, orig_h = -1, -1
+            with imageio.get_reader(video_filepath_str) as reader:
+                meta = reader.get_meta_data()
+                if 'size' in meta:
+                    orig_w, orig_h = meta['size']
+                else:
+                    # Fallback: read first frame if 'size' not in metadata
+                    try:
+                        first_frame = reader.get_data(0)
+                        # Shape is (h, w, c) for frames
+                        orig_h, orig_w = first_frame.shape[0], first_frame.shape[1]
+                    except Exception as e_frame:
+                        print(f"Could not get video size from metadata or first frame: {e_frame}")
+                        return gr.update(value=current_h), gr.update(value=current_w)
+
+            if orig_w == -1 or orig_h == -1:  # If dimensions couldn't be determined
+                print(f"Could not determine dimensions for video: {video_filepath_str}")
+                return gr.update(value=current_h), gr.update(value=current_w)
+
+            new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
+            return gr.update(value=new_h), gr.update(value=new_w)
+        except Exception as e:
+            # Log type of video_filepath for debugging if it's not a path-like string
+            print(f"Error processing video for dimension update: {e} (Path: {video_filepath}, Type: {type(video_filepath)})")
+            return gr.update(value=current_h), gr.update(value=current_w)
+

+    image_i2v_debug.upload(
+        fn=handle_image_debug_upload_for_dims,
+        inputs=[image_i2v_debug, height_input_debug, width_input_debug],
+        outputs=[height_input_debug, width_input_debug]
+    )
+
+    image_i2v.upload(
+        fn=handle_image_upload_for_dims,
+        inputs=[image_i2v, height_input, width_input],
+        outputs=[height_input, width_input]
+    )
+    video_v2v.upload(
+        fn=handle_video_upload_for_dims,
+        inputs=[video_v2v, height_input, width_input],
+        outputs=[height_input, width_input]
+    )
+    i2v_prompt_debug.change(
+        fn=i2v_prompt_debug_change,
+        inputs=[i2v_prompt_debug],  # pass the prompt textbox itself, not the image
+        outputs=[]
+    )
+    height_input_debug.change(
+        fn=height_input_debug_change,
+        inputs=[height_input_debug],
+        outputs=[]
+    )
+    width_input_debug.change(
+        fn=width_input_debug_change,
+        inputs=[width_input_debug],
+        outputs=[]
+    )
+    duration_input_debug.change(
+        fn=duration_input_debug_change,
+        inputs=[duration_input_debug],
+        outputs=[]
+    )
+
+    image_tab.select(
+        fn=update_task_image,
+        outputs=[mode]
+    )
+    text_tab.select(
+        fn=update_task_text,
+        outputs=[mode]
+    )
+
+    t2v_inputs = [t2v_prompt, negative_prompt_input, image_n_hidden, video_n_hidden,
+                  height_input, width_input, mode,
+                  duration_input, frames_to_use,
+                  seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
+
+    i2v_inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
+                  height_input, width_input, mode,
+                  duration_input, frames_to_use,
+                  seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
+
+    v2v_inputs = [v2v_prompt, negative_prompt_input, image_v_hidden, video_v2v,
+                  height_input, width_input, mode,
+                  duration_input, frames_to_use,
+                  seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
+
+    t2v_button.click(fn=generate, inputs=t2v_inputs, outputs=[output_video, seed_input], api_name="text_to_video")
+    i2v_button.click(fn=generate, inputs=i2v_inputs, outputs=[output_video, seed_input], api_name="image_to_video")
+    v2v_button.click(fn=generate, inputs=v2v_inputs, outputs=[output_video, seed_input], api_name="video_to_video")
+
+if __name__ == "__main__":
+    if os.path.exists(models_dir) and os.path.isdir(models_dir):
+        print(f"Model directory: {Path(models_dir).resolve()}")
+
+    demo.queue().launch(debug=True, share=False, mcp_server=True)
requirements.txt CHANGED

@@ -1,48 +1,15 @@
-numpy
-einops==0.8.0
-einops-exts==0.0.4
-timm==1.0.7
-openai-clip==1.0.1
-fsspec==2024.6.1
-kornia==0.7.3
-matplotlib==3.9.1
-ninja==1.11.1.1
-omegaconf==2.3.0
-opencv-python==4.10.0.84
-pandas==2.2.2
-pillow==10.4.0
-pytorch-lightning==2.3.3
-PyYAML==6.0.1
-scipy==1.14.0
-tqdm==4.66.4
-triton==2.3.1
-urllib3==2.2.2
-webdataset==0.2.86
-xformers==0.0.27
-facexlib==0.3.0
-k-diffusion==0.1.1.post1
-diffusers==0.30.0
-pillow-heif==0.18.0
-
-open-clip-torch==2.24.0
-
-torchaudio
-easydict==1.13
-fairscale==0.4.13
-torchsde==0.2.6
-huggingface_hub==0.23.3
-gradio
+accelerate
+transformers
+sentencepiece
+pillow
+numpy
+torchvision
+huggingface_hub
+spaces
+opencv-python
+imageio
+imageio-ffmpeg
+einops
+timm
+av
+git+https://github.com/huggingface/diffusers.git@main